| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|

Raul3212/TopicosAvancadosBD | dev/model/Vertice.py | Python | gpl-3.0 | 198 | 0.005051
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Vertice:
def __init__(self, id, latitude, longitude):
self.id = id
self.latitude = latitude
self.longitude = longitude

PhoenixWright/MobileBDDCore | mobilebdd/drivers/drivers.py | Python | apache-2.0 | 3,463 | 0.002599
import logging
from mobilebdd.drivers.android import AndroidWebDriver, SelendroidWebDriver
from mobilebdd.drivers.desktop import DesktopChromeWebDriver, DesktopInternetExplorerWebDriver, DesktopFirefoxWebDriver
from mobilebdd.drivers.ios import iOSWebDriver
from mobilebdd.drivers.mobileweb import ChromeWebDriver, WebViewAppDriver
from mobilebdd.hacks.webdriver import HackedWebDriver
log = logging.getLogger(u'mobilebdd')
# mapping of device types to custom, specific webdrivers
HackedDrivers = {
u'android': AndroidWebDriver,
u'selendroid': SelendroidWebDriver,
u'ios': iOSWebDriver,
u'ipad': iOSWebDriver,
u'iphone': iOSWebDriver,
# mobile browsers
u'chrome': ChromeWebDriver,
u'webviewapp': WebViewAppDriver,
# desktop browsers
u'desktop-chrome': DesktopChromeWebDriver,
u'desktop-firefox': DesktopFirefoxWebDriver,
u'desktop-ie': DesktopInternetExplorerWebDriver
}
# all the android versions that appium doesn't support natively. these have to
# use selendroid
SelendroidVersions = [
u'2.3',
u'3.0',
u'3.1',
u'3.2',
u'4.0',
u'4.1',
# at time writing, appium 1.x lost support for some things that make test
# writing easier. like find by link text or partial link text. i like those
# so im making everything use selendroid. seems to work fine so far.
# plus appium tries to do fancy chromedriver stuff for native webviews. prob
# a bug but i dont want to deal with it right now.
u'4.2',
u'4.3',
u'4.4',
]
def webdriver_me(os_ver, os_type, app_path=u'', device_type=u''):
"""
returns a ref to the class that matches for the given os and device type
:param os_ver: version of the os
:param os_type: device... type. like android/selendroid/ipad/ios/etc
:param app_path: the path to the application to be installed, or a browser name
:param device_type: the type of device ('phone' or 'tablet')
"""
# ensure these aren't none so we can work with them as strings
if not os_ver:
os_ver = u''
if not os_type:
os_type = u''
if not app_path:
app_path = u''
if not device_type:
device_type = u''
# start off vague with the os type, and hone in on a specific driver if one exists
driver_type = os_type.lower()
if os_ver in SelendroidVersions and driver_type == u'android' and not app_path.lower() == u'chrome':
driver_type = u'selendroid'
elif driver_type == u'kindle':
driver_type = u'android'
elif os_type.lower() == u'linux' or os_type.lower() == u'osx' or os_type.lower() == u'windows':
if app_path.lower() == u'chrome':
driver_type = u'desktop-chrome'
elif app_path.lower() == u'firefox':
driver_type = u'desktop-firefox'
elif app_path.lower() == u'ie' or app_path.lower() == u'internet explorer':
driver_type = u'desktop-ie'
elif app_path.lower() == u'chrome':
driver_type = u'chrome'
elif u'webviewapp' in app_path.lower():
driver_type = u'webviewapp'
if driver_type in HackedDrivers:
log.debug(u'using driver_type "{}" for driver_type "{}" with os_type "{}" and app_path "{}"'.format(HackedDrivers[driver_type], driver_type, os_type, app_path))
return HackedDrivers[driver_type]
else:
log.warning(u'could not find a specific webdriver for {}. using default'.format(driver_type))
return HackedWebDriver

erinspace/osf.io | website/archiver/utils.py | Python | apache-2.0 | 10,456 | 0.002965
import functools
from framework.auth import Auth
from website.archiver import (
StatResult, AggregateStatResult,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
ARCHIVER_FILE_NOT_FOUND,
ARCHIVER_FORCED_FAILURE,
)
from website import (
mails,
settings
)
from osf.utils.sanitize import unescape_entities
def send_archiver_size_exceeded_mails(src, user, stat_result, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
user=user,
src=src,
stat_result=stat_result,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
user=user,
src=src,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_copy_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_COPY_ERROR_DESK,
user=user,
src=src,
results=results,
url=url,
can_change_preferences=False,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_COPY_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_file_not_found_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_FILE_NOT_FOUND_DESK,
can_change_preferences=False,
user=user,
src=src,
results=results,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_FILE_NOT_FOUND_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_uncaught_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_DESK,
user=user,
src=src,
results=results,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def handle_archive_fail(reason, src, dst, user, result):
url = settings.INTERNAL_DOMAIN + src._id
if reason == ARCHIVER_NETWORK_ERROR:
send_archiver_copy_error_mails(src, user, result, url)
elif reason == ARCHIVER_SIZE_EXCEEDED:
send_archiver_size_exceeded_mails(src, user, result, url)
elif reason == ARCHIVER_FILE_NOT_FOUND:
send_archiver_file_not_found_mails(src, user, result, url)
elif reason == ARCHIVER_FORCED_FAILURE: # Forced failure using scripts.force_fail_registration
pass
else: # reason == ARCHIVER_UNCAUGHT_ERROR
send_archiver_uncaught_error_mails(src, user, result, url)
dst.root.sanction.forcibly_reject()
dst.root.sanction.save()
dst.root.delete_registration_tree(save=True)
def archive_provider_for(node, user):
"""A generic function to get the archive provider for some node, user pair.
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.get_addon(settings.ARCHIVE_PROVIDER)
def has_archive_provider(node, user):
"""A generic function for checking whether or not some node, user pair has
an attached provider for archiving
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.has_addon(settings.ARCHIVE_PROVIDER)
def link_archive_provider(node, user):
"""A generic function for linking some node, user pair with the configured
archive provider
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
addon = node.get_or_add_addon(settings.ARCHIVE_PROVIDER, auth=Auth(user), log=False)
if hasattr(addon, 'on_add'):
addon.on_add()
node.save()
def aggregate_file_tree_metadata(addon_short_name, fileobj_metadata, user):
"""Recursively traverse the addon's file tree and collect metadata in AggregateStatResult
:param src_addon: AddonNodeSettings instance of addon being examined
:param fileobj_metadata: file or folder metadata of current point of reference
in file tree
:param user: archive initatior
:return: top-most recursive call returns AggregateStatResult containing addon file tree metadata
"""
disk_usage = fileobj_metadata.get('size')
if fileobj_metadata['kind'] == 'file':
result = StatResult(
target_name=fileobj_metadata['name'],
target_id=fileobj_metadata['path'].lstrip('/'),
disk_usage=disk_usage or 0,
)
return result
else:
return AggregateStatResult(
target_id=fileobj_metadata['path'].lstrip('/'),
target_name=fileobj_metadata['name'],
targets=[aggregate_file_tree_metadata(addon_short_name, child, user) for child in fileobj_metadata.get('children', [])],
)
def before_archive(node, user):
from osf.models import ArchiveJob
link_archive_provider(node, user)
job = ArchiveJob.objects.create(
src_node=node.registered_from,
dst_node=node,
initiator=user
)
job.set_targets()
def _do_get_file_map(file_tree):
"""Reduces a tree of folders and files into a list of (<sha256>, <file_metadata>) pairs
"""
file_map = []
stack = [file_tree]
while len(stack):
tree_node = stack.pop(0)
if tree_node['kind'] == 'file':
file_map.append((tree_node['extra']['hashes']['sha256'], tree_node))
else:
stack = stack + tree_node['children']
return file_map
def _memoize_get_file_map(func):
cache = {}
@functools.wraps(func)
def wrapper(node):
if node._id not in cache:
osf_storage = node.get_addon('osfstorage')
file_tree = osf_storage._get_file_tree(user=node.creator)
cache[node._id] = _do_get_file_map(file_tree)
return func(node, cache[node._id])
return wrapper
@_memoize_get_file_map
def get_file_map(node, file_map):
"""
note:: file_map is injected implictly by the decorator; this method is called like:
get_file_map(node)
"""
for (key, value) in file_map:
yield (key, value, node._id)
for child in node.nodes_primary:
for key, value, node_id in get_file_map(child):
yield (key, value, node_id)
def find_registration_file(value, node):
from osf.models import AbstractNode
orig_sha256 = value['sha256']
orig_name = unescape_entities(
value['selectedFileName'],
safe={
'<': '<',
'>': '>'
}
)
orig_node = value['nodeId']
file_map = get_file_map(node)
for sha256, value, node_id in file_map:
registered_from_id = AbstractNode.load(node_id).registered_from._id
if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == value['name']:
return value, node_id
return None, None
def find_registration_files(values, node):
ret = []
for i in range(len(values.get('extra', []))):
ret.append(find_registration_file(values['extra'][i], node) + (i,))
return ret
def get_title_for_question(schema, path):
path = path.split('.')
root = path.pop(0)
item = None
for page in schema['pages']:
questions = {
q['qid']: q
for q in page['questions']
}
if root in questions:
item = questions[root]
title = item.get('title')
while len(path):
item = item.get

pythonbyexample/PBE | dbe/businesstest/urls.py | Python | bsd-3-clause | 619 | 0.006462
from django.conf.urls import *
from django.contrib.auth import views as auth_views
from businesstest.views import Messages
urlpatterns = patterns("businesstest.views",
# (r"messages/$", "message_list"),
(r"messages/$", Messages.as_view(), {}, "messages"),
(r"(\d+)/$", "test"),
(r"test_done/$", "test_done"),
# (r"^melting-temp-rc/$", "melting_temp_rc"),
# (r"^search/(oligo|half|construct|parse|project)/(toggledir|up|down)/(.+)/$",
|
"search", {}, "parse9_search"),
(r"^$", redirect_to, {"url": "/bt/1/"}),
)
urlpatterns += patterns('',
(r"^account/", include("registration.urls")),
)

junaruga/rpm-py-installer | tests/test_install_suse.py | Python | mit | 1,463 | 0
"""
Tests for install.py for SUSE based Linux distributions
"""
import os
import shutil
from unittest import mock
import pytest
from install import Cmd, CmdError, RemoteFileNotFoundError
pytestmark = pytest.mark.skipif(
not pytest.helpers.helper_is_suse(),
reason="Tests for openSUSE/SUSE"
)
def test_rpm_download_raise_not_found_error(sys_rpm):
with mock.patch.object(Cmd, 'sh_e') as mock_sh_e:
ce = CmdError('test.')
ce.stderr = 'Package \'dummy\' not found.\n'
mock_sh_e.side_effect = ce
with pytest.raises(RemoteFileNotFoundError) as exc:
sys_rpm.download('dummy')
assert mock_sh_e.called
assert str(exc.value) == 'Package dummy not found on remote'
def test_rpm_extract_is_ok(sys_rpm, rpm_files, monkeypatch):
# mocking arch object for multi arch test cases.
sys_rpm.arch = 'x86_64'
with pytest.helpers.work_dir():
for rpm_file in rpm_files:
shutil.copy(rpm_file, '.')
sys_rpm.extract('rpm-build-libs')
files = os.listdir('./usr/lib64')
files.sort()
assert files == [
'librpmbuild.so.7',
'librpmbuild.so.7.0.1',
'librpmsign.so.7',
'librpmsign.so.7.0.1',
]
@pytest.mark.network
def test_app_verify_system_status_is_ok_on_sys_rpm_and_missing_pkgs(app):
app.linux.rpm.is_system_rpm = mock.MagicMock(return_value=True)
app.linux.verify_system_status()

hoytnix/hoyt.io | flask-server/hoyt/migrations/versions/d987888a7460_tags_table.py | Python | gpl-3.0 | 1,553 | 0.003863
"""'d987888a7460' - Tags table."""
import sqlalchemy as sa
from alembic import op
#from lib.util_datetime import tzware_datetime
#from lib.util_sqlalchemy import AwareDateTime
"""
Tags table
Revision ID: d987888a7460
Revises: 216ce379d3f0
Create Date: 2016-11-01 14:13:28.216736
"""
# Revision identifiers, used by Alembic.
revision = 'd987888a7460'
down_revision = '216ce379d3f0'
branch_labels = None
depends_on = None
def upgrade():
"""Command to migrate database forward."""
### commands auto generated by Alembic - please adjust! ###
op.create_table(
'tag',
sa.Column(
'id', sa.Integer(), nullable=False),
sa.Column(
'title', sa.String(length=128), nullable=False),
sa.PrimaryKeyConstraint('id'))
op.add_column('tags', sa.Column('tag_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'tags', 'tag', ['tag_id'], ['id'])
op.drop_column('tags', 'title')
op.drop_column('tags', 'id')
### end Alembic commands ###
def downgrade():
"""Command to migrate database backwards."""
### commands auto generated by Alembic - please adjust! ###
op.add_column('tags', sa.Column('id', sa.INTEGER(), nullable=False))
op.add_column(
'tags',
sa.Column(
'title',
sa.VARCHAR(length=128),
autoincrement=False,
nullable=False))
op.drop_constraint(None, 'tags', type_='foreignkey')
op.drop_column('tags', 'tag_id')
op.drop_table('tag')
### end Alembic commands ###

davy39/eric | Preferences/ConfigurationPages/EditorCalltipsPage.py | Python | gpl-3.0 | 2,886 | 0.003465
# -*- coding: utf-8 -*-
# Copyright (c) 2006 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the Editor Calltips configuration page.
"""
from __future__ import unicode_literals
from PyQt5.Qsci import QsciScintilla
from QScintilla.QsciScintillaCompat import QSCINTILLA_VERSION
from .ConfigurationPageBase import ConfigurationPageBase
from .Ui_EditorCalltipsPage import Ui_EditorCalltipsPage
import Preferences
class EditorCalltipsPage(ConfigurationPageBase, Ui_EditorCalltipsPage):
"""
Class implementing the Editor Calltips configuration page.
"""
def __init__(self):
"""
Constructor
"""
super(EditorCalltipsPage, self).__init__()
self.setupUi(self)
self.setObjectName("EditorCalltipsPage")
if QSCINTILLA_VERSION() >= 0x020700:
self.positionComboBox.addItem(
self.tr("Below Text"),
QsciScintilla.CallTipsBelowText)
self.positionComboBox.addItem(
self.tr("Above Text"),
QsciScintilla.CallTipsAboveText)
else:
self.calltipsPositionBox.hide()
# set initial values
self.ctEnabledCheckBox.setChecked(
Preferences.getEditor("CallTipsEnabled"))
self.ctVisibleSlider.setValue(
Preferences.getEditor("CallTipsVisible"))
self.initColour("CallTipsBackground", self.calltipsBackgroundButton,
Preferences.getEditorColour)
self.ctScintillaCheckBox.setChecked(
Preferences.getEditor("CallTipsScintillaOnFail"))
if QSCINTILLA_VERSION() >= 0x020700:
self.positionComboBox.setCurrentIndex(
self.positionComboBox.findData(
Preferences.getEditor("CallTipsPosition")))
def save(self):
"""
Public slot to save the EditorCalltips configuration.
"""
Preferences.setEditor(
"CallTipsEnabled",
self.ctEnabledCheckBox.isChecked())
Preferences.setEditor(
"CallTipsVisible",
self.ctVisibleSlider.value())
self.saveColours(Preferences.setEditorColour)
Preferences.setEditor(
"CallTipsScintillaOnFail",
self.ctScintillaCheckBox.isChecked())
if QSCINTILLA_VERSION() >= 0x020700:
Preferences.setEditor(
"CallTipsPosition",
self.positionComboBox.itemData(
self.positionComboBox.currentIndex()))
def create(dlg):
"""
Module function to create the configuration page.
@param dlg reference to the configuration dialog
@return reference to the instantiated page (ConfigurationPageBase)
"""
page = EditorCalltipsPage()
return page

aragos/tichu-tournament | api/src/movement_handler.py | Python | mit | 5,763 | 0.011279
import webapp2
import json
from collections import defaultdict
from generic_handler import GenericHandler
from google.appengine.api import users
from google.appengine.ext import ndb
from handler_utils import is_int
from handler_utils import GetPairIdFromRequest
from handler_utils import GetTourneyWithIdAndMaybeReturnStatus
from handler_utils import SetErrorStatus
from models import HandScore
from models import PlayerPair
from models import Tournament
from movements import Movement
class MovementHandler(GenericHandler):
''' Class to handle requests to /api/tournaments/:id/movement/:pair_no '''
def get(self, id, pair_no):
''' Fetch movement for tournament with id and team pair_no.
Args:
id: String. Tournament id.
pair_no: Integer. Pair number for the team whose movement we're getting.
See api for request and response documentation.
'''
tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id)
if not tourney:
return
if not self._CheckValidPairMaybeSetStatus(tourney, pair_no):
return
player_pair = PlayerPair.GetByPairNo(tourney, int(pair_no))
if not player_pair:
SetErrorStatus(self.response, 404, "Invalid Request",
"Player pair {} not in tournament".format(pair_no))
return
if not self._CheckUserAllowedToSeeMovementMaybeSetStatus(
tourney, player_pair):
return
no_hands_per_round, no_rounds = Movement.NumBoardsPerRoundFromTotal(
tourney.no_pairs, tourney.no_boards)
try:
movement = Movement.CreateMovement(
tourney.no_pairs, no_hands_per_round, no_rounds,
tourney.legacy_version_id).GetMovement(int(pair_no))
except ValueError:
SetErrorStatus(self.response, 500, "Corrupted Data",
"No valid movement for this tourney's config")
return
movement_list = self._GetMovementHandsAsync(tourney, movement, int(pair_no))
combined_dict = {
'name' : tourney.name,
'players' : player_pair.player_list(),
'allow_score_overwrites' : tourney.IsUnlocked(),
'movement': movement_list
}
self.response.headers['Content-Type'] = 'application/json'
self.response.set_status(200)
self.response.out.write(json.dumps(combined_dict, indent=2))
def _GetMovementHandsAsync(self, tourney, movement, pair_no):
''' Converts movement information to a json interpretable string adding
scored hands if any exist.
Args:
movement: Movement. Movement for this pair.
tourney: Tournament. Tournament in which this is happening.
pair_no: Pair from whose point of view this movement is seen.
Returns:
List as expected by api. Includes any scores that have already been added.
'''
# Dict from round number to list of futures
hand_futures_dict = defaultdict(list)
players_futures_dict = {}
movement_list = []
for round in movement:
hands = round.hands
if not hands:
continue
opp = round.opponent
players_futures_dict[round.round] = PlayerPair.GetByPairNoAsync(tourney, opp)
for h in hands:
if round.is_north:
hand_futures_dict[round.round].append(
HandScore.GetByHandParamsAsync(tourney, h, pair_no, round.opponent))
else:
hand_futures_dict[round.round].append(
HandScore.GetByHandParamsAsync(tourney, h, round.opponent, pair_no))
for round in movement:
hands = round.hands
round_str = round.to_dict()
opp = round.opponent
if opp:
opp_pp = players_futures_dict[round.round].get_result()
if opp_pp:
round_str["opponent_names"] = [x.get("name") for x in
opp_pp.player_list()]
if hands:
del round_str['hands']
for i in xrange(len(hands)):
hand_score = hand_futures_dict[round.round][i].get_result()
if hand_score and not hand_score.deleted:
round_str.setdefault('hands', []).append({
'hand_no' : hands[i],
'score': {
'calls' : hand_score.calls_dict(),
'ns_score' : hand_score.get_ns_score(),
'ew_score' : hand_score.get_ew_score(),
'notes' : hand_score.notes,
}})
else:
round_str.setdefault('hands', []).append({ 'hand_no' : hands[i] })
movement_list.append(round_str)
return movement_list
def _CheckValidPairMaybeSetStatus(self, tourney, pair_no):
''' Test if the provided pair number is valid for tourney.
Args:
tourney: Tournament. Tournament the pair number is being validated for.
pair_no: Integer. Pair number for the team we are validating.
'''
error = "Invalid Input"
if (not is_int(pair_no)) or int(pair_no) < 1 or int(pair_no) > tourney.no_pairs:
SetErrorStatus(self.response, 404, error,
"Pair number {} is invalid".format(pair_no))
return False
return True
def _CheckUserAllowedToSeeMovementMaybeSetStatus(self, tourney, player_pair):
error = "Forbidden User"
user = users.get_current_user()
if user and tourney.owner_id == user.user_id():
return True
pair_id = GetPairIdFromRequest(self.request)
if not pair_id:
SetErrorStatus(self.response, 403, error,
"User does not own tournament and is not authenticated " +
"with a pair code to see this movement")
return False
if pair_id != player_pair.id:
SetErrorStatus(self.response, 403, error,
"User does not own tournament and is authenticated with " +
"the wrong code for pair {}".format(player_pair.pair_no))
return False
return True

crazyskady/ai-game-python | Chapter08/CMapper.py | Python | mit | 1,821 | 0.031851
# -*- coding: utf-8 -*-
dCellSize = 20
WindowWidth = 400
WindowHeight = 400
class SCell(object):
def __init__(self, xmin, xmax, ymin, ymax):
self._iTicksSpentHere = 0
self._left = xmin
self._right = xmax
self._top = ymin
self.bottom = ymax
def Update(self):
self._iTicksSpentHere += 1
def Reset(self):
self._iTicksSpentHere = 0
class CMapper(object):
def __init__(self, MaxRangeX, MaxRangeY):
self._dCellSize = dCellSize
self._NumCellsX = (MaxRangeX/self._dCellSize) + 1
self._NumCellsY = (MaxRangeY/self._dCellSize) + 1
self._2DvecCells = []
for x in xrange(self._NumCellsX):
temp = []
for y in xrange(self._NumCellsY):
temp.append(SCell(x*self._dCellSize, (x+1)*self._dCellSize, y*self._dCellSize, (y+1)*self._dCellSize))
self._2DvecCells.append(temp)
self._iTotalCells = self._NumCellsX * self._NumCellsY
def Update(self, xPos, yPos):
if ((xPos < 0) or (xPos > WindowWidth) or (yPos < 0) or (yPos > WindowHeight)):
return
cellX = int(xPos/self._dCellSize)
cellY = int(yPos/self._dCellSize)
self._2DvecCells[cellX][cellY].Update()
def TicksLingered(self, xPos, yPos):
if ((xPos < 0) or (xPos > WindowWidth) or (yPos < 0) or (yPos > WindowHeight)):
return 999
cellX = int(xPos/self._dCellSize)
cellY = int(yPos/self._dCellSize)
return self._2DvecCells[cellX][cellY]._iTicksSpentHere
def BeenVisited(self, xPos, yPos):
print "Not implemented!"
def Render(self):
print "To be implemented"
def Reset(self):
for i in xrange(self._NumCellsX):
for j in xrange(self._NumCellsY):
self._2DvecCells[i][j].Reset()
def NumCellsVisited(self):
total = 0
for i in xrange(self._NumCellsX):
for j in xrange(self._NumCellsY):
if self._2DvecCells[i][j]._iTicksSpentHere > 0:
total += 1
return total

dmpetrov/dataversioncontrol | tests/remotes/s3.py | Python | apache-2.0 | 4,225 | 0
import locale
import os
import uuid
import pytest
from funcy import cached_property
from dvc.utils import env2bool
from .base import Base
from .path_info import CloudURLInfo
TEST_AWS_REPO_BUCKET = os.environ.get("DVC_TEST_AWS_REPO_BUCKET", "dvc-temp")
TEST_AWS_ENDPOINT_URL = "http://127.0.0.1:{port}/"
class S3(Base, CloudURLInfo):
IS_OBJECT_STORAGE = True
TEST_AWS_ENDPOINT_URL = None
@cached_property
def config(self):
return {"url": self.url, "endpointurl": self.TEST_AWS_ENDPOINT_URL}
@staticmethod
def should_test():
do_test = env2bool("DVC_TEST_AWS", undefined=None)
if do_test is not None:
return do_test
if os.getenv("AWS_ACCESS_KEY_ID") and os.getenv(
"AWS_SECRET_ACCESS_KEY"
):
return True
return False
@staticmethod
def _get_storagepath():
return (
TEST_AWS_REPO_BUCKET
+ "/"
+ "dvc_test_caches"
+ "/"
+ str(uuid.uuid4())
)
@staticmethod
def get_url():
return "s3://" + S3._get_storagepath()
@cached_property
def _s3(self):
import boto3
return boto3.client("s3", endpoint_url=self.config["endpointurl"])
def is_file(self):
from botocore.exceptions import ClientError
if self.path.endswith("/"):
return False
try:
self._s3.head_object(Bucket=self.bucket, Key=self.path)
except ClientError as exc:
if exc.response["Error"]["Code"] != "404":
raise
return False
return True
def is_dir(self):
path = (self / "").path
resp = self._s3.list_objects(Bucket=self.bucket, Prefix=path)
return bool(resp.get("Contents"))
def exists(self):
return self.is_file() or self.is_dir()
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
assert mode == 0o777
assert parents
def write_bytes(self, contents):
self._s3.put_object(Bucket=self.bucket, Key=self.path, Body=contents)
def read_bytes(self):
data = self._s3.get_object(Bucket=self.bucket, Key=self.path)
return data["Body"].read()
def read_text(self, encoding=None, errors=None):
if not encoding:
encoding = locale.getpreferredencoding(False)
assert errors is None
return self.read_bytes().decode(encoding)
@property
def fs_path(self):
return self.bucket + "/" + self.path.lstrip("/")
@pytest.fixture
def s3_fake_creds_file(monkeypatch):
# https://github.com/spulec/moto#other-caveats
import pathlib
aws_dir = pathlib.Path("~").expanduser() / ".aws"
aws_dir.mkdir(exist_ok=True)
aws_creds = aws_dir / "credentials"
initially_exists = aws_creds.exists()
if not initially_exists:
aws_creds.touch()
try:
with monkeypatch.context() as m:
m.setenv("AWS_ACCESS_KEY_ID", "testing")
m.setenv("AWS_SECRET_ACCESS_KEY", "testing")
m.setenv("AWS_SECURITY_TOKEN", "testing")
m.setenv("AWS_SESSION_TOKEN", "testing")
yield
finally:
if aws_creds.exists() and not initially_exists:
aws_creds.unlink()
@pytest.fixture(scope="session")
def s3_server(test_config, docker_compose, docker_services):
import requests
test_config.requires("s3")
port = docker_services.port_for("motoserver", 5000)
endpoint_url = TEST_AWS_ENDPOINT_URL.format(port=port)
def _check():
try:
r = requests.get(endpoint_url)
return r.ok
except requests.RequestException:
return False
docker_services.wait_until_responsive(
timeout=60.0, pause=0.1, check=_check
)
S3.TEST_AWS_ENDPOINT_URL = endpoint_url
return endpoint_url
@pytest.fixture
def s3(test_config, s3_server, s3_fake_creds_file):
test_config.requires("s3")
workspace = S3(S3.get_url())
workspace._s3.create_bucket(Bucket=TEST_AWS_REPO_BUCKET)
yield workspace
@pytest.fixture
def real_s3():
if not S3.should_test():
pytest.skip("no real s3")
yield S3(S3.get_url())

BrainTech/openbci | obci/logic/logic_speller_peer.py | Python | gpl-3.0 | 1,099 | 0.002732
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:
# Mateusz Kruszyński <mateusz.kruszynski@gmail.com>
#
import time
from obci.utils import tags_helper
from multiplexer.multiplexer_constants import peers, types
from obci.logic import logic_helper
from obci.logic.logic_decision_peer import LogicDecision
from obci.logic.engines.speller_engine import SpellerEngine
from obci.utils import context as ctx
from obci.configs import settings, variables_pb2
from obci.utils.openbci_logging import log_crash
class LogicSpeller(LogicDecision, SpellerEngine):
"""A class for creating a manifest file with metadata."""
@log_crash
def __init__(self, addresses):
LogicDecision.__init__(self, addresses=addresses)
context = ctx.get_new_context()
context['logger'] = self.logger
SpellerEngine.__init__(self, self.config.param_values(), context)
self.ready()
self._update_letters()
def _run_post_actions(self, p_decision):
self._update_letters()
if __name__ == "__main__":
LogicSpeller(settings.MULTIPLEXER_ADDRESSES).loop()

cmoutard/mne-python | mne/viz/misc.py | Python | bsd-3-clause | 19,712 | 0
"""Functions to make simple plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import copy
import warnings
from glob import glob
import os.path as op
from itertools import cycle
import numpy as np
from scipy import linalg
from ..surface import read_surface
from ..io.proj import make_projector
from ..utils import logger, verbose, get_subjects_dir
from ..io.pick import pick_types
from .utils import tight_layout, COLORS, _prepare_trellis, plt_show
@verbose
def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data
Parameters
----------
cov : instance of Covariance
The covariance matrix.
info: dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
type. We show square roots ie. standard deviations.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
if exclude == 'bads':
exclude = info['bads']
ch_names = [n for n in cov.ch_names if n not in exclude]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=exclude)
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=exclude)
idx_eeg = [ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in ch_names]
idx_mag = [ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in ch_names]
idx_grad = [ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in ch_names]
idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
(idx_grad, 'Gradiometers', 'fT/cm', 1e13),
(idx_mag, 'Magnetometers', 'fT', 1e15)]
idx_names = [(idx, name, unit, scaling)
for idx, name, unit, scaling in idx_names if len(idx) > 0]
C = cov.data[ch_idx][:, ch_idx]
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
import matplotlib.pyplot as plt
fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
for k, (idx, name, _, _) in enumerate(idx_names):
plt.subplot(1, len(idx_names), k + 1)
plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r')
plt.title(name)
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd = plt.figure()
for k, (idx, name, unit, scaling) in enumerate(idx_names):
s = linalg.svd(C[idx][:, idx], compute_uv=False)
plt.subplot(1, len(idx_names), k + 1)
plt.ylabel('Noise std (%s)' % unit)
plt.xlabel('Eigenvalue index')
plt.semilogy(np.sqrt(s) * scaling)
plt.title(name)
tight_layout(fig=fig_svd)
plt_show(show)
return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
# If there is a gap in the frequency bins record its locations so that it
# can be covered with a gray horizontal bar
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
plt.title('Time-frequency source power')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(

dset0x/invenio-checker | invenio_checker/workflows/base_bundle.py | Python | gpl-2.0 | 847 | 0.001181
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
class base_bundle(object):
workflow = []
__all__ = ('base_bundle', )

mugurbil/gnm | examples/exp_time_series/acor_sample.py | Python | mit | 2,362 | 0.023709
# -*- coding: utf-8 -*-
import numpy as np
from numpy import linalg as la
from math import exp,pi
import time
from optparse import OptionParser
import json
import os
import gnm
'''
Calculating the auto-corrolation time which is an ill-posed problem.
Generating the theoretical (quadrature) curve for the auto-corrolation.
'''
print
print 'Auto-corrolation time: sampling'
# command line options to set parameters
parser = OptionParser()
# experiment number
parser.add_option('-c', dest='count', type='int',
default=0, help='count of experiment')
# seeding
parser.add_option('-s', dest='seed', type='int',
default=5, help='random number generator seed')
# for the sampler
parser.add_option('-n', dest='num_samples', type='int',
default=10000, help='number of samples')
parser.add_option('-b', dest='num_burn', type='int',
default=1000, help='number of samples burned')
parser.add_option('-m', dest='max_steps', type='int',
default=4, help='max back off steps')
parser.add_option('-z', dest='step_size', type='float',
default=0.1, help='step size of back off')
(opts, arg) = parser.parse_args()
# seed the random number generator
np.random.seed(opts.seed)
# get the data
try:
print 'Importing Data...\n'
folder = 'acor_data_%d/' % opts.count
path = os.path.join(folder, 'data')
data_file = open(path, 'r')
data = json.load(data_file)
data_file.close()
args = data['args']
m = data['m']
H = data['H']
sigma = data['s']
y = data['y']
except:
print "Data could not be imported."
exit(0)
# make function instance
from acor_func import funky
f = gnm.F(funky,args)
# creating sampler object
sampler = gnm.sampler(m,H,y,sigma,f)
# sample the likelihood
print 'Sampling {:.2e} points...'.format(opts.num_samples)
start_time = time.time()
chain,stats = sampler.sample(m,opts.num_samples,
max_steps=opts.max_steps,
step_size=opts.step_size)
chain = chain[opts.num_burn:]
end_time = time.time()
T = end_time-start_time
print 'Acceptence Percentage : {:.3}'.format(stats['accept_rate'])
print 'Ellapsed Time : %d h %d m %d s' % (T/3600,T/60%60,T%60)
print
# write data to file
path = os.path.join(folder, 'chain')
file = open(path, 'w')
json.dump(chain.tolist(), file)
file.close()
path = os.path.join(folder, 'stats')
file = open(path, 'w')
json.dump(stats, file)
file.close()

samui13/Gnuplot3D | script/split3D.py | Python | mit | 1,765 | 0.015864
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last-Updated : <2013/08/18 22:47:11 by samui>
import sys,os
def mkdir(folder):
if not os.path.isdir(folder):
os.system("mkdir {0}".format(folder))
if __name__ == "__main__":
data_file = os.path.abspath(sys.argv[1])
root,ext = os.path.splitext(os.path.basename(data_file))
data_folder = os.path.join(os.path.dirname(data_file),"split-{0}".format(root))
sdata_folder = os.path.join(os.path.dirname(data_file),"split-{0}".format(root),"data")
png_folder = os.path.join(data_folder,"png")
gnuplot_file = os.path.join(data_folder,"gnuplo
|
t.txt")
mkdir(data_folder)
mkdir(png_folder)
mkdir(sdata_folder)
#Split Phase
Nx = 50
Ny = 50
Nz = 50
data = open(data_file,"r")
data_list = []
for k in range(Nz+1):
out_data = os.path.join(sdata_folder,"data{0}.txt".format(k))
data_list.append(out_data)
out_file = open(out_data,"w");
for j in range(0,Ny+1):
for i in range(0,Nx+1):
out_file.write(data.readline())
out_file.write(data.readline())
out_file.close()
data.readline()
data.close()
# Gnuplot File Output
gnup_file = open(gnuplot_file,"w")
gnup_file.write("set pm3d map\n")
gnup_file.write("set cbrange[0:1.0]\n")
gnup_file.write("set term png\n")
for data in (data_list):
root,ext = os.path.splitext(os.path.basename(data))
gnup_file.write("set output \"{0}\"\n".format(os.path.join(png_folder,"{0}.png".format(root))))
gnup_file.write("splot \"{0}\" title \"\"\n".format(data))
gnup_file.close()
# Gnuplot Image file
os.system("gnuplot {0}".format(gnuplot_file))

LLNL/spack | var/spack/repos/builtin/packages/gengeo/package.py | Python | lgpl-2.1 | 1,514 | 0.001982
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gengeo(AutotoolsPackage):
"""GenGeo is a library of tools for creating complex particle
geometries for use in ESyS-Particle simulations. GenGeo is a standalone
application with a Python API that creates geometry files suitable for
importing into ESyS-Particle simulations. The functionality of GenGeo far
exceeds the in-simulation geometry creation utilities
provided by ESyS-Particle itself."""
homepage = "https://launchpad.net/esys-particle/gengeo"
url = "https://launchpad.net/esys-particle/trunk/3.0-alpha/+download/gengeo-163.tar.gz"
maintainers = ['dorton21']
version('163', sha256='9c896d430d8f315a45379d2b82e7d374f36259af66a745bfdee4c022a080d34d')
extends('python')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('boost+python')
depends_on('openmpi')
def autoreconf(self, spec, prefix):
autogen = Executable('./autogen.sh')
autogen()
def configure_args(self):
args = [
'--verbose',
'--with-boost=' + self.spec['boost'].prefix,
'CCFLAGS=-fpermissive',
'CXXFLAGS=-fpermissive',
]
return args

mmoran0032/NMRpy | data/AWTconvertTable.py | Python | mit | 1,558 | 0
#!/usr/bin/env python
# convertAWT.py - Mass Table Conversion Utility
import os
massFile = 'AWTMass-2003.dat'
newFile = os.path.join('..', 'nmrfreq', 'masstable.py')
def main():
with open(massFile, 'r') as file:
massDict = extractMasses(file)
writeToFile(newFile, massDict, massFile)
def extractMasses(file):
massdict = {}
for line in file:
line = adjustLine(line)
if line is not None:
isotope, Z, mass = getValuesFrom(line)
mass = convertMass(mass)
massdict[isotope] = (Z, mass)
return massdict
def adjustLine(line):
line = line.strip()
if line[0] != '#' and line[-1] != '#':
line = line[9:].strip()
line = line.split()
return line
def getValuesFrom(splitline):
isotope = '{0}{1}'.format(splitline[2], splitline[1])
isotope = isotope.upper()
Z = int(splitline[0])
mass = '{0}{1}'.format(splitline[-3], splitline[-2])
return isotope, Z, mass
def convertMass(mass):
mass = float(mass) / 1000000.0
return mass
def writeToFile(filename, massdict, massFile):
with open(filename, 'w') as f:
f.write('# Mass table for use in nmrfreq from {0}\n'.format(massFile))
f.write('table = {\n')
f.write(createIsotopesString(massdict))
f.write('}\n')
def createIsotopesString(massdict):
string = ''
for key in sorted(massdict.iterkeys()):
string = '{2} "{0}": {1},\n'.format(key, massdict[key], string)
return string
if __name__ == '__main__':
main()

bogdal/freepacktbook | freepacktbook/slack.py | Python | mit | 1,181 | 0.000847
import json
import requests
class SlackNotification(object):
icon_url = "https://githu
|
b-bogdal.s3.amazonaws.com/freepacktbook/icon.png"
def __init__(self, slack_url, channel):
self.slack_url = slack_url
self.channel = channel
if not self.channel.startswith("#"):
self.channel = "#%s" % (sel
|
f.channel,)
def notify(self, data):
if not all([self.slack_url, self.channel]):
return
payload = {
"channel": self.channel,
"username": "PacktPub Free Learning",
"icon_url": self.icon_url,
"attachments": [
{
"fallback": "Today's Free eBook: %s" % data["title"],
"pretext": "Today's Free eBook:",
"title": data["title"],
"title_link": data["book_url"],
"color": "#ff7f00",
"text": "%s\n%s" % (data["description"], data.get("url", "")),
"thumb_url": data["image_url"].replace(" ", "%20"),
}
],
}
requests.post(self.slack_url, data={"payload": json.dumps(payload)})

MobProgramming/MobTimer.Python | tests/Infrastructure/TipsManager/test_TipsManager.py | Python | mit | 1,455 | 0.002062
import unittest
import sys
import os
from Infrastructure.FileUtilities import FileUtilities
from Infrastructure.TipsManager import TipsManager
class TestsTipsManage(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.fileUtilities = FileUtilities()
def test_random_tip_from_file(self):
seed = 0
dirname = os.path.dirname(__file__)
path = self.fileUtilities.go_up_dirs(dirname, 2) + "\\Tips"
tips_manager = TipsManager(seed, path)
result = tips_manager.get_random_tip()
self.assertEqual(result, 'TestTips2.txt: Words\n')
def test_random_tip_from_file_second(self):
seed = 1
dirname = os.path.dirname(__file__)
path = self.fileUtilities.go_up_dirs(dirname, 2) + "\\Tips"
tips_manager = TipsManager(seed, path)
result = tips_manager.get_random_tip()
self.assertEqual(result, 'TestTips.txt: Customer collaboration over contract negotiation\n')
def test_random_tip_from_file_second_alternate_slashes(self):
seed = 1
dirname = os.path.dirname(__file__)
path = self.fileUtilities.go_up_dirs(dirname, 2) + "\\Tips"
path = path.replace("\\", "/")
tips_manager = TipsManager(seed, path)
result = tips_manager.get_random_tip()
self.assertEqual(result, 'TestTips.txt: Customer collaboration over contract negotiation\n')
if __name__ == '__main__':
unittest.main()

sangwook236/SWDT | sw_dev/python/ext/test/graph/networkx/networkx_basic.py | Python | gpl-3.0 | 9,760 | 0.029816
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import networkx as nx
# REF [site] >> https://networkx.github.io/documentation/latest/tutorial.html
def basic_operation_tutorial():
# Create a graph.
G = nx.Graph()
# Nodes.
G.add_node(1)
G.add_nodes_from([2, 3])
H = nx.path_graph(10) # Creates a graph.
G.add_nodes_from(H)
G.add_node(H)
#print('G.nodes = {}.'.format(G.nodes))
print('G.nodes = {}.'.format(list(G.nodes)))
# Edges.
G.add_edge(1, 2)
e = (2, 3)
G.add_edge(*e) # Unpack edge tuple.
G.add_edges_from([(1, 2), (1, 3)])
G.add_edges_from(H.edges)
#print('G.edges = {}.'.format(G.edges))
print('G.edges = {}.'.format(list(G.edges)))
# Remove all nodes and edges.
G.clear()
#--------------------
G.add_edges_from([(1, 2), (1, 3)])
G.add_node(1)
G.add_edge(1, 2)
G.add_node('spam') # Adds node 'spam'.
G.add_nodes_from('spam') # Adds 4 nodes: 's', 'p', 'a', 'm'.
G.add_edge(3, 'm')
print('G.number_of_nodes() = {}.'.format(G.number_of_nodes()))
print('G.number_of_edges() = {}.'.format(G.number_of_edges()))
# Set-like views of the nodes, edges, neighbors (adjacencies), and degrees of nodes in a graph.
print('G.adj[1] = {}.'.format(list(G.adj[1]))) # or G.neighbors(1).
print('G.degree[1] = {}.'.format(G.degree[1])) # The number of edges incident to 1.
# Report the edges and degree from a subset of all nodes using an nbunch.
# An nbunch is any of: None (meaning all nodes), a node, or an iterable container of nodes that is not itself a node in the graph.
print("G.edges([2, 'm']) = {}.".format(G.edges([2, 'm'])))
print('G.degree([2, 3]) = {}.'.format(G.degree([2, 3])))
# Remove nodes and edges from the graph in a similar fashion to adding.
G.remove_node(2)
G.remove_nodes_from('spam')
print('G.nodes = {}.'.format(list(G.nodes)))
G.remove_edge(1, 3)
# When creating a graph structure by instantiating one of the graph classes you can specify data in several formats.
G.add_edge(1, 2)
H = nx.DiGraph(G) # Creates a DiGraph using the connections from G.
print('H.edges() = {}.'.format(list(H.edges())))
edgelist = [(0, 1), (1, 2), (2, 3)]
H = nx.Graph(edgelist)
#--------------------
# Access edges and neighbors.
print('G[1] = {}.'.format(G[1])) # Same as G.adj[1].
print('G[1][2] = {}.'.format(G[1][2])) # Edge 1-2.
print('G.edges[1, 2] = {}.'.format(G.edges[1, 2]))
# Get/set the attributes of an edge using subscript notation if the edge already exists.
G.add_edge(1, 3)
G[1][3]['color'] = 'blue'
G.edges[1, 2]['color'] = 'red'
# Fast examination of all (node, adjacency) pairs is achieved using G.adjacency(), or G.adj.items().
# Note that for undirected graphs, adjacency iteration sees each edge twice.
FG = nx.Graph()
FG.add_weighted_edges_from([(1, 2, 0.125), (1, 3, 0.75), (2, 4, 1.2), (3, 4, 0.375)])
for n, nbrs in FG.adj.items():
for nbr, eattr in nbrs.items():
wt = eattr['weight']
if wt < 0.5: print(f'({n}, {nbr}, {wt:.3})')
# Convenient access to all edges is achieved with the edges property.
for (u, v, wt) in FG.edges.data('weight'):
if wt < 0.5: print(f'({u}, {v}, {wt:.3})')
#--------------------
# Attributes.
# Graph attributes.
G = nx.Graph(day='Friday')
print('G.graph = {}.'.format(G.graph))
G.graph['day'] = 'Monday'
# Node attributes: add_node(), add_nodes_from(), or G.nodes.
G.add_node(1, time='5pm')
G.add_nodes_from([3], time='2pm')
print('G.nodes[1] = {}.'.format(G.nodes[1]))
G.nodes[1]['room'] = 714
print('G.nodes.data() = {}.'.format(G.nodes.data()))
print('G.nodes[1] = {}.'.format(G.nodes[1])) # List the attributes of a node.
print('G.nodes[1].keys() = {}.'.format(G.nodes[1].keys()))
#print('G[1] = {}.'.format(G[1])) # G[1] = G.adj[1].
# Edge attributes: add_edge(), add_edges_from(), or subscript notation.
G.add_edge(1, 2, weight=4.7)
G.add_edges_from([(3, 4), (4, 5)], color='red')
G.add_edges_from([(1, 2, {'color': 'blue'}), (2, 3, {'weight': 8})])
G[1][2]['weight'] = 4.7
G.edges[3, 4]['weight'] = 4.2
print('G.edges.data() = {}.'.format(G.edges.data()))
print('G.edges[3, 4] = {}.'.format(G.edges[3, 4])) # List the attributes of an edge.
print('G.edges[3, 4].keys() = {}.'.format(G.edges[3, 4].keys()))
#--------------------
# Directed graphs.
DG = nx.DiGraph()
DG.add_weighted_edges_from([(1, 2, 0.5), (3, 1, 0.75)])
print("DG.out_degree(1, weight='weight') = {}.".format(DG.out_degree(1, weight='weight')))
print("DG.degree(1, weight='weight') = {}.".format(DG.degree(1, weight='weight'))) # The sum of in_degree() and out_degree().
print('DG.successors(1) = {}.'.format(list(DG.successors(1))))
print('DG.neighbors(1) = {}.'.format(list(DG.neighbors(1))))
# Convert G to undirected graph.
#H = DG.to_undirected()
H = nx.Graph(DG)
#--------------------
# Multigraphs: Graphs which allow multiple edges between any pair of nodes.
MG = nx.MultiGraph()
#MDG = nx.MultiDiGraph()
MG.add_weighted_edges_from([(1, 2, 0.5), (1, 2, 0.75), (2, 3, 0.5)])
print("MG.degree(weight='weight') = {}.".format(dict(MG.degree(weight='weight'))))
GG = nx.Graph()
for n, nbrs in MG.adjacency():
for nbr, edict in nbrs.items():
minvalue = min([d['weight'] for d in edict.values()])
GG.add_edge(n, nbr, weight = minvalue)
print('nx.shortest_path(GG, 1, 3) = {}.'.format(nx.shortest_path(GG, 1, 3)))
#--------------------
# Classic graph operations:
"""
subgraph(G, nbunch): induced subgraph view of G on nodes in nbunch
union(G1,G2): graph union
disjoint_union(G1,G2): graph union assuming all nodes are different
cartesian_product(G1,G2): return Cartesian product graph
compose(G1,G2): combine graphs identifying nodes common to both
complement(G): graph complement
create_empty_copy(G): return an empty copy of the same graph class
to_undirected(G): return an undirected representation of G
to_directed(G): return a directed representation of G
"""
#--------------------
# Graph generators.
# Use a call to one of the classic small graphs:
petersen = nx.petersen_graph()
tutte = nx.tutte_graph()
maze = nx.sedgewick_maze_graph()
tet = nx.tetrahedral_graph()
# Use a (constructive) generator for a classic graph:
K_5 = nx.complete_graph(5)
K_3_5 = nx.complete_bipartite_graph(3, 5)
barbell = nx.barbell_graph(10, 10)
lollipop = nx.lollipop_graph(10, 20)
# Use a stochastic graph generator:
er = nx.erdos_renyi_graph(100, 0.15)
ws = nx.watts_strogatz_graph(30, 3, 0.1)
ba = nx.barabasi_albert_graph(100, 5)
red = nx.random_lobster(100, 0.9, 0.9)
#--------------------
# Read a graph stored in a file using common graph formats, such as edge lists, adjacency lists, GML, GraphML, pickle, LEDA and others.
nx.write_gml(red, './test.gml')
mygraph = nx.read_gml('./test.gml')
# REF [site] >> https://networkx.github.io/documentation/latest/tutorial.html
def drawing_tutorial():
import matplotlib.pyplot as plt
G = nx.petersen_graph()
plt.subplot(121)
nx.draw(G, with_labels=True, font_weight='bold')
plt.subplot(122)
nx.draw_shell(G, nlist=[range(5, 10), range(5)], with_labels=True, font_weight='bold')
plt.show()
options = {
'node_color': 'black',
'node_size': 100,
'width': 3,
}
plt.subplot(221)
nx.draw_random(G, **options)
plt.subplot(222)
#nx.draw_planar(G, **options)
nx.draw_circular(G, **options)
plt.subplot(223)
nx.draw_spectral(G, **options)
#nx.draw_spring(G, **options)
#nx.draw_kamada_kawai(G, **options)
plt.subplot(224)
nx.draw_shell(G, nlist=[range(5, 10), range(5)], **options)
plt.show()
G = nx.dodecahedral_graph()
shells = [[2, 3, 4, 5, 6], [8, 1, 0, 19, 18, 17, 16, 15, 14, 7], [9, 10, 11, 12, 13]]
nx.draw_shell(G, nlist=shells, **options)
plt.show()
# Save drawings to a file.
nx.draw(G)
plt.savefig('./path.png')
# If Graphviz and PyGraphviz or pydot are available on your system,
# you can also use nx_agraph.graphviz_layout(G) or nx_pydot.graphviz_layout(G) to get the node positions,
# or write the graph in dot format for further processing.
pos = nx.nx_agraph.graphviz_layout(G) # e.g.) pos = {1: (10, 10), 2: (30, 20)}.
nx.draw(G, pos=pos)
nx.drawing.nx_pydot.write_dot(G, './file.dot')
#--------------------
G = nx.complete_graph(

escsun/radio-shop | accounts/admin.py | Python | gpl-3.0 | 722 | 0.001408
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.admin import Group as AuthGroup
from .models import User
class Group(AuthGroup):
class Meta:
proxy = True
app_label = "accounts"
verbose_name_plural = "Группы"
verbose_name = "Группа"
class MyUserAdmin(UserAdmin):
model = User
fieldsets = UserAdmin.fieldsets
list_display = ('username', 'email', 'is_staff', 'is_active')
# fieldsets = UserAdmin.fieldsets + (
# (None, {'fields': ('some_extra_data',)}),
# )
admin.site.unregister(AuthGroup)
admin.site.register(User, MyUserAdmin)
admin.site.register(Group, GroupAdmin)

solvery/lang-features | python/use_lib/asyncio_1.py | Python | gpl-2.0 | 314 | 0.010067
#encoding=utf-8
# python3
import asyncio
@asyncio.coroutine
def hello():
print("Hello world!")
# Asynchronously call asyncio.sleep(1):
r = yield from asyncio.sleep(1)
print("Hello again!")
# Get the EventLoop:
loop = asyncio.get_event_loop()
# Run the coroutine
loop.run_until_complete(hello())
loop.close()

pydcs/dcs | dcs/weapons_data.py | Python | lgpl-3.0 | 212,933 | 0.006115
# This file is generated from pydcs_export.lua
class Weapons:
AB_250_2___144_x_SD_2__250kg_CBU_with_HE_submunitions = {"clsid": "{AB_250_2_SD_2}", "name": "AB 250-2 - 144 x SD-2, 250kg CBU with HE submunitions", "weight": 280}
AB_250_2___17_x_SD_10A__250kg_CBU_with_10kg_Frag_HE_submunitions = {"clsid": "{AB_250_2_SD_10A}", "name": "AB 250-2 - 17 x SD-10A, 250kg CBU with 10kg Frag/HE submunitions", "weight": 220}
AB_500_1___34_x_SD_10A__500kg_CBU_with_10kg_Frag_HE_submunitions = {"clsid": "{AB_500_1_SD_10A}", "name": "AB 500-1 - 34 x SD-10A, 500kg CBU with 10kg Frag/HE submunitions", "weight": 470}
ADEN_GUNPOD = {"clsid": "{ADEN_GUNPOD}", "name": "ADEN GUNPOD", "weight": 87}
ADM_141A = {"clsid": "{BRU42_ADM141}", "name": "ADM_141A", "weight": 308}
ADM_141A_ = {"clsid": "{BRU3242_ADM141}", "name": "ADM_141A", "weight": 365.38}
ADM_141A_TALD = {"clsid": "{ADM_141A}", "name": "ADM-141A TALD", "weight": 180}
ADM_141B_TALD = {"clsid": "{ADM_141B}", "name": "ADM-141B TALD", "weight": 180}
AERO_1D_300_Gallons_Fuel_Tank_ = {"clsid": "{AV8BNA_AERO1D}", "name": "AERO 1D 300 Gallons Fuel Tank ", "weight": 1002.439}
AERO_1D_300_Gallons_Fuel_Tank__Empty_ = {"clsid": "{AV8BNA_AERO1D_EMPTY}", "name": "AERO 1D 300 Gallons Fuel Tank (Empty)", "weight": 93.89362}
AGM114x2_OH_58 = {"clsid": "AGM114x2_OH_58", "name": "AGM-114K * 2", "weight": 250}
AGM_114K = {"clsid": "{ee368869-c35a-486a-afe7-284beb7c5d52}", "name": "AGM-114K", "weight": 65}
AGM_114K___4 = {"clsid": "{88D18A5E-99C8-4B04-B40B-1C02F2018B6E}", "name": "AGM-114K * 4", "weight": 250}
AGM_119B_Penguin_ASM = {"clsid": "{7B8DCEB4-820B-4015-9B48-1028A4195692}", "name": "AGM-119B Penguin ASM", "weight": 300}
AGM_122_Sidearm = {"clsid": "{AGM_122_SIDEARM}", "name": "AGM-122 Sidearm", "weight": 92}
AGM_122_Sidearm_ = {"clsid": "{LAU_7_AGM_122_SIDEARM}", "name": "AGM-122 Sidearm", "weight": 107}
AGM_122_Sidearm___light_ARM = {"clsid": "{AGM_122}", "name": "AGM-122 Sidearm - light ARM", "weight": 88}
AGM_154A___JSOW_CEB__CBU_type_ = {"clsid": "{AGM-154A}", "name": "AGM-154A - JSOW CEB (CBU-type)", "weight": 485}
AGM_154B___JSOW_Anti_Armour = {"clsid": "{AGM-154B}", "name": "AGM-154B - JSOW Anti-Armour", "weight": 485}
AGM_154C___JSOW_Unitary_BROACH = {"clsid": "{9BCC2A2B-5708-4860-B1F1-053A18442067}", "name": "AGM-154C - JSOW Unitary BROACH", "weight": 484}
AGM_45A_Shrike_ARM = {"clsid": "{AGM_45A}", "name": "AGM-45A Shrike ARM", "weight": 177}
AGM_45B_Shrike_ARM__Imp_ = {"clsid": "{3E6B632D-65EB-44D2-9501-1C2D04515404}", "name": "AGM-45B Shrike ARM (Imp)", "weight": 177}
AGM_62_Walleye_II___Guided_Weapon_Mk_5__TV_Guided_ = {"clsid": "{C40A1E3A-DD05-40D9-85A4-217729E37FAE}", "name": "AGM-62 Walleye II - Guided Weapon Mk 5 (TV Guided)", "weight": 1061}
AGM_65D___Maverick_D__IIR_ASM_ = {"clsid": "{444BA8AE-82A7-4345-842E-76154EFCCA47}", "name": "AGM-65D - Maverick D (IIR ASM)", "weight": 218}
AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_ = {"clsid": "{F16A4DE0-116C-4A71-97F0-2CF85B0313EF}", "name": "AGM-65E - Maverick E (Laser ASM - Lg Whd)", "weight": 286}
AGM_65K___Maverick_K__CCD_Imp_ASM_ = {"clsid": "{69DC8AE7-8F77-427B-B8AA-B19D3F478B65}", "name": "AGM-65K - Maverick K (CCD Imp ASM)", "weight": 360}
AGM_84 = {"clsid": "AGM_84", "name": "AGM-84 HARPOON", "weight": None}
AGM_84A_Harpoon_ASM = {"clsid": "{8B7CADF9-4954-46B3-8CFB-93F2F5B90B03}", "name": "AGM-84A Harpoon ASM", "weight": 661.5}
AGM_84D_Harpoon_AShM = {"clsid": "{AGM_84D}", "name": "AGM-84D Harpoon AShM", "weight": 540}
AGM_84E_Harpoon_SLAM__Stand_Off_Land_Attack_Missile_ = {"clsid": "{AF42E6DF-9A60-46D8-A9A0-1708B241AADB}", "name": "AGM-84E Harpoon/SLAM (Stand-Off Land-Attack Missile)", "weight": 628}
AGM_84E_Harpoon_SLAM__Stand_Off_Land_Attack_Missile__ = {"clsid": "{AGM_84E}", "name": "AGM-84E Harpoon/SLAM (Stand-Off Land-Attack Missile)", "weight": 628}
AGM_84H_SLAM_ER__Expanded_Response_ = {"clsid": "{AGM_84H}", "name": "AGM-84H SLAM-ER (Expanded Response)", "weight": 675}
AGM_86C_ALCM = {"clsid": "{769A15DF-6AFB-439F-9B24-5B7A45C59D16}", "name": "AGM-86C ALCM", "weight": 1950}
AGM_88C_HARM___High_Speed_Anti_Radiation_Missile = {"clsid": "{B06DD79A-F21E-4EB9-BD9D-AB3844618C9C}", "name": "AGM-88C HARM - High Speed Anti-Radiation Missile", "weight": 361}
AGM_88C_HARM___High_Speed_Anti_Radiation_Missile_ = {"clsid": "{B06DD79A-F21E-4EB9-BD9D-AB3844618C93}", "name": "AGM-88C HARM - High Speed Anti-Radiation Missile", "weight": 406.4}
AIM_120B_AMRAAM___Active_Rdr_AAM = {"clsid": "{C8E06185-7CD6-4C90-959F-044679E90751}", "name": "AIM-120B AMRAAM - Active Rdr AAM", "weight": 156}
AIM_120C_5_AMRAAM___Active_Rdr_AAM = {"clsid": "{40EF17B7-F508-45de-8566-6FFECC0C1AB8}", "name": "AIM-120C-5 AMRAAM - Active Rdr AAM", "weight": 161.5}
AIM_54A_Mk47 = {"clsid": "{AIM_54A_Mk47}", "name": "AIM-54A-Mk47", "weight": 444}
AIM_54A_Mk47_ = {"clsid": "{SHOULDER AIM_54A_Mk47 L}", "name": "AIM-54A-Mk47", "weight": 489.36}
AIM_54A_Mk47__ = {"clsid": "{SHOULDER AIM_54A_Mk47 R}", "name": "AIM-54A-Mk47", "weight": 489.36}
AIM_54A_Mk60 = {"clsid": "{AIM_54A_Mk60}", "name": "AIM-54A-Mk60", "weight": 471.7}
AIM_54A_Mk60_ = {"clsid": "{SHOULDER AIM_54A_Mk60 L}", "name": "AIM-54A-Mk60", "weight": 517.06}
AIM_54A_Mk60__ = {"clsid": "{SHOULDER AIM_54A_Mk60 R}", "name": "AIM-54A-Mk60", "weight": 517.06}
AIM_54C_Mk47 = {"clsid": "{AIM_54C_Mk47}", "name": "AIM-54C-Mk47", "weight": 465.6}
AIM_54C_Mk47_ = {"clsid": "{SHOULDER AIM_54C_Mk47 L}", "name": "AIM-54C-Mk47", "weight": 510.96}
AIM_54C_Mk47_Phoenix_IN__Semi_Active_Radar = {"clsid": "{7575BA0B-7294-4844-857B-031A144B2595}", "name": "AIM-54C-Mk47 Phoenix IN & Semi-Active Radar", "weight": 463}
AIM_54C_Mk47__ = {"clsid": "{SHOULDER AIM_54C_Mk47 R}", "name": "AIM-54C-Mk47", "weight": 510.96}
AIM_7E_Sparrow_Semi_Active_Radar = {"clsid": "{AIM-7E}", "name": "AIM-7E Sparrow Semi-Active Radar", "weight": 230}
AIM_7F = {"clsid": "{SHOULDER AIM-7F}", "name": "AIM-7F", "weight": 284.4}
AIM_7F_ = {"clsid": "{BELLY AIM-7F}", "name": "AIM-7F", "weight": 230}
AIM_7F_Sparrow_Semi_Active_Radar = {"clsid": "{AIM-7F}", "name": "AIM-7F Sparrow Semi-Active Radar", "weight": 231}
AIM_7M = {"clsid": "{SHOULDER AIM-7M}", "name": "AIM-7M", "weight": 284.4}
AIM_7MH = {"clsid": "{SHOULDER AIM-7MH}", "name": "AIM-7MH", "weight": 284.4}
AIM_7MH_ = {"clsid": "{BELLY AIM-7MH}", "name": "AIM-7MH", "weight": 230}
AIM_7MH_Sparrow_Semi_Active_Radar = {"clsid": "{AIM-7H}", "name": "AIM-7MH Sparrow Semi-Active Radar", "weight": 231}
AIM_7M_ = {"clsid": "{BELLY AIM-7M}", "name": "AIM-7M", "weight": 230}
AIM_7M_Sparrow_Semi_Active_Radar = {"clsid": "{8D399DDA-FF81-4F14-904D-099B34FE7918}", "name": "AIM-7M Sparrow Semi-Active Radar", "weight": 231.1}
AIM_9B_Sidewinder_IR_AAM = {"clsid": "{AIM-9B}", "name": "AIM-9B Sidewinder IR AAM", "weight": 74.39}
AIM_9L_Sidewinder_IR_AAM = {"clsid": "{AIM-9L}", "name": "AIM-9L Sidewinder IR AAM", "weight": 85.73}
AIM_9M_Sidewinder_IR_AAM = {"clsid": "{6CEB49FC-DED8-4DED-B053-E1F033FF72D3}", "name": "AIM-9M Sidewinder IR AAM", "weight": 85.73}
AIM_9P5_Sidewinder_IR_AAM = {"clsid": "{AIM-9P5}", "name": "AIM-9P5 Sidewinder IR AAM", "weight": 85.5}
AIM_9P_Sidewinder_IR_AAM = {"clsid": "{9BFD8C90-F7AE-4e90-833B-BFD0CED0E536}", "name": "AIM-9P Sidewinder IR AAM", "weight": 86.18}
AIM_9X_Sidewinder_IR_AAM = {"clsid": "{5CE2FF2A-645A-4197-B48D-8720AC69394F}", "name": "AIM-9X Sidewinder IR AAM", "weight": 84.46}
AJS_External_tank_1013kg_fuel = {"clsid": "{VIGGEN_X-TANK}", "name": "AJS External-tank 1013kg fuel", "weight": 1208}
AKAN_M_55_Gunpod__150_rnds_MINGR55_HE = {"clsid": "{AKAN}", "name": "AKAN M/55 Gunpod, 150 rnds MINGR55-HE", "weight": 276}
ALARM = {"clsid": "{E6747967-B1F0-4C77-977B-AB2E6EB0C102}", "name": "ALARM", "weight": 268}
ALQ_131___ECM_Pod = {"clsid": "{6D21ECEA-F85B-4E8D-9D51-31DC9B8AA4EF}", "name": "ALQ-131 - ECM Pod", "weight": 305}
ALQ_184 = {"clsid": "ALQ_184", "name": "ALQ-184 - ECM Pod", "weight": 215}
ALQ_184_Long = {"clsid
|
DragonRoman/rhevm-utils
|
3.0/hooks/directlun/before_vm_migrate_destination.py
|
Python
|
gpl-3.0
| 3,145
| 0.00318
|
#!/usr/bin/python
import os
import sys
import grp
import pwd
import traceback
import utils
import hooking
DEV_MAPPER_PATH = "/dev/mapper"
DEV_DIRECTLUN_PATH = '/dev/directlun'
def createdirectory(dirpath):
# we don't use os.mkdir/chown because we need sudo
command = ['/bin/mkdir', '-p', dirpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error mkdir %s, err = %s\n' % (dirpath, err))
sys.exit(2)
mode = '755'
command = ['/bin/chmod', mode, dirpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error chmod %s %s, err = %s\n' % (dirpath, mode, err))
sys.exit(2)
def cloneDeviceNode(srcpath, devpath):
"""Clone a device node into a temporary private location."""
# we don't use os.remove/mknod/chmod/chown because we need sudo
command = ['/bin/rm', '-f', devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error rm -f %s, err = %s\n' % (devpath, err))
sys.exit(2)
stat = os.stat(srcpath)
major = os.major(stat.st_rdev)
minor = os.minor(stat.st_rdev)
command = ['/bin/mknod', devpath, 'b', str(major), str(minor)]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error mknod %s, err = %s\n' % (devpath, err))
sys.exit(2)
mode = '660'
command = ['/bin/chmod', mode, devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error chmod %s to %s, err = %s\n' % (devpath, mode, err))
sys.exit(2)
group = grp.getgrnam('qemu')
gid = group.gr_gid
user = pwd.getpwnam('qemu')
uid = user.pw_uid
owner = str(uid) + ':' + str(gid)
command = ['/bin/chown', owner, devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error chown %s to %s, err = %s\n' % (devpath, owner, err))
sys.exit(2)
if 'directlun' in os.environ:
try:
luns = os.environ['directlun']
domxml = hooking.read_domxml()
createdirectory(DEV_DIRECTLUN_PATH)
for lun in luns.split(','):
try:
lun, options = lun.split(':')
except ValueError:
options = ''
options = options.split(';')
srcpath = DEV_MAPPER_PATH + '/' + lun
if not os.path.exists(srcpath):
sys.stderr.write('directlun before_vm_migration_destination: device not found %s\n' % srcpath)
sys.exit(2)
uuid = domxml.getElementsByTagName('uuid')[0]
uuid = uuid.childNodes[0].nodeValue
devpath = DEV_DIRECTLUN_PATH + '/' + lun + '-' + uuid
cloneDeviceNode(srcpath, devpath)
hooking.write_domxml(domxml)
except:
sys.stderr.write('directlun before_vm_migration_destination: [unexpected error]: %s\n' % traceback.format_exc())
sys.exit(2)
|
Petr-By/qtpyvis
|
qtgui/widgets/training.py
|
Python
|
mit
| 6,290
| 0.00159
|
import numpy as np
from PyQt5.QtWidgets import (QWidget, QProgressBar, QLabel, QCheckBox,
QPushButton, QSpinBox, QVBoxLayout, QFormLayout)
from .matplotlib import QMatplotlib
from qtgui.utils import QObserver
from tools.train import Training, TrainingController
from dltb.network import Network
class QTrainingBox(QWidget, QObserver, qobservables={
# FIXME[hack]: check what we are really interested in ...
Training: Training.Change.all(),
Network: Network.Change.all()}):
"""
Attributes
----------
range: numpy.ndarray
trainingLoss: numpy.ndarray
validationLoss: numpy.ndarray
rangeIndex: int
"""
_training: TrainingController = None
_network: Network = None
def __init__(self, training: TrainingController=None,
network: Network=None, parent=None):
"""Initialization of the QTrainingBox.
"""
super().__init__(parent)
self._initUI()
self._layoutComponents()
self._range = np.arange(100, dtype=np.float32)
self._trainingLoss = np.zeros(100, dtype=np.float32)
self._validationLoss = np.zeros(100, dtype=np.float32)
self._rangeIndex = 0
self.setTraining(training)
self.setNetwork(network)
def _initUI(self):
def slot(checked: bool):
if self._training.ready:
self._training.start()
elif self._training.running:
self._training.stop()
self._buttonTrainModel = QPushButton("Train")
self._buttonTrainModel.clicked.connect(slot)
self._plotLoss = QMatplotlib()
self._checkboxPlot = QCheckBox()
self._progressEpoch = QProgressBar()
self._progressEpoch.setFormat("%v/%m")
self._progressBatch = QProgressBar()
self._progressBatch.setFormat("%v (%p%)")
self._labelBatch = QLabel()
self._labelEpoch = QLabel()
self._labelLoss = QLabel()
self._labelAccuracy = QLabel()
self._labelDuration = QLabel()
self._labelNetwork = QLabel()
def slot(value: int):
self._training.epochs = value
self._spinboxEpochs = QSpinBox()
self._spinboxEpochs.valueChanged.connect(slot)
def slot(value: int):
self._training.batch_size = value
self._spinboxBatchSize = QSpinBox()
self._spinboxBatchSize.valueChanged.connect(slot)
def _layoutComponents(self):
form = QFormLayout()
form.addRow("Network:", self._labelNetwork)
form.addRow("Batch:", self._labelBatch)
form.addRow("Epoch:", self._labelEpoch)
form.addRow("Loss:", self._labelLoss)
form.addRow("Accuracy:", self._labelAccuracy)
form.addRow("Duration:", self._labelDuration)
form.addRow("Plot:", self._checkboxPlot)
layout = QVBoxLayout()
layout.addWidget(self._plotLoss)
layout.addLayout(form)
layout.addWidget(self._progressBatch)
layout.addWidget(self._progressEpoch)
layout.addWidget(self._buttonTrainModel)
layout.addWidget(self._spinboxEpochs)
layout.addWidget(self._spinboxBatchSize)
self.setLayout(layout)
def _enableComponents(self):
enabled = (self._network is not None and
self._training is not None and self._training.ready)
self._buttonTrainModel.setEnabled(enabled)
enabled = enabled and not self._training.running
self._spinboxEpochs.setEnabled(enabled)
self._spinboxBatchSize.setEnabled(enabled)
def setTraining(self, training: TrainingController):
self._exchangeView('_training', training)
# FIXME[test]: should be notified by the observable
self._enableComponents()
def network_changed(self, network, change):
self._network(network)
self._labelNetwork.setText(str(network))
def training_changed(self, training, change):
self._training(training)
self._enableComponents()
return
if 'network_changed' in change:
self._enableComponents()
if 'training_changed' in change:
if self._training.epochs:
self._progressEpoch.setRange(0, self._training.epochs)
if self._training.batches:
self._progressBatch.setRange(0, self._training.batches)
if self._training is not None:
if self._training.running:
self._buttonTrainModel.setText("Stop")
else:
self._buttonTrainModel.setText("Train")
self._enableComponents()
if 'epoch_changed' in change:
if self._training.epoch is None:
self._labelEpoch.setText("")
self._progressEpoch.setValue(0)
else:
self._labelEpoch.setText(str(self._training.epoch))
self._progressEpoch.setValue(self._training.epoch+1)
if 'batch_changed' in change:
if self._training.batch is not None:
self._labelBatch.setText(f"{self._training.batch}/"
f"{self._training.batches}")
self._labelDuration.setText(str(self._training.batch_duration))
self._progressBatch.setValue(self._training.batch)
if 'parameter_changed' in change:
self._spinboxEpochs.setRange(*self._training.epochs_range)
self._spinboxEpochs.setValue(self._training.epochs)
self._spinboxBatchSize.setRange(*self._training.batch_size_range)
self._spinboxBatchSize.setValue(self._training.batch_size)
if self._training.loss is not None:
self._labelLoss.setText(str(self._training.loss))
self._trainingLoss[self._rangeIndex] = self._training.loss
if self._checkboxPlot.checkState():
self._plotLoss.plot(self._range, self._trainingLoss)
# self._plotLoss.plot(self._validationLoss)
if self._training.accuracy is not None:
self._labelAccuracy.setText(str(self._training.accuracy))
self._rangeIndex = (self._rangeIndex + 1) % len(self._range)
|
dkliban/pulp_ostree
|
common/setup.py
|
Python
|
gpl-2.0
| 321
| 0
|
from setuptools import setup, find_packages
setup(
name='pulp_ostree_common',
version='1.0.0a1',
packages=find_packages(),
url='http://www.pulpproject.org',
license='GPLv2+',
author='Pulp Team',
author_email='pulp-list@redhat.com',
description='common code for pulp\'s ostree support',
)
|
skion/junkdns
|
src/resolvers/publicsuffix.py
|
Python
|
mit
| 5,429
| 0.004421
|
# -:- coding: utf-8 -:-#
"""
A resolver to query top-level domains via publicsuffix.org.
"""
from __future__ import absolute_import
NAME = "publicsuffix"
HELP = "a resolver to query top-level domains via publicsuffix.org"
DESC = """
This resolver returns a PTR record pointing to the top-level domain of the
hostname in question. When the --txt option is given, it will also return
additional informational TXT records.
The list of current top-level domains can be explicitly downloaded upon startup
via the --fetch argument.
"""
import dns.message
import logging
import sys
# remove current directory from path to load a module with the same name as us
oldpath, sys.path = sys.path, sys.path[1:]
import publicsuffix
sys.path = oldpath
"""
Module-level configuration
"""
TTL = 14400 # serve all records with this TTL
SERVE_TXT = True # serve additional TXT records
LIST_FETCH = False # download fresh copy of public suffix list
LIST_URL = "http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/effective_tld_names.dat?raw=1"
log = logging.getLogger(__name__)
psl = publicsuffix.PublicSuffixList()
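# Illustrative sketch only (exact values depend on the bundled suffix list,
# so treat them as assumptions rather than guarantees): the lookup used by
# query() below reduces a hostname to its registrable domain, e.g.
#
#   psl.get_public_suffix("www.example.co.uk")     # -> "example.co.uk"
#   psl.get_public_suffix("deep.sub.example.com")  # -> "example.com"
#
# and that registrable domain is what is served back as the PTR target.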
def configure_parser(parser):
"""
Configure provided argparse subparser with module-level options.
Use the set_defaults() construct as a callback for storing the parsed arguments.
"""
def set_defaults(args):
global TTL, SERVE_TXT, LIST_FETCH, LIST_URL
TTL = args.publicsuffix_ttl
SERVE_TXT = args.publicsuffix_txt
if args.publicsuffix_fetch in (True, False):
LIST_FETCH = args.publicsuffix_fetch
else:
LIST_FETCH = True
LIST_URL = args.publicsuffix_fetch
# download TLD list
if LIST_FETCH:
pass
parser.set_defaults(func=set_defaults)
parser.add_argument("--ttl", dest="publicsuffix_ttl", type=int,
default=TTL, metavar="TTL",
help="TTL to use for all records ")
parser.add_argument("--fetch", dest="publicsuffix_fetch", nargs="?",
default=LIST_FETCH, const=True, metavar="URL",
help="fetch new list on start, from given URL if provided")
parser.add_argument("--notxt", dest="publicsuffix_txt", action="store_false",
default=SERVE_TXT,
help="do not serve additional TXT records")
return parser
def validate(msg):
"""
Filter messages that are bad or we can't handle.
Return a DNS rcode describing the problem.
"""
opcode = msg.opcode()
# we only support queries
if opcode != dns.opcode.QUERY:
return dns.rcode.NOTIMP
# # we do not allow recursion
# if msg.flags & dns.flags.RD:
# return dns.rcode.REFUSED
# only allow single question (qdcount=1)
# @TODO: allow multiple questions?
if len(msg.question) != 1:
return dns.rcode.FORMERR
return dns.rcode.NOERROR
def query(msg):
"""
Return answer to provided DNS question.
Create appropriate skeleton response message via dns.message.make_response(msg).
"""
res = dns.message.make_response(msg)
# validate query
rcode = validate(msg)
res.set_rcode(rcode)
# stop here if didn't validate
if rcode != dns.rcode.NOERROR:
return res
# in practice this is just one query, but let's not assume that
for query in msg.question:
name = query.name.to_unicode(omit_final_dot=True)
# only deal with PTR queries
if query.rdtype not in (dns.rdatatype.PTR, dns.rdatatype.ANY):
res.set_rcode(dns.rcode.NXDOMAIN)
log.info("Skipping query type %d", query.rdtype)
continue
try:
suffix = psl.get_public_suffix(name)
except:
res.set_rcode(dns.rcode.SERVFAIL)
log.exception("Oddness while looking up suffix")
# don't process further questions since we've set rcode
break
if suffix:
suffix += "."
# answer section
rdata = suffix
# https://github.com/rthalley/dnspython/issues/44
try:
# dnspython3
rrset = dns.rrset.from_text(query.name, TTL,
dns.rdataclass.IN, dns.rdatatype.PTR,
rdata)
except AttributeError:
# dnspython2
rrset = dns.rrset.from_text(query.name, TTL,
dns.rdataclass.IN, dns.rdatatype.PTR,
rdata.encode("idna"))
res.answer.append(rrset)
if SERVE_TXT:
# additional section
tld = query.name.split(2)[-1].to_text(omit_final_dot=True)
rdata = '"see: http://en.wikipedia.org/wiki/.{}"'.format(tld)
# https://github.com/rthalley/dnspython/issues/44
try:
# python3
rrset = dns.rrset.from_text(suffix, TTL,
dns.rdataclass.IN, dns.rdatatype.TXT,
rdata)
except:
# python2
rrset = dns.rrset.from_text(suffix, TTL,
dns.rdataclass.IN, dns.rdatatype.TXT,
rdata.encode("latin1"))
res.additional.append(rrset)
return res
|
greglandrum/rdkit
|
rdkit/Chem/Fingerprints/UnitTestFingerprints.py
|
Python
|
bsd-3-clause
| 2,918
| 0.011309
|
#
# Copyright (C) 2003-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for fingerprinting
"""
import unittest
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
class TestCase(unittest.TestCase):
def test1(self):
# FIX: test HashAtom
pass
def test2(self):
# FIX: test HashBond
pass
def test3(self):
# FIX: test HashPath
pass
def test4(self):
""" check containing mols, no Hs, no valence """
tgts = [('CCC(O)C(=O)O', ('CCC', 'OCC', 'OCC=O', 'OCCO', 'CCCC', 'OC=O', 'CC(O)C')), ]
for smi, matches in tgts:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 9192, 4, 0)
_ = fp1.GetOnBits()
for match in matches:
m2 = Chem.MolFromSmiles(match)
fp2 = Chem.RDKFingerprint(m2, 2, 7, 9192, 4, 0)
v1, _ = DataStructs.OnBitProjSimilarity(fp2, fp1)
self.assertAlmostEqual(v1, 1, msg='substruct %s not properly contained in %s' % (match, smi))
def test5(self):
""" check containing mols, use Hs, no valence """
tgts = [('CCC(O)C(=O)O', ('O[CH-][CH2-]', 'O[CH-][C-]=O')), ]
for smi, matches in tgts:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 9192, 4, 1)
_ = fp1.GetOnBits()
for match in matches:
m2 = Chem.MolFromSmiles(match)
fp2 = Chem.RDKFingerprint(m2, 2, 7, 9192, 4, 1)
v1, _ = DataStructs.OnBitProjSimilarity(fp2, fp1)
self.assertAlmostEqual(v1, 1, msg='substruct %s not properly contained in %s' % (match, smi))
def test6(self):
""" check that the bits in a signature of size N which has been folded in half
are the same as those in a signature of size N/2 """
smis = ['CCC(O)C(=O)O', 'c1ccccc1', 'C1CCCCC1', 'C1NCCCC1', 'CNCNCNC']
for smi in smis:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 4096)
fp2 = DataStructs.FoldFingerprint(fp1, 2)
fp3 = Chem.RDKFingerprint(m, 2, 7, 2048)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
fp2 = DataStructs.FoldFingerprint(fp2, 2)
fp3 = Chem.RDKFingerprint(m, 2, 7, 1024)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
fp2 = DataStructs.FoldFingerprint(fp1, 4)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
def testGithub1747(self):
""" test github #1747: deprecated apply() function causes GetRDKFingerprint
to fail in Python 3 """
fp = FingerprintMols.GetRDKFingerprint(Chem.MolFromSmiles('CCO'))
self.assertNotEqual(0,fp.GetNumOnBits())
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
ksmit799/Toontown-Source
|
toontown/classicchars/DistributedGoofySpeedwayAI.py
|
Python
|
mit
| 6,450
| 0.004186
|
from otp.ai.AIBaseGlobal import *
import DistributedCCharBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import CharStateDatasAI
class DistributedGoofySpeedwayAI(DistributedCCharBaseAI.DistributedCCharBaseAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoofySpeedwayAI')
def __init__(self, air):
DistributedCCharBaseAI.DistributedCCharBaseAI.__init__(self, air, TTLocalizer.Goofy)
self.fsm = ClassicFSM.ClassicFSM('DistributedGoofySpeedwayAI', [State.State('Off', self.enterOff, self.exitOff, ['Lonely', 'TransitionToCostume', 'Walk']),
State.State('Lonely', self.enterLonely, self.exitLonely, ['Chatty', 'Walk', 'TransitionToCostume']),
State.State('Chatty', self.enterChatty, self.exitChatty, ['Lonely', 'Walk', 'TransitionToCostume']),
State.State('Walk', self.enterWalk, self.exitWalk, ['Lonely', 'Chatty', 'TransitionToCostume']),
State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, ['Off'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.handleHolidays()
def delete(self):
self.fsm.requestFinalState()
DistributedCCharBaseAI.DistributedCCharBaseAI.delete(self)
self.lonelyDoneEvent = None
self.lonely = None
self.chattyDoneEvent = None
self.chatty = None
self.walkDoneEvent = None
self.walk = None
return
def generate(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.generate(self)
name = self.getName()
self.lonelyDoneEvent = self.taskName(name + '-lonely-done')
self.lonely = CharStateDatasAI.CharLonelyStateAI(self.lonelyDoneEvent, self)
self.chattyDoneEvent = self.taskName(name + '-chatty-done')
self.chatty = CharStateDatasAI.CharChattyStateAI(self.chattyDoneEvent, self)
self.walkDoneEvent = self.taskName(name + '-walk-done')
if self.diffPath == None:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self)
else:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self, self.diffPath)
return
def walkSpeed(self):
return ToontownGlobals.GoofySpeed
def start(self):
self.fsm.request('Lonely')
def __decideNextState(self, doneStatus):
if self.transitionToCostume == 1:
curWalkNode = self.walk.getDestNode()
if simbase.air.holidayManager:
if ToontownGlobals.HALLOWEEN_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
elif ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
else:
self.notify.warning('transitionToCostume == 1 but no costume holiday')
else:
self.notify.warning('transitionToCostume == 1 but no holiday Manager')
if doneStatus['state'] == 'lonely' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'chatty' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'walk' and doneStatus['status'] == 'done':
if len(self.nearbyAvatars) > 0:
self.fsm.request('Chatty')
else:
self.fsm.request('Lonely')
def enterOff(self):
pass
def exitOff(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.exitOff(self)
def enterLonely(self):
self.lonely.enter()
self.acceptOnce(self.lonelyDoneEvent, self.__decideNextState)
def exitLonely(self):
self.ignore(self.lonelyDoneEvent)
self.lonely.exit()
def __goForAWalk(self, task):
self.notify.debug('going for a walk')
self.fsm.request('Walk')
return Task.done
def enterChatty(self):
self.chatty.enter()
self.acceptOnce(self.chattyDoneEvent, self.__decideNextState)
def exitChatty(self):
self.ignore(self.chattyDoneEvent)
self.chatty.exit()
def enterWalk(self):
self.notify.debug('going for a walk')
self.walk.enter()
self.acceptOnce(self.walkDoneEvent, self.__decideNextState)
def exitWalk(self):
self.ignore(self.walkDoneEvent)
self.walk.exit()
def avatarEnterNextState(self):
if len(self.nearbyAvatars) == 1:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Chatty')
else:
self.notify.debug('avatarEnterNextState: in walk state')
else:
self.notify.debug('avatarEnterNextState: num avatars: ' + str(len(self.nearbyAvatars)))
def avatarExitNextState(self):
if len(self.nearbyAvatars) == 0:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Lonely')
def handleHolidays(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.handleHolidays(self)
if hasattr(simbase.air, 'holidayManager'):
if ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays:
if simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES] != None and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].getRunningState():
self.diffPath = TTLocalizer.Donald
return
def getCCLocation(self):
if self.diffPath == None:
return 1
else:
return 0
return
def enterTransitionToCostume(self):
pass
def exitTransitionToCostume(self):
pass
|
CMSS-BCRDB/RDS
|
trove/guestagent/datastore/experimental/couchbase/system.py
|
Python
|
apache-2.0
| 2,404
| 0.000832
|
# Copyright (c) 2013 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import cfg
CONF = cfg.CONF
TIME_OUT = 1200
COUCHBASE_DUMP_DIR = '/tmp/backups'
COUCHBASE_CONF_DIR = '/etc/couchbase'
COUCHBASE_WEBADMIN_PORT = '8091'
COUCHBASE_REST_API = 'http://localhost:' + COUCHBASE_WEBADMIN_PORT
BUCKETS_JSON = '/buckets.json'
SECRET_KEY = '/secret_key'
SERVICE_CANDIDATES = ["couchbase-server"]
cmd_couchbase_status = ('sudo /opt/couchbase/bin/couchbase-cli server-info '
'-c %(IP)s:8091 -u root -p %(PWD)s')
cmd_node_init = ('sudo /opt/couchbase/bin/couchbase-cli node-init '
'-c %(IP)s:8091 --node-init-data-path=%(data_path)s '
'-u root -p %(PWD)s')
cmd_cluster_init = ('sudo /opt/couchbase/bin/couchbase-cli cluster-init '
'-c %(IP)s:8091 --cluster-init-username=root '
'--cluster-init-password=%(PWD)s '
'--cluster-init-port=8091')
cmd_kill = 'sudo pkill -u couchbase'
cmd_rm_old_data_dir = 'sudo rm -rf /opt/couchbase/var/lib/couchbase/data'
""" For optimal couchbase operations, swappiness of vm should be set to 0.
Reference link: http://docs.couchbase.com/couchbase-manual-2
.5/cb-admin/#using-couchbase-in-the-cloud """
cmd_set_swappiness = 'sudo sysctl vm.swappiness=0'
cmd_update_sysctl_conf = ('echo "vm.swappiness = 0" | sudo tee -a '
'/etc/sysctl.conf')
cmd_reset_pwd = 'sudo /opt/couchbase/bin/cbreset_password %(IP)s:8091'
pwd_file = COUCHBASE_CONF_DIR + SECRET_KEY
cmd_get_password_from_config = """sudo /opt/couchbase/bin/erl -noinput -eval \
'case file:read_file("/opt/couchbase/var/lib/couchbase/config/config.dat") \
of {ok, B} -> io:format("~p~n", [binary_to_term(B)]) end.' \
-run init stop | grep '\[{"root",\[{password,' | awk -F\\" '{print $4}'
"""
|
cylc/cylc
|
tests/functional/xtriggers/02-persistence/faker_fail.py
|
Python
|
gpl-3.0
| 873
| 0
|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def faker(name, debug=False):
print("%s: failing" % name)
return (False, {"name": name})
|
google/intellij-community
|
python/helpers/typeshed/scripts/create_baseline_stubs.py
|
Python
|
apache-2.0
| 6,346
| 0.001733
|
#!/usr/bin/env python3
"""Script to generate unannotated baseline stubs using stubgen.
Basic usage:
$ python3 scripts/create_baseline_stubs.py <project on PyPI>
Run with -h for more help.
"""
import argparse
import os
import re
import shutil
import subprocess
import sys
from typing import Optional, Tuple
PYRIGHT_CONFIG = "pyrightconfig.stricter.json"
def search_pip_freeze_output(project: str, output: str) -> Optional[Tuple[str, str]]:
# Look for lines such as "typed-ast==1.4.2". '-' matches '_' and
# '_' matches '-' in project name, so that "typed_ast" matches
# "typed-ast", and vice versa.
regex = "^(" + re.sub(r"[-_]", "[-_]", project) + ")==(.*)"
m = re.search(regex, output, flags=re.IGNORECASE | re.MULTILINE)
if not m:
return None
return m.group(1), m.group(2)
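# Illustrative sketch (not exercised by the script itself): because '-' and
# '_' are treated as interchangeable in the project name, both of the calls
# below are expected to return ('typed-ast', '1.4.2') for the same output:
#
#   search_pip_freeze_output("typed_ast", "typed-ast==1.4.2\n")
#   search_pip_freeze_output("typed-ast", "typed-ast==1.4.2\n")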
def get_installed_package_info(project: str) -> Optional[Tuple[str, str]]:
"""Find package information from pip freeze output.
Match project name somewhat fuzzily (case sensitive; '-' matches '_', and
vice versa).
Return (normalized project name, installed version) if successful.
"""
r = subprocess.run(["pip", "freeze"], capture_output=True, text=True, check=True)
return search_pip_freeze_output(project, r.stdout)
def run_stubgen(package: str) -> None:
print(f"Running stubgen: stubgen -p {package}")
subprocess.run(["python", "-m", "mypy.stubgen", "-p", package], check=True)
def copy_stubs(src_base_dir: str, package: str, stub_dir: str) -> None:
"""Copy generated stubs to the target directory under stub_dir/."""
print(f"Copying stubs to {stub_dir}")
if not os.path.isdir(stub_dir):
os.mkdir(stub_dir)
src_dir = os.path.join(src_base_dir, package)
if os.path.isdir(src_dir):
shutil.copytree(src_dir, os.path.join(stub_dir, package))
else:
src_file = os.path.join("out", package + ".pyi")
if not os.path.isfile(src_file):
sys.exit("Error: Cannot find generated stubs")
shutil.copy(src_file, stub_dir)
def run_black(stub_dir: str) -> None:
print(f"Running black: black {stub_dir}")
subprocess.run(["black", stub_dir])
def run_isort(stub_dir: str) -> None:
print(f"Running isort: isort {stub_dir}")
subprocess.run(["python3", "-m", "isort", stub_dir])
def create_metadata(stub_dir: str, version: str) -> None:
"""Create a METADATA.toml file."""
m = re.match(r"[0-9]+\.[0-9]+", version)
if m is None:
sys.exit(f"Error: Cannot parse version number: {version}")
fnam = os.path.join(stub_dir, "METADATA.toml")
version = m.group(0)
assert not os.path.exists(fnam)
print(f"Writing {fnam}")
with open(fnam, "w") as f:
f.write(f'version = "{version}.*"\n')
def add_pyright_exclusion(stub_dir: str) -> None:
"""Exclude stub_dir from strict pyright checks."""
with open(PYRIGHT_CONFIG) as f:
lines = f.readlines()
i = 0
while i < len(lines) and not lines[i].strip().startswith('"exclude": ['):
i += 1
assert i < len(lines), f"Error parsing {PYRIGHT_CONFIG}"
while not lines[i].strip().startswith("]"):
i += 1
line_to_add = f' "{stub_dir}",'
initial = i - 1
while lines[i].lower() > line_to_add.lower():
i -= 1
if lines[i + 1].strip().rstrip(",") == line_to_add.strip().rstrip(","):
print(f"{PYRIGHT_CONFIG} already up-to-date")
return
if i == initial:
# Special case: when adding to the end of the list, commas need tweaking
line_to_add = line_to_add.rstrip(",")
lines[i] = lines[i].rstrip() + ",\n"
lines.insert(i + 1, line_to_add + "\n")
print(f"Updating {PYRIGHT_CONFIG}")
with open(PYRIGHT_CONFIG, "w") as f:
f.writelines(lines)
def main() -> None:
parser = argparse.ArgumentParser(
description="""Generate baseline stubs automatically for an installed pip package
using stubgen. Also run black and isort. If the name of
the project is different from the runtime Python package name, you must
also use --package (example: --package yaml PyYAML)."""
)
parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
parser.add_argument("--package", help="generate stubs for this Python package (defaults to project)")
args = parser.parse_args()
project = args.project
package = args.package
if not re.match(r"[a-zA-Z0-9-_.]+$", project):
sys.exit(f"Invalid character in project name: {project!r}")
if not package:
package = project # TODO: infer from installed files
if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
sys.exit("Error: Current working
|
directory must be the root of typeshed r
|
epository")
# Get normalized project name and version of installed package.
info = get_installed_package_info(project)
if info is None:
print(f'Error: "{project}" is not installed', file=sys.stderr)
print("", file=sys.stderr)
print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
sys.exit(1)
project, version = info
stub_dir = os.path.join("stubs", project)
if os.path.exists(stub_dir):
sys.exit(f"Error: {stub_dir} already exists (delete it first)")
run_stubgen(package)
# Stubs were generated under out/. Copy them to stubs/.
copy_stubs("out", package, stub_dir)
run_isort(stub_dir)
run_black(stub_dir)
create_metadata(stub_dir, version)
# Since the generated stubs won't have many type annotations, we
# have to exclude them from strict pyright checks.
add_pyright_exclusion(stub_dir)
print("\nDone!\n\nSuggested next steps:")
print(f" 1. Manually review the generated stubs in {stub_dir}")
print(f' 2. Run "MYPYPATH={stub_dir} python3 -m mypy.stubtest {package}" to check the stubs against runtime')
print(f' 3. Run "mypy {stub_dir}" to check for errors')
print(f' 4. Run "black {stub_dir}" and "isort {stub_dir}" (if you\'ve made code changes)')
print(f' 5. Run "flake8 {stub_dir}" to check for e.g. unused imports')
print(" 6. Commit the changes on a new branch and create a typeshed PR")
if __name__ == "__main__":
main()
|
newmediamedicine/indivo_server_1_0
|
indivo/views/reports/procedure.py
|
Python
|
gpl-3.0
| 2,013
| 0.007452
|
"""
.. module:: views.reports.procedure
:synopsis: Indivo view implementations for the procedure report.
.. moduleauthor:: Daniel Haas <daniel.haas@post.harvard.edu>
.. moduleauthor:: Ben Adida <ben@adida.net>
"""
from django.http import HttpResponseBadRequest, HttpResponse
from indivo.lib.view_decorators import marsloader, DEFAULT_ORDERBY
from indivo.lib.query import FactQuery, DATE, STRING, NUMBER
from indivo.models import Procedure
PROCEDURE_FILTERS = {
'procedure_name' : ('name', STRING),
'date_performed': ('date_performed', DATE),
DEFAULT_ORDERBY : ('created_at', DATE)
}
PROCEDURE_TEMPLATE = 'reports/procedure.xml'
def procedure_list(*args, **kwargs):
""" List the procedure data for a given record.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.procedure._procedure_list`.
"""
return _procedure_list(*args, **kwargs)
def carenet_procedure_list(*args, **kwargs):
""" List the procedure data for a given carenet.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.procedure._procedure_list`.
"""
return _procedure_list(*args, **kwargs)
@marsloader(query_api_support=True)
def _procedure_list(request, group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record=None, carenet=None):
""" List the procedure objects matching the passed query parameters.
See :doc:`/query-api` for a listing of valid parameters.
Will return :http:statuscode:`200` with a list of procedures on success,
:http:statuscode:`400` if any invalid query parameters were passed.
"""
q = FactQuery(Procedure, PROCEDURE_FILTERS,
group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record, carenet)
try:
return q.render(PROCEDURE_TEMPLATE)
except ValueError as e:
return HttpResponseBadRequest(str(e))
|
cpennington/edx-platform
|
lms/djangoapps/program_enrollments/management/commands/link_program_enrollments.py
|
Python
|
agpl-3.0
| 4,440
| 0.004054
|
""" Management command to link program enrollments and external student_keys to an LMS user """
from uuid import UUID
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from lms.djangoapps.program_enrollments.api import link_program_enrollments
User = get_user_model()
INCORRECT_PARAMETER_TEMPLATE = (
"incorrectly formatted argument '{}', "
"must be in form <external user key>:<lms username>"
)
DUPLICATE_KEY_TEMPLATE = 'external user key {} provided multiple times'
class Command(BaseCommand):
"""
Management command to manually link ProgramEnrollments without an LMS user to an LMS user by
username.
Usage:
./manage.py lms link_program_enrollments <program_uuid> <user_item>*
where a <user_item> is a string formatted as <external_user_key>:<lms_username>
Normally, program enrollments should be linked by the Django Social Auth post_save signal
handler `lms.djangoapps.program_enrollments.signals.matriculate_learner`, but in the case that
a partner does not have an IDP set up for learners to log in through, we need a way to link
enrollments.
Provided a program uuid and a list of external_user_key:lms_username, this command will look up
the matching program enrollments and users, and update the program enrollments with the matching
user. If the program enrollment has course enrollments, we will enroll the user into their
waiting program courses.
If an external user key is specified twice, an exception will be raised and no enrollments will
be modified.
For each external_user_key:lms_username, if:
- The user is not found
- No enrollment is found for the given program and external_user_key
- The enrollment already has a user
An error message will be logged and the input will be skipped. All other inputs will be
processed and enrollments updated.
If there is an error while enrolling a user in a waiting program course enrollment, the error
will be logged, and we will roll back all transactions for that user so that their db state will
be the same as it was before this command was run. This is to allow the re-running of the same
command again to correctly enroll the user once the issue preventing the enrollment has been
resolved.
No other users will be affected, they will be processed normally.
"""
help = 'Manually links ProgramEnrollment records to LMS users'
def add_arguments(self, parser):
parser.add_argument(
'program_uuid',
help='the program in which we are linking enrollments to users',
)
parser.add_argument(
'user_items',
nargs='*',
help='specify the users to link, in the format <external_student_key>:<lms_username>*',
)
# pylint: disable=arguments-differ
def handle(self, program_uuid, user_items, *args, **options):
try:
parsed_program_uuid = UUID(program_uuid)
except ValueError:
raise CommandError("supplied program_uuid '{}' is not a valid UUID")
ext_keys_to_usernames = self.parse_user_items(user_items)
try:
link_program_enrollments(
parsed_program_uuid, ext_keys_to_usernames
)
except Exception as e:
raise CommandError(str(e))
def parse_user_items(self, user_items):
"""
Params:
list of strings in the format 'external_user_key:lms_username'
Returns:
dict mapping external user keys to lms usernames
Raises:
CommandError
"""
result = {}
for user_item in user_items:
split_args = user_item.split(':')
if len(split_args) != 2:
message = INCORRECT_PARAMETER_TEMPLATE.format(user_item)
raise CommandError(message)
external_user_key = split_args[0].strip()
lms_username = split_args[1].strip()
if not (external_user_key and lms_username):
message = INCORRECT_PARAMETER_TEMPLATE.format(user_item)
raise CommandError(message)
if external_user_key in result:
raise CommandError(DUPLICATE_KEY_TEMPLATE.format(external_user_key))
result[external_user_key] = lms_username
return result
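# Illustrative usage sketch; the UUID, external keys and usernames below are
# hypothetical:
#
#   ./manage.py lms link_program_enrollments \
#       11111111-2222-3333-4444-555555555555 \
#       learner-01:edx_user_a learner-02:edx_user_b
#
# parse_user_items(['learner-01:edx_user_a', 'learner-02:edx_user_b']) would
# then yield {'learner-01': 'edx_user_a', 'learner-02': 'edx_user_b'}.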
|
erdc/proteus
|
proteus/tests/elliptic_redist/RDLS/rdls_n.py
|
Python
|
mit
| 1,916
| 0.014614
|
from __future__ import absolute_import
from proteus import *
from proteus.default_n import *
try:
from .rdls_p import *
from .vortex2D import *
except:
from rdls_p import *
from vortex2D import *
timeIntegration = NoIntegration
stepController = Newton_controller
# About the nonlinear solver
multilevelNonlinearSolver = Newton
if ct.ELLIPTIC_REDISTANCING > 0:
levelNonlinearSolver = TwoStageNewton
else:
levelNonlinearSolver = Newton
tolFac = 0.0
nl_atol_res = atolRedistance
linTolFac = 0.0
maxNonlinearIts = 100000
maxLineSearches = 0
useEisenstatWalker = True
fullNewtonFlag = True
if useHex:
hex=True
if pDegree_ls==1:
femSpaces = {0:C0_AffineLinearOnCubeWithNodalBasis}
elif pDegree_ls==2:
femSpaces = {0:C0_AffineLagrangeOnCubeWithNodalBasis}
elementQuadrature = CubeGaussQuadrature(nd,vortex_quad_order)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,vortex_quad_order)
else:
if pDegree_ls==1:
femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
elif pDegree_ls==2:
femSpaces = {0:C0_AffineQuadraticOnSimplexWithNodalBasis}
elementQuadrature = SimplexGaussQuadrature(nd,vortex_quad_order)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,vortex_quad_order)
subgridError = HamiltonJacobi_ASGS_opt(coefficients,nd,stabFlag='2',lag=False)
shockCapturing = RDLS.ShockCapturing(coefficients,nd,shockCapturingFactor=shockCapturingFactor_rd,lag=lag_shockCapturing_rd)
numericalFluxType = DoNothing
nonlinearSmoother = None
levelNonlinearSolverConvergenceTest='r'
nonlinearSolverConvergenceTest='r'
matrix = SparseMatrix
if parallel:
multilevelLinearSolver = KSP_petsc4py#PETSc
levelLinearSolver = KSP_petsc4py#PETSc
linear_solver_options_prefix = 'rdls_'
linearSolverConvergenceTest = 'r-true'
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
conservativeFlux = {}
|
balazsfabian/pytim
|
pytim/itim.py
|
Python
|
gpl-3.0
| 16,238
| 0.000308
|
#!/usr/bin/python
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Module: itim
============
"""
from __future__ import print_function
from multiprocessing import Process, Queue
import numpy as np
try:
from __builtin__ import zip as builtin_zip
except:
from builtins import zip as builtin_zip
from scipy.spatial import cKDTree
from . import messages
from . import utilities
from .surface import SurfaceFlatInterface as Surface
from .sanity_check import SanityCheck
from .interface import Interface
from .patches import patchTrajectory, patchOpenMM, patchMDTRAJ
class ITIM(Interface):
""" Identifies interfacial molecules at macroscopically flat interfaces.
*(Pártay, L. B.; Hantal, Gy.; Jedlovszky, P.; Vincze, Á.; Horvai, G., \
J. Comp. Chem. 29, 945, 2008)*
:param Object universe: The MDAnalysis_ Universe, MDTraj_ trajectory
or OpenMM_ Simulation objects.
:param Object group: An AtomGroup, or an array-like object with
the indices of the atoms in the group. Will
identify the interfacial molecules from this
group
:param float alpha: The probe sphere radius
:param str normal: The macroscopic interface normal direction
'x','y', 'z' or 'guess' (default)
:param bool molecular: Switches between search of interfacial
molecules / atoms (default: True)
:param int max_layers: The number of layers to be identified
:param dict radii_dict: Dictionary with the atomic radii of the
elements in the group. If None is supplied,
the default one (from GROMOS 43a1) will be
used.
:param float cluster_cut: Cutoff used for neighbors or density-based
cluster search (default: None disables the
cluster analysis)
:param float cluster_threshold_density: Number density threshold for
the density-based cluster search. 'auto'
determines the threshold automatically.
Default: None uses simple neighbors cluster
search, if cluster_cut is not None
:param Object extra_cluster_groups: Additional groups, to allow for
mixed interfaces
:param bool info: Print additional info
:param bool centered: Center the :py:obj:`group`
:param bool warnings: Print warnings
:param float mesh: The grid spacing used for the testlines
(default 0.4 Angstrom)
:param bool autoassign: If true (default) detect the interface
every time a new frame is selected.
Example:
>>> import MDAnalysis as mda
>>> import numpy as np
>>> import pytim
>>> from pytim.datafiles import *
>>>
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>>
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4,molecular=True)
>>> # atoms in the layers can be accesses either through
>>> # the layers array:
>>> print (interface.layers)
[[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
<AtomGroup with 663 atoms> <AtomGroup with 651 atoms>]
[<AtomGroup with 786 atoms> <AtomGroup with 702 atoms>
<AtomGroup with 666 atoms> <AtomGroup with 636 atoms>]]
>>> interface.layers[0,0] # upper side, first layer
<AtomGroup with 786 atoms>
>>> interface.layers[1,2] # lower side, third layer
<AtomGroup with 666 atoms>
>>> # or as a whole AtomGroup. This can include all atoms in all layers
>>> interface.atoms
<AtomGroup with 5571 atoms>
>>> selection = interface.atoms.sides == 0
>>> interface.atoms[ selection ] # all atoms in the upper side layer
<AtomGroup with 2781 atoms>
>>> selection = np.logical_and(interface.atoms.layers == 2 , selection)
>>> interface.atoms[ selection ] # upper side, second layer
<AtomGroup with 681 atoms>
>>> # the whole system can be quickly saved to a pdb file
>>> # including the layer information, written in the beta field
>>> # using:
>>> interface.writepdb('system.pdb',centered=True)
>>> # of course, the native interface of MDAnalysis can be used to
>>> # write pdb files, but the centering options are not available.
>>> # Writing to other formats that do not support the beta factor
>>> # will loose the information on the layers.
>>> interface.atoms.write('only_layers.pdb')
>>> # In some cases it might be necessary to compute two interfaces.
>>> # This could be done in the following way:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import WATER_GRO, WATER_XTC
>>> u = mda.Universe(WATER_GRO,WATER_XTC)
>>> u2 = mda.Universe(WATER_GRO,WATER_XTC)
>>> inter = pytim.ITIM(u,group=u.select_atoms('resname SOL'))
>>> inter2 = pytim.ITIM(u2,group=u2.select_atoms('resname SOL'))
>>> for ts in u.trajectory[::50]:
... ts2 = u2.trajectory[ts.frame]
>>> # pytim can be used also on top of mdtraj (MDAnalysis must be present,though)
>>> import mdtraj
>>> import pytim
>>> from pytim.datafiles import WATER_GRO, WATER_XTC
>>> t = mdtraj.load_xtc(WATER_XTC,top=WATER_GRO)
>>> inter = pytim.ITIM(t)
.. _MDAnalysis: http://www.mdanalysis.org/
.. _MDTraj: http://www.mdtraj.org/
.. _OpenMM: http://www.openmm.org/
"""
@property
def layers(self):
"""Access the layers as numpy arrays of AtomGroups.
The object can be sliced as usual with numpy arrays, so, for example:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import *
>>>
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>>
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4,molecular=True)
>>> print(interface.layers[0,:]) # upper side (0), all layers
[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
<AtomGroup with 663 atoms> <AtomGroup with 651 atoms>]
>>> repr(interface.layers[1,0]) # lower side (1), first layer (0)
'<AtomGroup with 786 atoms>'
>>> print(interface.layers[:,0:3]) # 1st - 3rd layer (0:3), on both sides
[[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
<AtomGroup with 663 atoms>]
[<AtomGroup with 786 atoms> <AtomGroup with 702 atoms>
<AtomGroup with 666 atoms>]]
>>> print(interface.layers[1,0:4:2]) # side 1, layers 1-4 & stride 2 (0:4:2)
[<AtomGroup with 786 atoms> <AtomGroup with 666 atoms>]
"""
return self._layers
def __init__(self,
universe,
group=None,
alpha=1.5,
normal='guess',
molecular=True,
max_layers=1,
radii_dict=None,
cluster_cut=None,
cluster_threshold_density=None,
extra_cluster_groups=None,
info=False,
centered=False,
warnings=False,
mesh=0.4,
autoassign=True,
**kargs):
self.autoassign = autoassign
self.symmetry = 'planar'
self.do_center = centered
sanity = SanityCheck(self, warnings=warnings)
sanity.assign_universe(universe, group)
sanity.assign_a
|
jchome/LocalGuide-Mobile
|
kvmap/overlays/WMSOverlayServer.py
|
Python
|
gpl-2.0
| 5,614
| 0.020485
|
from kvmap.code.projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
try:
from pyproj import Proj
from xml.etree import ElementTree as ET
except:
pass
class WMSOverlayServer(object):
cache = {}
available_maptype = dict(roadmap='Roadmap') # default
type = "wms"
'''Generic WMS server'''
def __init__(self, progress_callback=None):
self.progress_callback = progress_callback
def setProgressCallback(self, progress_callback):
self.progress_callback = progress_callback
def getInfo(self, lat, lon, epsilon):
return None
def get(self, parent, width, height):
self.bl = parent.bottom_left
self.tr = parent.top_right
self.zoom = parent.zoom
url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1], self.zoom, width, height)
if not url:
return None
key = hashlib.md5(url).hexdigest()
if key in self.cache:
return self.cache[key]
try:
image = Loader.image('http://' + self.provider_host + url, progress_callback=self.progress_callback)
self.cache[key] = image
except Exception, e:
Logger.error('OverlayServer could not find (or read) image %s [%s]' % (url, e))
image = None
return image
def getLegendGraphic(self):
if self.legend is None and not self.triedlegend:
self.triedlegend = True
layer = self.layer
if "," in layer:
layer = layer[layer.rindex(",") + 1:]
if self.legendlayer:
layer = self.legendlayer
url = self.baseurl + "?REQUEST=GetLegendGraphic&VERSION=1.0.0&FORMAT=image/png&LAYER=%s&ext=.png" % (layer)
try:
print 'http://' + self.provider_host + url
image = Loader.image('http://' + self.provider_host + url)
self.legend = image
except Exception, e:
Logger.error('OverlayServer could not find LEGENDGRAPHICS for %s %s' % (self.baseurl, layer))
return self.legend
def xy_to_co(self, lat, lon):
if self.customBounds:
x, y = latlon_to_custom(lat, lon, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
x, y = lon, lat
elif self.isPGoogle: # patch for android - does not require pyproj library
x, y = latlon_to_google (lat, lon)
else:
x, y = transform(pLatlon, self.projection, lon, lat)
return x, y
def co_to_ll(self, x, y):
if self.customBounds:
u, v = custom_to_unit(x, y, self.bounds)  # convert the x, y arguments passed to this method
l, m = unit_to_latlon(u, v)
elif self.isPLatLon: # patch for android - does not require pyproj library
l, m = y, x
elif self.isPGoogle: # patch for android - does not require pyproj library
l, m = google_to_latlon (y, x)
else:
l, m = transform(self.projection, pLatlon, y, x)
return l, m
def geturl(self, lat1, lon1, lat2, lon2, zoom, w, h):
try:
x1, y1 = self.xy_to_co(lat1, lon1)
x2, y2 = self.xy_to_co(lat2, lon2)
return self.url + "&BBOX=%f,%f,%f,%f&WIDTH=%i&HEIGHT=%i&ext=.png" % (x1, y1, x2, y2, w, h)
except RuntimeError, e:
return None
def parseLayer(self, layer, data):
try:
name = layer.find("Name").text
except:
name = None
srss = layer.findall("SRS")
if name: # and srss:
data[name] = map(lambda x:x.text, srss)
if self.debug:
print "Provider %s provides layer %s in projections %s" % (self.provider_host, na
|
me, data[name])
subs = layer.findall("Layer")
for sub in subs:
self.parseLayer(sub, data)
def initFromGetCapabilities(self, host, baseurl, layer=None, index=0, srs=None):
self.debug = (layer == None) and (index == 0)
# GetCapabilities (Layers + SRS)
if layer is None or srs is None:
capabilities = urlopen(host + baseurl + "?SERVICE=WMS&VERSION=1.1.1&Request=GetCapabilities").read().strip()
try:
tree = ET.fromstring(capabilities)
if self.debug:
ET.dump(tree)
layers = tree.findall("Capability/Layer") # TODO: proper parsing of cascading layers and their SRS
data = {}
for l in layers:
self.parseLayer(l, data)
# Choose Layer and SRS by (alphabetical) index
if layer is None:
layer = sorted(data.keys())[index]
if srs is None:
srs = sorted(data[layer])[0]
except:
pass
print "Displaying from %s/%s: layer %s in SRS %s." % (host, baseurl, layer, srs)
# generate tile URL and init projection by EPSG code
self.layer = layer
self.baseurl = baseurl
self.url = baseurl + "?LAYERS=%s&SRS=%s&FORMAT=image/png&TRANSPARENT=TRUE&SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap&STYLES=" % (layer, srs)
self.isPGoogle = False
self.isPLatLon = False
self.legend = None
self.legendlayer = None
self.triedlegend = False
if srs == "EPSG:4326":
self.isPLatLon = True
elif srs == "EPSG:900913" or srs == "EPSG:3857":
self.isPGoogle = True
try:
self.projection = pGoogle
except:
pass
else:
try:
self.projection = Proj(init=srs)
except:
pass
|
asedunov/intellij-community
|
python/testData/intentions/convertDictComp_after.py
|
Python
|
apache-2.0
| 43
| 0.023256
|
dict([(k, chr(k + 65)) for k in range(10)])
|
business-factory/captain-hook
|
hooks/app.py
|
Python
|
mit
| 66
| 0
|
# -*- coding: utf-8 -*-
from .api_server import API
app = API()
|
ddico/account-financial-tools
|
account_credit_control/tests/test_res_partner.py
|
Python
|
agpl-3.0
| 1,262
| 0
|
# Copyright 2017 Okia SPRL (https://okia.be)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
class TestCreditControlPolicyLevel(TransactionCase):
post_install = True
at_install = False
def test_check_credit_policy(self):
"""
Test the constraints on res.partner
First we try to assign an account and a policy with a wrong policy
(this policy doesn't contain the account of the partner).
After that we add the previous account in the policy and
retry to assign this policy and this account on the partner
:return:
"""
policy = self.env.ref('account_credit_control.credit_control_3_time')
partner = self.env['res.partner'].create({
'name': 'Partner 1',
})
account = partner.property_account_receivable_id
with self.assertRaises(ValidationError):
partner.write({
'credit_policy_id': policy.id,
})
policy.write({
'account_ids': [(6, 0, [account.id])]
})
partner.property_account_receivable_id = account.id
partner.credit_policy_id = policy.id
|
g0v/sunshine.cy
|
parser/property/db_settings.py
|
Python
|
cc0-1.0
| 337
| 0.005935
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import psycopg2
from psycopg2.extras import Json
def con():
psycopg2.extensions.register_adapter(dict, Json)
psycopg2.extensions.register_adapter(list, Json)
conn = psycopg2.connect(dbname='cy', host='localhost', user='postgres', password='postgres')
return conn
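# Minimal usage sketch (the table and column names are hypothetical); dict and
# list values are adapted to JSON automatically thanks to the register_adapter
# calls above:
#
#   conn = con()
#   cur = conn.cursor()
#   cur.execute("INSERT INTO property (data) VALUES (%s)", [{'city': 'Taipei'}])
#   conn.commit()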
|
gertvv/ictrp-retrieval
|
listRecords.py
|
Python
|
mit
| 3,167
| 0.007578
|
import os
import urllib.request, urllib.error, urllib.parse
import shutil
import tempfile
import zipfile
import re
import logging
logger = logging.getLogger()
import xml.etree.cElementTree as ET
from util import stripInvalidXmlEntities
import awsSecrets
ICTRP_SECRETS = awsSecrets.getSecrets()
def nctList():
logger.info("Getting NCT ID list")
url = 'https://clinicaltrials.gov/ct2/results/download?flds=k&down_stds=all&down_typ=fields&down_flds=shown&down_fmt=xml&show_down=Y'
request = urllib.request.urlopen(url)
logger.info('Request complete')
tmpfile = tempfile.TemporaryFile()
shutil.copyfileobj(request, tmpfile)
request.close()
logger.info('Copied to temporary file')
z = zipfile.ZipFile(tmpfile, 'r')
xml = z.open('study_fields.xml', 'r')
logger.info('Opened ZIP contents')
root = ET.parse(xml)
logger.info('Parsed XML')
xml.close()
z.close()
tmpfile.close()
ids = [e.text for e in root.findall('.//nct_id')]
logger.info('NCT IDs listed: {} IDs found'.format(len(ids)))
return ids
def ictrpList():
logger.info("Getting ICTRP ID list")
url = 'http://apps.who.int/trialsearch/TrialService.asmx/GetTrials?Title=&username={username}&password={password}'.format(username=ICTRP_SECRETS['ICTRP_LIST_USERNAME'], password=ICTRP_SECRETS['ICTRP_LIST_PASSWORD'])
logger.info(url)
request = urllib.request.urlopen(url)
logger.info('Request complete')
xml = request.read().decode('utf-8')
request.close()
logger.info('Captured XML string')
root = ET.fromstring(stripInvalidXmlEntities(xml))
logger.info('Parsed XML')
    ids = [e.text for e in root.findall('.//TrialID')]
logger.info('ICTRP IDs listed: {} IDs found'.format(len(ids)))
return ids
def crawlList():
baseUrl = "http://apps.who.int/trialsearch/crawl/"
authinfo = urllib.request.HTTPPasswordMgrWithDefaultRealm()
authinfo.add_password(None, baseUrl, ICTRP_SECRETS['ICTRP_CRAWL_USERNAME'], ICTRP_SECRETS['ICTRP_CRAWL_PASSWORD'])
handler = urllib.request.HTTPBasicAuthHandler(authinfo)
opener = urllib.request.build_opener(handler)
    urllib.request.install_opener(opener)
def crawl(page):
response = urllib.request.urlopen(baseUrl + page)
body = response.read().decode('utf-8')
response.close()
return body
pages = re.findall('href\="(crawl[0-9]+.aspx)"', crawl("crawl0.aspx"))
logging.info("Crawl - got index, {} pages".format(len(pages)))
ids = []
for page in pages:
data = re.findall('trialid\=([A-Za-z0-9\-\/]+)', crawl(page))
logging.info("Crawl - got {}, {} IDs".format(page, len(data)))
ids.extend(data)
return ids
def allList():
il = frozenset(ictrpList())
nl = frozenset(nctList())
return il.union(nl)
#cl = frozenset(crawlList())
#al = sorted(cl.union(il, nl))
#logging.info("From Crawl but not listing: {}".format(sorted(cl.difference(il, nl))))
#logging.info("From list but not Crawl: {}".format(sorted(il.difference(cl))))
#logging.info("From ClinicalTrials.gov but not Crawl: {}".format(sorted(nl.difference(cl))))
#return al
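# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes network access and valid ICTRP credentials loaded above:
#
#     if __name__ == '__main__':
#         logging.basicConfig(level=logging.INFO)
#         trial_ids = allList()
#         logger.info('Combined listing: {} unique trial IDs'.format(len(trial_ids)))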
|
pre-commit/pre-commit
|
tests/parse_shebang_test.py
|
Python
|
mit
| 4,687
| 0
|
from __future__ import annotations
import contextlib
import os.path
import shutil
import sys
import pytest
from pre_commit import parse_shebang
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.util import make_executable
def _echo_exe() -> str:
exe = shutil.which('echo')
assert exe is not None
return exe
def test_file_doesnt_exist():
assert parse_shebang.parse_filename('herp derp derp') == ()
def test_simple_case(tmpdir):
x = tmpdir.join('f')
x.write('#!/usr/bin/env echo')
make_executable(x.strpath)
assert parse_shebang.parse_filename(x.strpath) == ('echo',)
def test_find_executable_full_path():
assert parse_shebang.find_executable(sys.executable) == sys.executable
def test_find_executable_on_path():
assert parse_shebang.find_executable('echo') == _echo_exe()
def test_find_executable_not_found_none():
assert parse_shebang.find_executable('not-a-real-executable') is None
def write_executable(shebang, filename='run'):
os.mkdir('bin')
path = os.path.join('bin', filename)
with open(path, 'w') as f:
f.write(f'#!{shebang}')
make_executable(path)
return path
@contextlib.contextmanager
def bin_on_path():
bindir = os.path.join(os.getcwd(), 'bin')
with envcontext((('PATH', (bindir, os.pathsep, Var('PATH'))),)):
yield
def test_find_executable_path_added(in_tmpdir):
path = os.path.abspath(write_executable('/usr/bin/env sh'))
assert parse_shebang.find_executable('run') is None
with bin_on_path():
assert parse_shebang.find_executable('run') == path
def test_find_executable_path_ext(in_tmpdir):
"""Windows exports PATHEXT as a list of extensions to automatically add
to executables when doing PATH searching.
"""
exe_path = os.path.abspath(
write_executable('/usr/bin/env sh', filename='run.myext'),
)
env_path = {'PATH': os.path.dirname(exe_path)}
env_path_ext = dict(env_path, PATHEXT=os.pathsep.join(('.exe', '.myext')))
assert parse_shebang.find_executable('run') is None
assert parse_shebang.find_executable('run', _environ=env_path) is None
ret = parse_shebang.find_executable('run.myext', _environ=env_path)
assert ret == exe_path
ret = parse_shebang.find_executable('run', _environ=env_path_ext)
assert ret == exe_path
def test_normexe_does_not_exist():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('i-dont-exist-lol')
assert excinfo.value.args == ('Executable `i-dont-exist-lol` not found',)
def test_normexe_does_not_exist_sep():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./i-dont-exist-lol')
assert excinfo.value.args == ('Executable `./i-dont-exist-lol` not found',)
@pytest.mark.xfail(os.name == 'nt', reason='posix only')
def test_normexe_not_executable(tmpdir): # pragma: win32 no cover
tmpdir.join('exe').ensure()
    with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
        parse_shebang.normexe('./exe')
    assert excinfo.value.args == ('Executable `./exe` is not executable',)
def test_normexe_is_a_directory(tmpdir):
with tmpdir.as_cwd():
tmpdir.join('exe').ensure_dir()
exe = os.path.join('.', 'exe')
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe(exe)
msg, = excinfo.value.args
assert msg == f'Executable `{exe}` is a directory'
def test_normexe_already_full_path():
assert parse_shebang.normexe(sys.executable) == sys.executable
def test_normexe_gives_full_path():
assert parse_shebang.normexe('echo') == _echo_exe()
assert os.sep in _echo_exe()
def test_normalize_cmd_trivial():
cmd = (_echo_exe(), 'hi')
assert parse_shebang.normalize_cmd(cmd) == cmd
def test_normalize_cmd_PATH():
cmd = ('echo', '--version')
expected = (_echo_exe(), '--version')
assert parse_shebang.normalize_cmd(cmd) == expected
def test_normalize_cmd_shebang(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
assert parse_shebang.normalize_cmd((path,)) == (echo, path)
def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))
def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
echo = _echo_exe()
path = write_executable('/usr/bin/env echo')
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))
|
haystack/eyebrowse-server
|
api/migrations/0003_auto__chg_field_filterlistitemcopy_date_created.py
|
Python
|
mit
| 6,965
| 0.007466
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FilterListItemCopy.date_created'
db.alter_column('api_filterlistitemcopy', 'date_created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'FilterListItemCopy.date_created'
db.alter_column('api_filterlistitemcopy', 'date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
models = {
'api.blacklistitem': {
'Meta': {'object_name': 'BlackListItem', '_ormbases': ['api.FilterListItem']},
'filterlistitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['api.FilterListItem']", 'unique': 'True', 'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '40'})
},
'api.eyehistory': {
'Meta': {'object_name': 'EyeHistory'},
'domain': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'end_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
'favIconUrl': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'humanize_time': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'src': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'start_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}),
'total_time': ('django.db.models.fields.IntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'api.filterlistitem': {
'Meta': {'object_name': 'FilterListItem'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 8, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'api.filterlistitemcopy': {
            'Meta': {'object_name': 'FilterListItemCopy'},
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'api.whitelistitem': {
'Meta': {'object_name': 'WhiteListItem', '_ormbases': ['api.FilterListItem']},
'filterlistitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['api.FilterListItem']", 'unique': 'True', 'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'whitelist'", 'max_length': '40'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['api']
|
APSL/kaneda
|
kaneda/queues/rq.py
|
Python
|
mit
| 1,504
| 0.003989
|
from __future__ import absolute_import
import logging
try:
from redis import Redis
    from rq import Queue
except ImportError:
Redis = None
Queue = None
from kaneda.exceptions import ImproperlyConfigured
from .base import BaseQueue
class RQQueue(BaseQueue):
"""
RQ queue
:param queue: queue instance of RQ class.
:param redis_url: Redis connection url where RQ will attend the async reporting requests.
:param queue_name: name of the queue being used by the RQ worker process.
"""
    settings_namespace = 'RQ'
def __init__(self, queue=None, redis_url=None, queue_name='kaneda'):
if not Redis:
raise ImproperlyConfigured('You need to install redis to use the RQ queue.')
if not Queue:
raise ImproperlyConfigured('You need to install rq library to use the RQ queue.')
if queue:
if not isinstance(queue, Queue):
raise ImproperlyConfigured('"queue" parameter is not an instance of RQ queue.')
self.queue = queue
elif redis_url:
self.queue = Queue(queue_name, connection=Redis.from_url(redis_url))
else:
self.queue = Queue(queue_name, connection=Redis())
def report(self, name, metric, value, tags, id_):
try:
return self.queue.enqueue('kaneda.tasks.rq.report', name, metric, value, tags, id_)
except Exception as e:
logger = logging.getLogger(__name__)
logger.exception(e)
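# Illustrative usage sketch (added for clarity; not part of the original module).
# The Redis URL and metric values below are assumptions:
#
#     from kaneda.queues.rq import RQQueue
#     queue = RQQueue(redis_url='redis://localhost:6379/0', queue_name='kaneda')
#     queue.report(name='requests', metric='timing', value=123.4,
#                  tags={'endpoint': 'home'}, id_=None)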
|
outlace/OpenTDA
|
tda/__init__.py
|
Python
|
apache-2.0
| 126
| 0
|
from .persistent_homology import PersistentHomology
__name__ = 'OpenTDA'
__version__ = 0.1
__all__ = ['PersistentHomology']
|
BRiDGEIris/cgs-apps
|
code/apps/variants/tests/local_login.py
|
Python
|
apache-2.0
| 561
| 0.046346
|
import requests
def login(output_file):
r = requests.get('http://quickstart.cloudera:8888/accounts/login/?next=/')
tmp = r.text.split('csrfmiddlewaretoken')
tmp = tmp[1].split("value='")
    tmp = tmp[1].split("'")
token = tmp[0]
cookie = r.cookies
data = {'username':'cloudera','password':'cloudera','csrfmiddlewaretoken':token}
    r = requests.post('http://quickstart.cloudera:8888/accounts/login/?next=/variants/api/variants/ulb|0|1|10177|A/',data=data,cookies=cookie)
f = open(output_file,'w')
f.write(r.text)
f.close()
login('curl-results.txt')
|
puttarajubr/commcare-hq
|
corehq/apps/reports/models.py
|
Python
|
bsd-3-clause
| 31,850
| 0.001601
|
from datetime import datetime, timedelta
import logging
from urllib import urlencode
from django.http import Http404
from django.utils import html
from django.utils.safestring import mark_safe
import pytz
from corehq import Domain
from corehq.apps import reports
from corehq.apps.app_manager.models import get_app, Form, RemoteApp
from corehq.apps.app_manager.util import get_case_properties
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
from corehq.apps.domain.middleware import CCHQPRBACMiddleware
from corehq.apps.export.models import FormQuestionSchema
from corehq.apps.reports.display import xmlns_to_name
from dimagi.ext.couchdbkit import *
from corehq.apps.reports.exportfilters import form_matches_users, is_commconnect_form, default_form_filter, \
default_case_filter
from corehq.apps.users.models import WebUser, CommCareUser, CouchUser
from corehq.util.view_utils import absolute_reverse
from couchexport.models import SavedExportSchema, GroupExportConfiguration, FakeSavedExportSchema, SplitColumn
from couchexport.transforms import couch_to_excel_datetime, identity
from couchexport.util import SerializableFunction
import couchforms
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from django.conf import settings
from django.core.validators import validate_email
from corehq.apps.reports.dispatcher import ProjectReportDispatcher, CustomProjectReportDispatcher
import json
import calendar
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from dimagi.utils.logging import notify_exception
from django_prbac.exceptions import PermissionDenied
class HQUserType(object):
REGISTERED = 0
DEMO_USER = 1
ADMIN = 2
UNKNOWN = 3
COMMTRACK = 4
human_readable = [settings.COMMCARE_USER_TERM,
ugettext_noop("demo_user"),
ugettext_noop("admin"),
ugettext_noop("Unknown Users"),
ugettext_noop("CommCare Supply")]
    toggle_defaults = (True, False, False, False, False)
count = len(human_readable)
included_defaults = (True, True, True, True, False)
@classmethod
def use_defaults(cls):
return cls._get_manual_filterset(cls.included_defaults, cls.toggle_defaults)
@classmethod
def all_but_users(cls):
no_users = [True] * cls.count
no_users[cls.REGISTERED] = False
return cls._get_manual_filterset(cls.included_defaults, no_users)
@classmethod
def commtrack_defaults(cls):
        # this is just a convenience method for clarity on commtrack projects
return cls.all()
@classmethod
def all(cls):
defaults = (True,) * cls.count
return cls._get_manual_filterset(defaults, cls.toggle_defaults)
@classmethod
def _get_manual_filterset(cls, included, defaults):
"""
manually construct a filter set. included and defaults should both be
arrays of booleans mapping to values in human_readable and whether they should be
included and defaulted, respectively.
"""
return [HQUserToggle(i, defaults[i]) for i in range(cls.count) if included[i]]
@classmethod
def use_filter(cls, ufilter):
return [HQUserToggle(i, unicode(i) in ufilter) for i in range(cls.count)]
class HQToggle(object):
type = None
show = False
name = None
def __init__(self, type, show, name):
self.type = type
self.name = name
self.show = show
def __repr__(self):
return "%(klass)s[%(type)s:%(show)s:%(name)s]" % dict(
klass = self.__class__.__name__,
type=self.type,
name=self.name,
show=self.show
)
class HQUserToggle(HQToggle):
def __init__(self, type, show):
name = _(HQUserType.human_readable[type])
super(HQUserToggle, self).__init__(type, show, name)
class TempCommCareUser(CommCareUser):
filter_flag = IntegerProperty()
def __init__(self, domain, username, uuid):
if username == HQUserType.human_readable[HQUserType.DEMO_USER]:
filter_flag = HQUserType.DEMO_USER
elif username == HQUserType.human_readable[HQUserType.ADMIN]:
filter_flag = HQUserType.ADMIN
else:
filter_flag = HQUserType.UNKNOWN
super(TempCommCareUser, self).__init__(
domain=domain,
username=username,
_id=uuid,
date_joined=datetime.utcnow(),
is_active=False,
user_data={},
first_name='',
last_name='',
filter_flag=filter_flag
)
def save(self, **params):
raise NotImplementedError
@property
def userID(self):
return self._id
@property
def username_in_report(self):
if self.filter_flag == HQUserType.UNKNOWN:
final = mark_safe('%s <strong>[unregistered]</strong>' % html.escape(self.username))
elif self.filter_flag == HQUserType.DEMO_USER:
final = mark_safe('<strong>%s</strong>' % html.escape(self.username))
else:
final = mark_safe('<strong>%s</strong> (%s)' % tuple(map(html.escape, [self.username, self.user_id])))
return final
@property
def raw_username(self):
return self.username
class Meta:
app_label = 'reports'
DATE_RANGE_CHOICES = ['last7', 'last30', 'lastn', 'lastmonth', 'since', 'range', '']
class ReportConfig(CachedCouchDocumentMixin, Document):
domain = StringProperty()
# the prefix of the report dispatcher class for this report, used to
# get route name for url reversing, and report names
report_type = StringProperty()
report_slug = StringProperty()
subreport_slug = StringProperty(default=None)
name = StringProperty()
description = StringProperty()
owner_id = StringProperty()
filters = DictProperty()
date_range = StringProperty(choices=DATE_RANGE_CHOICES)
days = IntegerProperty(default=None)
start_date = DateProperty(default=None)
end_date = DateProperty(default=None)
datespan_slug = StringProperty(default=None)
def delete(self, *args, **kwargs):
notifications = self.view('reportconfig/notifications_by_config',
reduce=False, include_docs=True, key=self._id).all()
for n in notifications:
n.config_ids.remove(self._id)
if n.config_ids:
n.save()
else:
n.delete()
return super(ReportConfig, self).delete(*args, **kwargs)
@classmethod
def by_domain_and_owner(cls, domain, owner_id, report_slug=None,
stale=True, skip=None, limit=None):
if stale:
#kwargs['stale'] = settings.COUCH_STALE_QUERY
pass
if report_slug is not None:
key = ["name slug", domain, owner_id, report_slug]
else:
key = ["name", domain, owner_id]
db = cls.get_db()
kwargs = {}
if skip is not None:
kwargs['skip'] = skip
if limit is not None:
kwargs['limit'] = limit
result = cache_core.cached_view(
db,
"reportconfig/configs_by_domain",
reduce=False,
include_docs=True,
startkey=key,
endkey=key + [{}],
wrapper=cls.wrap,
**kwargs
)
return result
@classmethod
def default(self):
return {
'name': '',
'description': '',
#'date_range': 'last7',
'days': None,
'start_date': None,
'end_date': None,
'filters': {}
}
def to_complete_json(self):
result = super(ReportConfig, self).to_json()
result.update({
'url': self.url,
'report_name': self.report_name,
'date_description': self.date_description,
'datespan_filters': self.datespan_filters,
'has_ucr_datespan': self.has_ucr_
|
WarrenWeckesser/scipy
|
scipy/_lib/tests/test_import_cycles.py
|
Python
|
bsd-3-clause
| 1,306
| 0
|
import sys
import subprocess
MODULES = [
"scipy.cluster",
"scipy.cluster.vq",
"scipy.cluster.hierarchy",
"scipy.constants",
"scipy.fft",
"scipy.fftpack",
"scipy.fftpack.convolve",
"scipy.integrate",
"scipy.interpolate",
"scipy.io",
"scipy.io.arff",
"scipy.io.harwell_boeing",
"scipy.io.idl",
"scipy.io.matlab",
"scipy.io.netcdf",
"scipy.io.wavfile",
"scipy.linalg",
"scipy.linalg.blas",
"scipy.linalg.cython_blas",
"scipy.linalg.lapack",
"scipy.linalg.cython_lapack",
"scipy.linalg.interpolative",
"scipy.misc",
"scipy.ndimage",
"scipy.odr",
"scipy.optimize",
"scipy.signal",
"scipy.signal.windows",
"scipy.sparse",
"scipy.sparse.linalg",
"scipy.sparse.csgraph",
"scipy.spatial",
"scipy.spatia
|
l.distance",
"scipy.
|
special",
"scipy.stats",
"scipy.stats.distributions",
"scipy.stats.mstats",
"scipy.stats.contingency"
]
def test_modules_importable():
# Regression test for gh-6793.
# Check that all modules are importable in a new Python process.
# This is not necessarily true if there are import cycles present.
for module in MODULES:
cmd = 'import {}'.format(module)
subprocess.check_call([sys.executable, '-c', cmd])
|
bcb/jsonrpcclient
|
setup.py
|
Python
|
mit
| 1,595
| 0.001881
|
"""setup.py"""
from setuptools import setup
with open("README.md") as readme_file:
README = readme_file.read()
test_requirements = ["mock", "pytest", "responses", "testfixtures", "requests", "pyzmq"]
# Async requirements
test_requirements.extend(["pytest-asyncio", "aiohttp", "tornado", "websockets"])
setup(
author="Beau Barker",
author_email="beauinmelbourne@gmail.com",
classifiers=[
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Send JSON-RPC requests",
entry_points={"console_scripts": ["jsonrpc = jsonrpcclient.__main__:main"]},
extras_require={
"aiohttp": ["aiohttp>=3"],
"requests": ["requests"],
"requests_security": ["requests[security]"],
"tornado": ["tornado"],
"unittest": test_requirements,
"websockets": ["websockets"],
"zmq": ["pyzmq"],
|
},
include_package_data=True,
install_requires=["apply_defaults<1", "click<8", "jsonschema<4"],
license="MIT",
long_description=README,
long_description_content_type="text/markdown",
name="jsonrpcclient",
# Be PEP 561 compliant
    # https://mypy.readthedocs.io/en/stable/installed_packages.html#making-pep-561-compatible-packages
package_data={"jsonrpcclient": ["response-schema.json", "py.typed"]},
zip_safe=False,
packages=["jsonrpcclient", "jsonrpcclient.clients"],
url="https://github.com/bcb/jsonrpcclient",
version="3.3.6",
)
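# Illustrative install examples (added for clarity; not part of the original
# setup.py), using the extras declared above:
#
#     pip install jsonrpcclient[requests]
#     pip install jsonrpcclient[aiohttp]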
|
phistuck/FrequentFeedScraper
|
add_handler.py
|
Python
|
mit
| 1,694
| 0.020661
|
import webapp2, logging
from database import get_feed_source_by_name, store_feed_source, \
get_feed_source_by_url, change_feed_source_url
class AddHandler(webapp2.RequestHandler):
def post(self):
from database import FeedSource
name = self.request.get('name')
url = self.request.get('url')
frequency_ms = self.request.get('frequency_ms')
should_update = self.request.get('should_update')
should_be_added = True
existing_source = get_feed_source_by_url(url)
if existing_source:
should_be_added = False
self.response.write( \
'The URL (' + url + ') already exists (name - ' + \
existing_source.name + ').<br/>')
self.response.write('Forgot you added it already? :O')
else:
existing_source = get_feed_source_by_name(name)
if existing_source:
if should_update:
should_be_added = False
change_feed_source_url(existing_source, url)
self.response.write('Updated.')
else:
should_be_added = False
self.response.write('The name (' + name + ') already exists.<br/>')
self.response.write( \
'Go back and choose a different name, or tick "Update?".<br/>')
if should_be_added and store_feed_source(name, url, int(frequency_ms)):
self.response.write('Added.');
def get(self):
from database import FeedSource
self.response.write("""<!doctype html><title>Add Feed</title>
<form method="post">
Name - <input name="name"/><br/>
URL - <input name="url"/><br/>
    Frequency (milliseconds) -
<input type="number" value="1000" name="frequency_ms"/><br/>
    <label>Update?<input type="checkbox" name="should_update" value="1"/></label>
<input type="submit"/>
</form>""")
|
deerwalk/voltdb
|
tests/sqlcoverage/schema/joined-matview-string-schema.py
|
Python
|
agpl-3.0
| 3,168
| 0
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"P1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"P2": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.
|
VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING)
|
,
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"P3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"R1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"R3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
}
}
|
azaghal/ansible
|
test/lib/ansible_test/_internal/config.py
|
Python
|
gpl-3.0
| 13,139
| 0.00274
|
"""Configuration classes."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from . import types as t
from .util import (
find_python,
generate_pip_command,
ApplicationError,
)
from .util_common import (
docker_qualify_image,
get_docker_completion,
get_remote_completion,
CommonConfig,
)
from .metadata import (
Metadata,
)
from .data import (
data_context,
)
try:
TIntegrationConfig = t.TypeVar('TIntegrationConfig', bound='IntegrationConfig')
except AttributeError:
TIntegrationConfig = None # pylint: disable=invalid-name
class ParsedRemote:
"""A parsed version of a "remote" string."""
    def __init__(self, arch, platform, version):  # type: (t.Optional[str], str, str) -> None
self.arch = arch
self.platform = platform
self.version = version
@staticmethod
def parse(value): # type: (str) -> t.Optional['ParsedRemote']
"""Return a ParsedRemote from the given value or None if the syntax is invalid."""
parts = value.split('/')
if len(parts) == 2:
arch = None
platform, version = parts
elif len(parts) == 3:
arch, platform, version = parts
else:
return None
return ParsedRemote(arch, platform, version)
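# Illustrative examples for ParsedRemote.parse (added for clarity; not part of
# the original module; the platform/version strings are made up):
#
#     ParsedRemote.parse('freebsd/12.1')    # arch=None, platform='freebsd', version='12.1'
#     ParsedRemote.parse('power/centos/7')  # arch='power', platform='centos', version='7'
#     ParsedRemote.parse('bad-value')       # None (invalid syntax)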
class EnvironmentConfig(CommonConfig):
"""Configuration common to all commands which execute in an environment."""
def __init__(self, args, command):
"""
:type args: any
:type command: str
"""
super(EnvironmentConfig, self).__init__(args, command)
self.local = args.local is True
self.venv = args.venv
self.venv_system_site_packages = args.venv_system_site_packages
self.python = args.python if 'python' in args else None # type: str
self.docker = docker_qualify_image(args.docker) # type: str
self.docker_raw = args.docker # type: str
self.remote = args.remote # type: str
if self.remote:
self.parsed_remote = ParsedRemote.parse(self.remote)
if not self.parsed_remote or not self.parsed_remote.platform or not self.parsed_remote.version:
raise ApplicationError('Unrecognized remote "%s" syntax. Use "platform/version" or "arch/platform/version".' % self.remote)
else:
self.parsed_remote = None
self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool
self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool
self.docker_keep_git = args.docker_keep_git if 'docker_keep_git' in args else False # type: bool
self.docker_seccomp = args.docker_seccomp if 'docker_seccomp' in args else None # type: str
self.docker_memory = args.docker_memory if 'docker_memory' in args else None
self.docker_terminate = args.docker_terminate if 'docker_terminate' in args else None # type: str
if self.docker_seccomp is None:
self.docker_seccomp = get_docker_completion().get(self.docker_raw, {}).get('seccomp', 'default')
self.remote_stage = args.remote_stage # type: str
self.remote_provider = args.remote_provider # type: str
self.remote_aws_region = args.remote_aws_region # type: str
self.remote_terminate = args.remote_terminate # type: str
if self.remote_provider == 'default':
self.remote_provider = None
self.requirements = args.requirements # type: bool
if self.python == 'default':
self.python = None
actual_major_minor = '.'.join(str(i) for i in sys.version_info[:2])
self.python_version = self.python or actual_major_minor
self.python_interpreter = args.python_interpreter
self.pip_check = args.pip_check
self.delegate = self.docker or self.remote or self.venv
self.delegate_args = [] # type: t.List[str]
if self.delegate:
self.requirements = True
self.inject_httptester = args.inject_httptester if 'inject_httptester' in args else False # type: bool
self.httptester = docker_qualify_image(args.httptester if 'httptester' in args else '') # type: str
if self.get_delegated_completion().get('httptester', 'enabled') == 'disabled':
self.httptester = False
if self.get_delegated_completion().get('pip-check', 'enabled') == 'disabled':
self.pip_check = False
if args.check_python and args.check_python != actual_major_minor:
raise ApplicationError('Running under Python %s instead of Python %s as expected.' % (actual_major_minor, args.check_python))
if self.docker_keep_git:
def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
"""Add files from the content root .git directory to the payload file list."""
for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):
paths = [os.path.join(dirpath, filename) for filename in filenames]
files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)
data_context().register_payload_callback(git_callback)
@property
def python_executable(self):
"""
:rtype: str
"""
return find_python(self.python_version)
@property
def pip_command(self):
"""
:rtype: list[str]
"""
return generate_pip_command(self.python_executable)
def get_delegated_completion(self):
"""Returns a dictionary of settings specific to the selected delegation system, if any. Otherwise returns an empty dictionary.
:rtype: dict[str, str]
"""
if self.docker:
return get_docker_completion().get(self.docker_raw, {})
if self.remote:
return get_remote_completion().get(self.remote, {})
return {}
class TestConfig(EnvironmentConfig):
"""Configuration common to all test commands."""
def __init__(self, args, command):
"""
:type args: any
:type command: str
"""
super(TestConfig, self).__init__(args, command)
self.coverage = args.coverage # type: bool
self.coverage_label = args.coverage_label # type: str
self.coverage_check = args.coverage_check # type: bool
self.coverage_config_base_path = None # type: t.Optional[str]
self.include = args.include or [] # type: t.List[str]
self.exclude = args.exclude or [] # type: t.List[str]
self.require = args.require or [] # type: t.List[str]
self.changed = args.changed # type: bool
self.tracked = args.tracked # type: bool
self.untracked = args.untracked # type: bool
self.committed = args.committed # type: bool
self.staged = args.staged # type: bool
self.unstaged = args.unstaged # type: bool
self.changed_from = args.changed_from # type: str
self.changed_path = args.changed_path # type: t.List[str]
self.base_branch = args.base_branch # type: str
self.lint = args.lint if 'lint' in args else False # type: bool
self.junit = args.junit if 'junit' in args else False # type: bool
self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool
self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata()
self.metadata_path = None
if self.coverage_check:
self.coverage = True
def metadata_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
"""Add the metadata file to the payload file list."""
config = self
if self.metadata_path:
files.append((os.path.abspath(config.metadata_path), config.metadata_path))
data_context().register_payload_callback(metadata_callback)
class ShellConfig(EnvironmentConfig):
"""Configuration for the shell command."""
def __init__
|
asridharan/dcos
|
packages/adminrouter/extra/src/test-harness/modules/mocker/endpoints/mesos_dns.py
|
Python
|
apache-2.0
| 5,228
| 0.001148
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""MesosDNS mock endpoint"""
import copy
import logging
import re
from exceptions import EndpointException
from mocker.endpoints.recording import (
RecordingHTTPRequestHandler,
RecordingTcpIpEndpoint,
)
# pylint: disable=C0103
log = logging.getLogger(__name__)
# pylint: disable=R0903
class MesosDnsHTTPRequestHandler(RecordingHTTPRequestHandler):
"""Request handler that mimics MesosDNS
Depending on how it was set up, it will respond with different SRV
entries for preset services.
"""
SRV_QUERY_REGEXP = re.compile('^/v1/services/_([^_]+)._tcp.marathon.mesos$')
def _calculate_response(self, base_path, url_args, body_args=None):
"""Reply with the currently set mock-reply for given SRV record query.
Please refer to the description of the BaseHTTPRequestHandler class
for details on the arguments and return value of this method.
Raises:
EndpointException: request URL path is unsupported
"""
if base_path == '/v1/reflect/me':
# A test URI that is used by tests. In some cases it is impossible
# to reuse SRV record path.
return self._reflect_request(base_path, url_args, body_args)
match = self.SRV_QUERY_REGEXP.search(base_path)
if match:
return self.__srv_permissions_request_handler(match.group(1))
raise EndpointException(
code=500,
content="Path `{}` is not supported yet".format(base_path))
def __srv_permissions_request_handler(self, srvid):
"""Calculate reply for given service-ID
Arguments:
srvid (string): service ID to reply to
"""
ctx = self.server.context
if srvid not in ctx.data['services']:
raise EndpointException(
code=500,
content="Service `{}` is unknown".format(srvid))
blob = self._convert_data_to_blob(ctx.data['services'][srvid])
return 200, 'application/json', blob
def create_srv_entry(srv_name, ip, port):
"""Create a SRV entry based on the supplied data
Arguments:
srv_name (string): service ID that the new SRV-entry should represent
port (string): TCP/IP port that the new agent should pretend to listen on
        ip (string): IP address that the new agent should pretend to listen on
Returns:
SRV entry dict mimicing the one returned by MesosDNS
"""
res = {}
res['service'] = "_{}._tcp.marathon.mesos".format(srv_name)
res['host'] = "{}-74b1w-s1.marathon.mesos.".format(srv_name)
res['ip'] = ip
res['port'] = port
return res
EMPTY_SRV = {
"scheduler-alwaysthere": [
{
"service": "",
"host": "",
"ip": "",
"port": "",
}
],
}
SCHEDULER_SRV_ALWAYSTHERE = {
"scheduler-alwaysthere": [
create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16000),
create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ALWAYSTHERE_DIFFERENTPORT = {
"scheduler-alwaysthere": [
create_srv_entry("sch
|
eduler-alwaysthere", "127.0.0.15", 16001),
create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ALWAYSTHERE_NEST1 = {
"scheduler-alwaysthere.nest1.nest2": [
create_srv_entry("scheduler-alwaysthere.nest1.nest2", "127.0.0.1", 18000),
create_srv_entry("scheduler-alw
|
aysthere.nest1.nest2", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ALWAYSTHERE_NEST2 = {
"scheduler-alwaysthere.nest1": [
create_srv_entry("scheduler-alwaysthere.nest1", "127.0.0.1", 17000),
create_srv_entry("scheduler-alwaysthere.nest1", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ONLYMESOSDNS_NEST2 = {
"scheduler-onlymesosdns.nest1.nest2": [
create_srv_entry("scheduler-onlymesosdns.nest1.nest2", "127.0.0.1", 18003),
create_srv_entry("scheduler-onlymesosdns.nest1.nest2", "127.0.0.1", 16002),
],
}
INITIAL_SRVDATA = {}
INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE)
INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE_NEST1)
INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE_NEST2)
INITIAL_SRVDATA.update(SCHEDULER_SRV_ONLYMESOSDNS_NEST2)
# pylint: disable=R0903,C0103
class MesosDnsEndpoint(RecordingTcpIpEndpoint):
"""An endpoint that mimics DC/OS MesosDNS"""
def __init__(self, port, ip=''):
super().__init__(port, ip, MesosDnsHTTPRequestHandler)
self.__context_init()
def reset(self, *_):
"""Reset the endpoint to the default/initial state."""
with self._context.lock:
super().reset()
self.__context_init()
def set_srv_response(self, srvs):
"""Change the endpoint output so that it responds with a non-default
MesosDNS srv node.
"""
with self._context.lock:
self._context.data["services"] = srvs
def __context_init(self):
"""Helper function meant to initialize all the data relevant to this
particular type of endpoint"""
self._context.data["services"] = copy.deepcopy(INITIAL_SRVDATA)
|
jonathaneunice/textdata
|
test/test_eval.py
|
Python
|
apache-2.0
| 2,704
| 0.001109
|
# -*- coding: utf-8 -*-
import sys
import pytest
from textdata.eval import evaluation
_PY2 = sys.version_info[0] == 2
_PY26 = sys.version_info[:2] == (2, 6)
def test_evaluation_natural():
cases = [
(' 1 ', 1),
(' 1.1 \n ', 1.1),
(' gizmo \n\t \n', 'gizmo'),
]
if not _PY26:
cases.append((' 1+4j ', 1+4j))
# PY26 doesn't play nice with complex literals
# Not worth fighting over.
for value, expected in cases:
assert evaluation(value) == expected
assert evaluation(value.strip()) == expected
assert evaluation(value, 'natural') == expected
def test_evaluation_none():
cases = [
(' 1 ', 1),
(' 1.1 \n ', 1.1),
(' gizmo \n\t \n', 'gizmo'),
(' 1+4j ', 1+4j)
]
for value, _ in cases:
assert evaluation(value, None) == value
assert evaluation(value, 'none') == value
def test_evaluation_minimal():
cases = [
(' 1 ', '1'),
(' 1.1 \n ', '1.1'),
(' gizmo \n\t \n', 'gizmo'),
(' 1+4j ', '1+4j')
]
for value, expected in cases:
assert evaluation(value, 'minimal') == expected
assert evaluation(value, False) == expected
def test_evaluation_broken():
cases = [
(' 1 ', '1'),
(' 1.1 \n ', '1.1'),
(' gizmo \n\t \n', 'gizmo'),
(' 1+4j ', '1+4j')
]
for value, expected in cases:
with pytest.raises(ValueError):
assert evaluation(value, 'smork') == expected
with pytest.raises(ValueError):
assert evaluation(value, value) == expected
def test_py23_diff():
if _PY2:
assert evaluation('007', 'natural') == 7
else:
assert evaluation('007', 'natural') == '007'
def test_evaluation_func():
custom = lambda x: x.strip().upper()
def custom2(x):
return x.strip().upper()
assert evaluation(' haPpIly ', custom) == 'HAPPILY'
assert evaluation(' haPpIly ', custom2) == 'HAPPILY'
def test_evaluation_full():
cases = [
(' "1" ', 1),
(' "1.1" \n ', 1.1),
(' gizmo \n\t \n', 'gizmo'),
(' "gizmo" \n\t \n', 'gizmo'),
(' "and space " \n\t \n', 'and space '),
(' "a" ', 'a')
]
if not _PY26:
cases.append((' 1+4j ', 1+4j))
cases.append((' "1+4j" ', 1+4j))
# PY26 doesn't play nice with complex literals
# Not worth fighting over.
for value, expected in cases:
assert evaluation(value.strip(), 'full') == expected
def test_evaluation_exception():
def broken():
raise ValueError
assert evaluation(' mostly ', broken) == 'mostly'
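# Summary of the modes exercised above (added for clarity; not part of the
# original tests):
#
#     evaluation(' 1.5 ') == 1.5                # 'natural' (default): strip and parse
#     evaluation(' 1.5 ', 'minimal') == '1.5'   # strip only
#     evaluation(' 1.5 ', None) == ' 1.5 '      # leave untouched
#     evaluation('"x"', 'full') == 'x'          # also evaluates quoted literals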
|
mmoiozo/mavcourse
|
sw/tools/dfu/dfu.py
|
Python
|
gpl-2.0
| 7,206
| 0.005551
|
#!/usr/bin/env python
#
# dfu.py: Access USB DFU class devices
# Copyright (C) 2009 Black Sphere Technologies
# Copyright (C) 2012 Transition Robotics Inc.
# Written by Gareth McMullin <gareth@blacksphere.co.nz>
# Modified by Piotr Esden-Tempski <piotr@esden.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import usb
DFU_DETACH_TIMEOUT = 1000
# DFU Requests
DFU_DETACH = 0x00
DFU_DNLOAD = 0x01
DFU_UPLOAD = 0x02
DFU_GETSTATUS = 0x03
DFU_CLRSTATUS = 0x04
DFU_GETSTATE = 0x05
DFU_ABORT = 0x06
# DFU States
STATE_APP_IDLE = 0x00
STATE_APP_DETACH = 0x01
STATE_DFU_IDLE = 0x02
STATE_DFU_DOWNLOAD_SYNC = 0x03
STATE_DFU_DOWNLOAD_BUSY = 0x04
STATE_DFU_DOWNLOAD_IDLE = 0x05
STATE_DFU_MANIFEST_SYNC = 0x06
STATE_DFU_MANIFEST = 0x07
STATE_DFU_MANIFEST_WAIT_RESET = 0x08
STATE_DFU_UPLOAD_IDLE = 0x09
STATE_DFU_ERROR = 0x0a
DFU_STATUS_OK = 0x00
# DFU Status codes
DFU_STATUS_ERROR_TARGET = 0x01
DFU_STATUS_ERROR_FILE = 0x02
DFU_STATUS_ERROR_WRITE = 0x03
DFU_STATUS_ERROR_ERASE = 0x04
DFU_STATUS_ERROR_CHECK_ERASED = 0x05
DFU_STATUS_ERROR_PROG = 0x06
DFU_STATUS_ERROR_VERIFY = 0x07
DFU_STATUS_ERROR_ADDRESS = 0x08
DFU_STATUS_ERROR_NOTDONE = 0x09
DFU_STATUS_ERROR_FIRMWARE = 0x0a
DFU_STATUS_ERROR_VENDOR = 0x0b
DFU_STATUS_ERROR_USBR = 0x0c
DFU_STATUS_ERROR_POR = 0x0d
DFU_STATUS_ERROR_UNKNOWN = 0x0e
DFU_STATUS_ERROR_STALLEDPKT = 0x0f
class dfu_status(object):
def __init__(self, buf):
self.bStatus = buf[0]
self.bwPollTimeout = buf[1] + (buf[2]<<8) + (buf[3]<<16)
self.bState = buf[4]
self.iString = buf[5]
class dfu_device(object):
def __init__(self, dev, conf, iface):
self.dev = dev
self.conf = conf
self.iface = iface
try:
self.handle = self.dev.open()
except:
raise
try:
self.handle.setConfiguration(conf)
except:
pass
try:
self.handle.claimInterface(iface.interfaceNumber)
except:
raise
if type(self.iface) is usb.Interface:
self.index = self.iface.interfaceNumber
else:
self.index = self.iface
def detach(self, wTimeout=255):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_DETACH,
None, value=wTimeout, index=self.index)
def download(self, wBlockNum, data):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_DNLOAD,
data, value=wBlockNum, index=self.index)
def upload(self, wBlockNum, length):
return self.handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_UPLOAD,
length, value=wBlockNum,
index=self.index)
def get_status(self):
buf = self.handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_GETSTATUS,
6, index=self.index, timeout=2000)
return dfu_status(buf)
def clear_status(self):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_CLRSTATUS,
"", index=0)
def get_state(self):
buf = self.handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_GETSTATE,
1, index=self.index)
return buf[0]
def abort(self):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_ABORT,
None, index=self.index)
def make_idle(self):
retries = 3
while retries:
try:
status = self.get_status()
except:
self.clear_status()
continue
retries -= 1
            if status.bState == STATE_DFU_IDLE:
return True
if ((status.bState == STATE_DFU_DOWNLOAD_SYNC) or
(status.bState == STATE_DFU_DOWNLOAD_IDLE) or
(status.bState == STATE_DFU_MANIFEST_SYNC) or
(status.bState == STATE_DFU_UPLOAD_IDLE) or
                (status.bState == STATE_DFU_DOWNLOAD_BUSY) or
(status.bState == STATE_DFU_MANIFEST)):
self.abort()
continue
if status.bState == STATE_DFU_ERROR:
self.clear_status()
continue
if status.bState == STATE_APP_IDLE:
self.detach(DFU_DETACH_TIMEOUT)
continue
if ((status.bState == STATE_APP_DETACH) or
(status.bState == STATE_DFU_MANIFEST_WAIT_RESET)):
usb.reset(self.handle)
return False
raise Exception
def finddevs():
devs = []
for bus in usb.busses():
for dev in bus.devices:
for conf in dev.configurations:
for ifaces in conf.interfaces:
for iface in ifaces:
if ((iface.interfaceClass == 0xFE) and
(iface.interfaceSubClass == 0x01)):
devs.append((dev, conf, iface))
return devs
if __name__ == "__main__":
devs = finddevs()
if not devs:
print("No devices found!")
exit(-1)
else:
print("Found %i devices." % len(devs))
for dfu in devs:
handle = dfu[0].open()
try:
man = handle.getString(dfu[0].iManufacturer, 30)
product = handle.getString(dfu[0].iProduct, 30)
serial = handle.getString(dfu[0].iSerialNumber, 40)
except Exception as e:
print("Could not access descriptions strings of a DFU device. " +
"Maybe the OS driver is claiming it?")
print("Exception:", e)
continue
print("Device %s: ID %04x:%04x %s - %s - %s" % (dfu[0].filename,
dfu[0].idVendor, dfu[0].idProduct, man, product, serial))
print("%r, %r" % (dfu[1], dfu[2]))
print("Finished scanning for devices.")
|
danakj/chromium
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
|
Python
|
bsd-3-clause
| 4,329
| 0.003234
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
from webkitpy.layout_tests.port import mac
from webkitpy.layout_tests.port import port_testcase
class MacPortTest(port_testcase.PortTestCase):
os_name = 'mac'
os_version = 'mac10.11'
port_name = 'mac'
full_port_name = 'mac-mac10.11'
port_maker = mac.MacPort
def assert_name(self, port_name, os_version_string, expected):
port = self.make_port(os_version=os_version_string, port_name=port_name)
self.assertEqual(expected, port.name())
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = optparse.Values({'configuration': 'Release', 'build_directory': '/foo'})
self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = optparse.Values({'configuration': 'Release', 'build_directory': 'foo'})
self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
# Test that we prefer the legacy dir over the new dir.
options = optparse.Values({'configuration': 'Release', 'build_directory': None})
self.assert_build_path(options, ['/mock-checkout/xcodebuild/Release',
'/mock-checkout/out/Release'], '/mock-checkout/xcodebuild/Release')
def test_build_path_timestamps(self):
options = optparse.Values({'configuration': 'Release', 'build_directory': None})
port = self.make_port(options=options)
port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
port.host.filesystem.maybe_make_directory('/mock-checkout/xcodebuild/Release')
# Check with 'out' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
# Check with 'xcodebuild' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/xcodebuild/' in f else 4
        self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release')
def test_driver_name_option(self):
self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell'))
self.assertTrue(self.make_port(options=optparse.Values(dict(driver_name='OtherDriver')))._path_to_driver().endswith('OtherDriver'))
def test_path_to_image_diff(self):
self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
def test_expectation_files(self):
# FIXME: crbug.com/589709 - Delete this test override once the 10.11 failures have been rebaselined or triaged.
pass
|
mavrix93/LightCurvesClassifier
|
lcc_web/web/interface/admin.py
|
Python
|
mit
| 167
| 0.005988
|
from django.contrib import admin
# Register your models here.
from .models import DbQuery, StarsFilter
admin.site.register(DbQuery)
admin.site.register(StarsFilter)
|
rickydunlop/cookiecutter-django-app-template-drf-haystack
|
{{cookiecutter.app_name}}/serializers.py
|
Python
|
mit
| 771
| 0.022049
|
from rest_framework import serializers
from drf_haystack.serializers import HaystackSerializerMixin
from .models import {{ cookiecutter.model_name }}
from .search_indexes import {{ cookiecutter.model_name }}Index
class {{ cookiecutter.model_name }}Serializer(serializers.ModelSerializer):
class Meta:
model = {{ cookiecutter.model_name }}
fields = '__all__'
class {{ cookiecutter.model_name }}SearchSerializer(HaystackSerializerMixin, {{ cookiecutter.model_name }}Serializer):
groupby_key = serializers.SerializerMethodField()
def get_groupby_key(self, obj):
return obj._meta.verbose_name_plural.title()
class Meta({{ cookiecutter.model_name }}Serializer.Meta):
index_classes = [{{ cookiecutter.model_name }}Index]
|
vsaw/miniSSL
|
minissl/TcpDispatcher.py
|
Python
|
mit
| 3,729
| 0.001341
|
import socket
import asyncore
import pickle
from minissl.AbstractConnection import AbstractConnection
class PickleStreamWrapper(asyncore.dispatcher_with_send, AbstractConnection):
"""Buffers a stream until it contains valid data serialized by pickle.
    That is a bit of ugly glue code I had to come up with at the last minute.
    The SSL server and client were developed against a custom AbstractConnection
    to hide the actual communication channel.
    However, the AbstractConnection does not do fragmentation; it is expected
    to always send and receive all data at once. After trying to implement a
    TCP-based AbstractConnection type I noticed that all the underlying
    fragmentation and buffering of TCP/IP breaks that pattern. Therefore this
class has been written to glue the behavior of the AbstractConnection and
the Networking sockets together.
"""
def __init__(self, sock):
"""Creates a new PickleStream Wrapper for the underlying socket.
:param sock:
The underlying base socket
"""
asyncore.dispatcher_with_send.__init__(self, sock)
AbstractConnection.__init__(self)
self.rx_buffer = ''
self.tx_buffer = ''
def handle_read(self):
new_data = self.recv(1024)
self.rx_buffer += new_data
try:
# try to load the buffer to see if we have something that pickle
# understands. If it worked out send the data upstream, if not do
# nothing and wait for the rest of the data to arrive
unpickled_data = pickle.loads(self.rx_buffer)
if self._receive_handler is not None:
self._receive_handler(self, self.rx_buffer)
# Clear the buffer
self.rx_buffer = ''
except:
pass
def handle_close(self):
AbstractConnection.close(self)
asyncore.dispatcher_with_send.close(self)
def send(self, data):
"""Send all the data
:param data:
The data to send
To match the AbstractConnection API this has to redirect send to sendall
because send can not handle data that is larger than some 512 byte
        buffer limit. sendall on the other hand can without a problem.
"""
self.socket.sendall(data)
class TcpDispatcher(asyncore.dispatcher):
"""A powerful TCP dispatcher based on asyncore to listen for incoming
connections.
    See http://docs.python.org/2/library/asyncore.html for more information on
the library.
"""
def __init__(self, host, port, receive_callback):
"""Start a new dispatcher to listen on the given host socket
:param host:
The host interface to listen to
:param port:
The port to bind to
:param receive_callback:
This callback will be used to notify if an accepted TCP connection
sent any data
"""
asyncore.dispatcher.__init__(self)
self.receive_callback = receive_callback
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
def handle_accept(self):
"""Handle TCP accepts.
In this case if it is a valid accept a separate handler will be launched
that takes care of the rest of the messages being exchanged of the new
accepted connection.
"""
pair = self.accept()
if pair is not None:
sock, addr = pair
print 'Incoming connection from %s' % repr(addr)
wrapper = PickleStreamWrapper(sock)
wrapper.set_receive_handler(self.receive_callback)
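# Illustrative usage sketch (added for clarity; not part of the original module).
# Host, port and callback are assumptions; asyncore.loop() drives the dispatcher:
#
#     def on_receive(connection, data):
#         print 'received %d bytes' % len(data)
#
#     dispatcher = TcpDispatcher('127.0.0.1', 5000, on_receive)
#     asyncore.loop()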
|
sakost/vkAPI
|
vkAPI/API.py
|
Python
|
apache-2.0
| 5,360
| 0.002425
|
import hashlib
from vkAPI.mixins import *
from vkAPI.utils import *
class Session(object):
"""A Session object where you should put a default params which will always send"""
API_URL = 'https://api.vk.com/method/'
def __init__(self, access_token=None, secret=None):
"""
:except kwargs: such params as 'lang' or 'https'
:param access_token: access token
:param secret: a 'secret' parameter
:type access_token: str
:type secret: str
"""
self.access_token = access_token
self._session_request = VkRequest()
if secret:
self._secret = secret
def __str__(self):
return '<Session of vkAPI>'
@staticmethod
def get_md5_hash(string):
return hashlib.md5(string.encode()).hexdigest()
@staticmethod
def get_sig_data(method, secret, params=None):
if params is None:
params = {}
data = ''
for key, item in params.items():
data += str(key) + '=' + str(item) + '&'
data = data[:-1]
data += '&sig=' + Session.get_md5_hash('/method/' + method + '?' + data + secret)
return data
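    # A worked illustration of get_sig_data (hypothetical values; the hash is
    # left symbolic rather than computed here). Note that the dict ordering
    # decides the parameter order inside the signed string:
    #
    #     Session.get_sig_data('users.get', 'SECRET', {'user_ids': 1, 'v': '5.68'})
    #     # -> 'user_ids=1&v=5.68&sig=' + md5('/method/users.get?user_ids=1&v=5.68SECRET')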
def _make_request(self, method_request):
req = self._send_request(method_request)
req.raise_for_status()
text = Parser(req.text).start()
for error_or_response in json_iter_parse(text):
if 'response' in error_or_response:
return error_or_response['response']
elif 'error' in error_or_response:
error = VkAPIError(error_or_response)
if error.is_access_token_incorrect():
self.access_token = None
return self._make_request(method_request)
raise error
def _send_request(self, request):
url = self.API_URL + request._method_name
method_args = request._api._method_default_args.copy()
method_args.update(request._method_args)
access_token = self.access_token
if access_token:
method_args['access_token'] = access_token
if hasattr(self, '_secret'):
if self._secret is not None:
method_args = self.get_sig_data(request._method_name, self._secret, method_args)
timeout = request._api._timeout
response = self._session_request.post(url, method_args, timeout=timeout)
return response
def __setattr__(self, key, value):
if key == 'API_URL':
raise AttributeError('"' + key + '" doesn\'t support assignment')
self.__dict__[key] = value
class API(object):
def __init__(self, session, timeout=10, v='5.68', **method_default_args):
"""
:param session: Object Session
:param timeout: timeout. 10 by default
:param v: API version
:param method_default_args: Default args that will be used always in this API object
:type session: Session
:type timeout: int
:type v: str
"""
self._session = session
self._timeout = timeout
self._method_default_args = method_default_args
self._method_default_args.update({'v': v})
def __getattr__(self, method_name):
return Request(self, method_name)
def __call__(self, method_name, **method_kwargs):
return getattr(self, method_name)(**method_kwargs)
class Decorator(API):
def __getattr__(self, method_name):
return DecorRequest(self, method_name)
def __call__(self, method_name, **method_kwargs):
def decorator(func):
def wrapper(*args, **kwargs):
                return func(*args, getattr(self, method_name)(**method_kwargs), **kwargs)
return wrapper
return decorator
class Request(object):
__slots__ = ('_api', '_method_name', '_method_args')
def __init__(self, api, method_name):
self._api = api
self._method_name = method_name
def __getattr__(self, method_name):
return Request(self._api, self._method_name + '.' + method_name)
def __call__(self, **method_args):
self._method_args = method_args
        return self._api._session._make_request(self)
class DecorRequest(Request):
def __getattr__(self, method_name):
return DecorRequest(self._api, self._method_name + '.' + method_name)
def __call__(self, is_method=False, **method_args):
self._method_args = method_args
def decorator(func):
def wrapper(*args, **kwargs):
return func(*args, self._api._session._make_request(self), **kwargs)
return wrapper
return decorator
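# A minimal usage sketch of the attribute chaining above (illustration only;
# assumes a valid token, and the final call performs a real HTTP request, so
# it is left commented out):
#
#     session = Session(access_token='<token>')
#     api = API(session, v='5.68')
#     # api.users.get builds a Request whose _method_name is 'users.get';
#     # calling it would POST to https://api.vk.com/method/users.get
#     # response = api.users.get(user_ids=1)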
class AuthSession(AuthMixin, Session):
def __init__(self, user_login='', user_password='', app_id=2274003, scope='offline', client_secret='hHbZxrka2uZ6jB1inYsH',
lang='ru'):
AuthMixin.__init__(self, user_login, user_password, app_id, scope, client_secret, lang)
access_token = self.access_token
secret = self._secret
Session.__init__(self, access_token, secret)
self.access_token = access_token
def __setattr__(self, key, value):
if key == 'OAUTH_URL' or key == 'API_URL':
raise AttributeError('"' + key + '" doesn\'t support assignment')
self.__dict__[key] = value
|
annarev/tensorflow
|
tensorflow/python/ops/math_ops.py
|
Python
|
apache-2.0
| 186,089
| 0.003353
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.
Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.
Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
TensorFlow provides a variety of math functions including:
* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)
See: `tf.linalg` for matrix and tensor functions.
<a id=Segmentation></a>
## About Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[0 0 0 0]
# [5 6 7 8]]
```
The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.
``` python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6, 8, 10, 12],
# [-1, -2, -3, -4]]
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
import six
from six.moves import builtins
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
np_dtypes = LazyLoader(
"np_dtypes", globals(),
"tensorflow.python.ops.numpy_ops.np_dtypes")
# Aliases for some automatically-generated names.
nextafter = gen_math_ops.next_after
@tf_export("linspace", v1=["lin_space", "linspace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("lin_space")
def linspace_nd(start, stop, num, name=None, axis=0):
r"""Generates evenly-spaced values in an interval along a given axis.
A sequence of `num` evenly-spaced values are generated beginning at `start`
along a given `axis`.
If `num > 1`, the values in the sequence increase by
`(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
If `num <= 0`, `ValueError` is raised.
Matches
[np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
behaviour
except when `num == 0`.
For example:
```
tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
```
`Start` and `stop` can be tensors of arbitrary size:
>>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
<tf.Tensor: shape=(5, 2), dtype=float32, numpy=
array([[ 0. , 5. ],
[ 2.5 , 13.75],
[ 5. , 22.5 ],
[ 7.5 , 31.25],
[10. , 40. ]], dtype=float32)>
`Axis` is where the values will be generated (the dimension in the
returned tensor which corresponds to the axis will be equal to `num`)
>>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[ 0. , 2.5 , 5. , 7.5 , 10. ],
[ 5. , 13.75, 22.5 , 31.25, 40. ]], dtype=float32)>
Args:
start: A `Tensor`. Must be one of the following types: `bfloat16`,
`float32`, `float64`. N-D tensor. First entry in the range.
stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
Last entry in the range.
num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
tensor. Number of values to generate.
name: A name for the operation (optional).
axis: Axis along which the operation is performed (used only when N-D
tensors are provided).
Returns:
A `Tensor`. Has the same type as `start`.
"""
with ops.name_scope(name, "linspace", [start, stop]):
start = ops.convert_to_tensor(start, name="start")
# stop must be convertible to the same dtype as start
stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
num_int = array_ops.convert_to_int_tensor(num, name="num")
num = cast(num_int, dtype=start.dtype)
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(start), array_ops.shape(stop))
start = array_ops.broadcast_to(start, broadcast_shape)
stop = array_ops.broadcast_to(stop, broadcast_shape)
expanded_start = array_ops.expand_dims(start, axis=axis)
expanded_stop = array_ops.expand_dims(stop, axis=axis)
shape = array_ops.shape(expanded_start)
ndims = array_ops.shape(shape)[0]
axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)
# The purpose is to avoid having negative values when repeating.
num_fill = gen_math_ops.maximum(num_int - 2, 0)
# To avoid having negative values in the range or zero division
    # the result is sliced in the end so a correct result is returned for
# num == 1, and num == 0.
n_steps = gen_math_ops.maximum(num_int - 1, 1)
    delta = (expanded_stop - expanded_start) / cast(n_steps, expanded_stop.dtype)
# Re-cast tensors as delta.
expanded_start = cast(expanded_start, delta.dtype)
expanded_stop = cast(expanded_stop, delta.dtype)
# If num < 0, we will throw exception in the range
# otherwise use the same div for delta
range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
    # Even though range supports an output dtype, it's limited
# (e.g. doesn't support half at the moment).
desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
mask = gen_math
|
johnsonc/OTM2
|
opentreemap/treemap/migrations/0061_change_zip_code_sort_order.py
|
Python
|
gpl-3.0
| 20,859
| 0.008102
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
class Migration(DataMigration):
def forwards(self, orm):
orm.Boundary.objects.filter(category='Zip Code').update(sort_order=5)
def backwards(self, orm):
pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.audit': {
'Meta': {'object_name': 'Audit'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'previous_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'ref': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.benefitcurrencyconversion': {
'Meta': {'object_name': 'BenefitCurrencyConversion'},
'co2_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'currency_symbol': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'electricity_kwh_to_currency': ('django.db.models.fields.FloatField', [], {}),
'h20_gal_to_currency': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'natural_gas_kbtu_to_currency': ('django.db.models.fields.FloatField', [], {}),
'nox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'o3_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'pm10_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'sox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'voc_lb_to_currency': ('django.db.models.fields.FloatField', [], {})
},
u'treemap.boundary': {
'Meta': {'object_name': 'Boundary'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.fieldpermission': {
'Meta': {'unique_together': "((u'model_name', u'field_name', u'role', u'instance'),)", 'object_name': 'FieldPermission'},
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
},
u'treemap.instance': {
'Meta': {'object_name': 'Instance'},
'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basemap_type': ('django.db.models.fields.CharField', [], {'default': "u'google'", 'max_length': '255'}),
'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
'bounds': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'config': ('treemap.json_field.JSONField', [], {'blank': 'True'}),
'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'default_role'", 'to': u"orm['treemap.Role']"}),
'eco_benefits_conversion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.BenefitCurrencyConversion']", 'null': 'True', 'blank': 'True'}),
'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'itree_region_default': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'url_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.User']", 'null': 'True', 'through': u"orm['treemap.InstanceUser']", 'blank': 'True'})
},
u'treemap.instanceuser': {
'Meta': {'object_name': 'InstanceUser'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'reputation': ('django.db.models
|
scripnichenko/nova
|
nova/api/openstack/compute/legacy_v2/contrib/flavormanage.py
|
Python
|
apache-2.0
| 4,241
| 0.000472
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
"""The Flavor Lifecycle API controller for the OpenStack API."""
_view_builder_class = flavors_view.ViewBuilder
def __init__(self):
super(FlavorManageController, self).__init__()
@wsgi.action("delete")
def _delete(self, req, id):
        context = req.environ['nova.context']
authorize(context)
try:
flavor = flavors.get_flavor_by_flavor_id(
id, ctxt=context, read_deleted="no")
        except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
flavors.destroy(flavor['name'])
return webob.Response(status_int=202)
@wsgi.action("create")
def _create(self, req, body):
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'flavor'):
msg = _("Invalid request body")
raise webob.exc.HTTPBadRequest(explanation=msg)
vals = body['flavor']
name = vals.get('name')
if name is None:
msg = _("A valid name parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
flavorid = vals.get('id')
memory = vals.get('ram')
if memory is None:
msg = _("A valid ram parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
vcpus = vals.get('vcpus')
if vcpus is None:
msg = _("A valid vcpus parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
root_gb = vals.get('disk')
if root_gb is None:
msg = _("A valid disk parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
swap = vals.get('swap', 0)
rxtx_factor = vals.get('rxtx_factor', 1.0)
is_public = vals.get('os-flavor-access:is_public', True)
try:
flavor = flavors.create(name, memory, vcpus, root_gb,
ephemeral_gb=ephemeral_gb,
flavorid=flavorid, swap=swap,
rxtx_factor=rxtx_factor,
is_public=is_public)
req.cache_db_flavor(flavor)
except (exception.FlavorExists,
exception.FlavorIdExists) as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.FlavorCreateFailed as exc:
raise webob.exc.HTTPInternalServerError(explanation=
exc.format_message())
return self._view_builder.show(req, flavor)
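    # A minimal sketch of a JSON request body accepted by _create above
    # (illustrative values only; name, ram, vcpus and disk are the required keys):
    #
    #     {"flavor": {"name": "m1.tiny", "id": "10", "ram": 512,
    #                 "vcpus": 1, "disk": 1,
    #                 "os-flavor-access:is_public": true}}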
class Flavormanage(extensions.ExtensionDescriptor):
"""Flavor create/delete API support."""
name = "FlavorManage"
alias = "os-flavor-manage"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_manage/api/v1.1")
updated = "2012-01-19T00:00:00Z"
def get_controller_extensions(self):
controller = FlavorManageController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
|
benzkji/django-ckeditor-link
|
ckeditor_link/templatetags/ckeditor_link_tags.py
|
Python
|
gpl-2.0
| 3,359
| 0.001191
|
import importlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from ckeditor_link import conf
from django import template
from django.template.defaultfilters import stringfilter
try:
module_name, class_name = conf.CKEDITOR_LINK_MODEL.rsplit(".", 1)
my_module = importlib.import_module(module_name)
ckeditor_link_class = getattr(my_module, class_name, None)
except ImportError:
ckeditor_link_class = None
register = template.Library()
@register.filter
@stringfilter
def ckeditor_link_add_links(html):
# lxml is not a dependency, but needed for this tag.
from lxml.html import fragment_fromstring, tostring
if not ckeditor_link_class:
        # TODO: use proper logging, or raise ImproperlyConfigured!
if settings.DEBUG:
msg = "Warning: CKEDITOR_LINK_MODEL (%s) could not be imported!?" % (conf.CKEDITOR_LINK_MODEL, )
raise ImproperlyConfigured(msg)
return html
fragment = fragment_fromstring("<div>" + html + "</div>")
links = fragment.cssselect('a')
for link in links:
if link.get('data-ckeditor-link', None):
link.attrib.pop('data-ckeditor-link')
kwargs = {}
dummy_link = ckeditor_link_class()
for key, value in link.items():
if key.startswith('data-'):
new_key = key.replace('data-', '', 1)
# DEPRECATED: use CKEDITOR_LINK_ATTR_MODIFIERS setting!
if new_key == 'page_2':
new_key = 'cms_page' # backward compat, for 0.2.0
if new_key == 'cms_page_2':
new_key = 'cms_page'
# until here
if hasattr(dummy_link, new_key):
if hasattr(dummy_link, new_key + "_id"):
# set fk directly
new_key = new_key + "_id"
if not value:
value = None
kwargs[new_key] = value
link.attrib.pop(key)
for key, formatted_string in conf.CKEDITOR_LINK_ATTR_MODIFIERS.items():
try:
kwargs[key] = formatted_string.format(**kwargs)
except KeyError:
                # this is an option, we don't know at all how our link is/was built (ages ago)
pass
|
try:
# this can go wrong with fk and the like
real_link = ckeditor_link_class(**kwargs)
link.set('href', real_link.get_link())
if getattr(real_link, 'get_link_target', None):
link.set('target', real_link.get_link_target())
if getattr(real_link, 'get_link_style', None):
link.set('class', real_link.get_link_style())
                if getattr(real_link, 'get_link_attrs', None):
for attr, value in real_link.get_link_attrs().items():
link.set(attr, value)
except (ValueError, ObjectDoesNotExist):
continue
# arf: http://makble.com/python-why-lxml-etree-tostring-method-returns-bytes
# beautifulsoup to the rescue!
return tostring(fragment, encoding='unicode')
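# A minimal usage sketch in a Django template (illustration only; the model and
# field names are hypothetical):
#
#     {% load ckeditor_link_tags %}
#     {{ some_model.richtext_field|ckeditor_link_add_links }}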
|
dustcloud/dustlink
|
DustLinkData/PersistenceEngine.py
|
Python
|
bsd-3-clause
| 4,768
| 0.014681
|
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('PersistenceEngine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import threading
import traceback
class PersistenceEngine(threading.Thread):
DEFAULT_SAVEDDATALOCATION = None
DEFAULT_SAVE_PERIOD = 60 # in seconds
PERIOD_WAKEUP = 1 # in seconds
def __init__(self,getDataCb):
# store params
self.getDataCb = getDataCb
# local variables
self.varLock = threading.RLock()
self.savePeriod = self.DEFAULT_SAVE_PERIOD
self.savedDataLocation = self.DEFAULT_SAVEDDATALOCATION
self.runningPeriod = 0
self.goOn = True
self.closingSem = threading.Lock()
self.closingSem.acquire()
        # initialize parent class
threading.Thread.__init__(self)
self.name = "PersistenceEngine"
def run(self):
# log
log.info('thread started')
try:
# run in loop until time to stop
while self._getGoOn():
time.sleep(self.PERIOD_WAKEUP)
with self.varLock:
self.runningPeriod += self.PERIOD_WAKEUP
if self.runningPeriod >= self.getSavePeriod():
self._performSaveRoutine()
self.runningPeriod = 0
# time to stop, save one last time
self._performSaveRoutine()
# release closingSem
self.closingSem.release()
except Exception as err:
output = []
output += ['===== crash in thread {0} ====='.format(self.name)]
output += ['\nerror:\n']
output += [str(err)]
output += ['\ncall stack:\n']
|
output += [traceback.format_exc()]
output = '\n'.join(output)
            print output # critical error
log.critical(output)
# release closingSem
self.closingSem.release()
raise
finally:
# log
log.info('thread ended')
#======================== public ==========================================
def setSavePeriod(self,newSavePeriod):
assert type(newSavePeriod)==int
self.varLock.acquire()
self.savePeriod = newSavePeriod
self.varLock.release()
def getSavePeriod(self):
self.varLock.acquire()
returnVal = self.savePeriod
self.varLock.release()
return returnVal
def setSavedDataLocation(self,newSavedDataLocation):
self.varLock.acquire()
self.savedDataLocation = newSavedDataLocation
self.varLock.release()
def getSavedDataLocation(self):
self.varLock.acquire()
returnVal = self.savedDataLocation
self.varLock.release()
return returnVal
def stop(self):
log.info("stop called")
self._setGoOn(False)
self.closingSem.acquire()
log.info("stopped")
def indicateChange(self):
'''
Some important data has been changed and data should be saved soon.
'''
with self.varLock:
self.runningPeriod = self.getSavePeriod()
#======================== virtual methods =================================
def retrieveData(self):
raise NotImplementedError() # to be implemented by child class
def saveData(self,dataToSave):
raise NotImplementedError() # to be implemented by child class
def quarantineData(self):
raise NotImplementedError() # to be implemented by child class
#======================== private =========================================
def _performSaveRoutine(self):
# get a copy of the data to save
dataToSave = self.getDataCb()
# save the data
self.saveData(dataToSave)
def _getGoOn(self):
self.varLock.acquire()
returnVal = self.goOn
self.varLock.release()
return returnVal
def _setGoOn(self,newGoOn):
assert newGoOn in [True,False]
self.varLock.acquire()
self.goOn = newGoOn
self.varLock.release()
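    # A minimal subclass sketch (hypothetical FilePersistenceEngine, not part of
    # this module; assumes a picklable data object and that pickle is imported):
    #
    #     class FilePersistenceEngine(PersistenceEngine):
    #         def saveData(self, dataToSave):
    #             with open(self.getSavedDataLocation(), 'wb') as f:
    #                 pickle.dump(dataToSave, f)
    #         def retrieveData(self):
    #             with open(self.getSavedDataLocation(), 'rb') as f:
    #                 return pickle.load(f)
    #
    #     engine = FilePersistenceEngine(getDataCb=lambda: {'state': 1})
    #     engine.setSavedDataLocation('state.pkl')
    #     engine.start()           # saves every savePeriod seconds
    #     engine.indicateChange()  # forces a save at the next wakeup
    #     engine.stop()            # performs one final save, then exits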
|
sassoftware/amiconfig
|
amiconfig_test/plugin_disablesshpasswdauth_test.py
|
Python
|
apache-2.0
| 1,526
| 0.001966
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import testsuite
# Bootstrap the testsuite
testsuite.setup()
import testbase
class PluginTest(testbase.BasePluginTest):
pluginName = 'disablesshpasswdauth'
PluginData = ""
SshdConfigContents = """\
Dummy line 1
Dummy line 2
PasswordAuthentication yes
# PasswordAuthentication yes
Dummy line 3
"""
def setUpExtra(self):
sshdConfigDir = self.mkdirs("etc/ssh")
|
sshdConfig = self.sshdConfigFile = os.path.join(
sshdConfigDir, "sshd_config")
file(sshdConfig, "w").write(self.SshdConfigContents)
def testFiles(self):
self.assertEquals(file(self.sshdConfigFile).read(),
self.SshdConfigContents.replace(
'\nPasswordAuthentication yes',
'\nPasswordAuthentication no').replace(
'# PasswordAuthentication yes',
'# # PasswordAuthentication yes')
)
|
ehudmagal/robotqcapp
|
Utils/RobotQAUtils/graphics/drawTable.py
|
Python
|
bsd-3-clause
| 435
| 0.036782
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
def printLegend(rowLabels,colLabels,params):
fig = plt.figure()
    col_labels=colLabels
row_labels=rowLabels
table_vals=params
the_table = plt.table(cellText=table_vals,
colWidths = [0.2]*4,
rowLabels=row_labels,
colLabels=col_labels,
loc='center')
    plt.text(12,3.4,'Table Title',size=8)
    plt.title('Legend for experiments')
plt.show()
|
mahak/neutron
|
neutron/tests/unit/db/test_ovn_revision_numbers_db.py
|
Python
|
apache-2.0
| 12,345
| 0
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.api.definitions import security_groups_remote_address_group \
as sgag_def
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.db import api as db_api
from oslo_db import exception as db_exc
from neutron.api import extensions
from neutron.common import config
from neutron.db.models import ovn as ovn_models
from neutron.db import ovn_revision_numbers_db as ovn_rn_db
import neutron.extensions
from neutron.services.revisions import revision_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit.extensions import test_securitygroup
EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__)
PLUGIN_CLASS = (
'neutron.tests.unit.db.test_ovn_revision_numbers_db.TestMaintenancePlugin')
class TestRevisionNumber(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
super(TestRevisionNumber, self).setUp()
self.ctx = context.get_admin_context()
self.addCleanup(self._delete_objs)
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
self.net = self.deserialize(self.fmt, res)['network']
def _delete_objs(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self.ctx.session.query(
ovn_models.OVNRevisionNumbers).delete()
def _create_initial_revision(self, resource_uuid, resource_type,
revision_number=ovn_rn_db.INITIAL_REV_NUM,
may_exist=False):
ovn_rn_db.create_initial_revision(
self.ctx, resource_uuid, resource_type,
revision_number=revision_number, may_exist=may_exist)
def test_bump_revision(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self._create_initial_revision(self.net['id'],
ovn_rn_db.TYPE_NETWORKS)
self.net['revision_number'] = 123
ovn_rn_db.bump_revision(self.ctx, self.net,
ovn_rn_db.TYPE_NETWORKS)
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertEqual(123, row.revision_number)
def test_bump_older_revision(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self._create_initial_revision(
self.net['id'], ovn_rn_db.TYPE_NETWORKS,
revision_number=124)
self.net['revision_number'] = 1
ovn_rn_db.bump_revision(self.ctx, self.net,
ovn_rn_db.TYPE_NETWORKS)
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertEqual(124, row.revision_number)
@mock.patch.object(ovn_rn_db.LOG, 'warning')
def test_bump_revision_row_not_found(self, mock_log):
with db_api.CONTEXT_WRITER.using(self.ctx):
self.net['revision_number'] = 123
ovn_rn_db.bump_revision(self.ctx, self.net,
ovn_rn_db.TYPE_NETWORKS)
# Assert the revision number wasn't bumped
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertEqual(123, row.revision_number)
self.assertIn('No revision row found for',
mock_log.call_args[0][0])
def test_delete_revision(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self._create_initial_revision(self.net['id'],
ovn_rn_db.TYPE_NETWORKS)
ovn_rn_db.delete_revision(self.ctx, self.net['id'],
ovn_rn_db.TYPE_NETWORKS)
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertIsNone(row)
def test_create_initial_revision_may_exist_duplicated_entry(self):
try:
with db_api.CONTEXT_WRITER.using(self.ctx):
args = (self.net['id'], ovn_rn_db.TYPE_NETWORKS)
self._create_initial_revision(*args)
# DBDuplicateEntry is raised when may_exist is False (default)
self._create_initial_revision(*args)
except Exception as exc:
if type(exc) is not db_exc.DBDuplicateEntry:
self.fail("create_initial_revision with the same parameters "
"should have raisen a DBDuplicateEntry exception")
with db_api.CONTEXT_WRITER.using(self.ctx):
args = (self.net['id'], ovn_rn_db.TYPE_NETWORKS)
self._create_initial_revision(*args)
try:
self._create_initial_revision(*args, may_exist=True)
except db_exc.DBDuplicateEntry:
self.fail("create_initial_revision shouldn't raise "
"DBDuplicateEntry when may_exist is True")
class TestMaintenancePlugin(test_securitygroup.SecurityGroupTestPlugin,
test_l3.TestL3NatBasePlugin):
__native_pagination_support = True
__native_sorting_support = True
supported_extension_aliases = ['external-net', 'security-group',
sgag_def.ALIAS]
class TestRevisionNumberMaintenance(test_securitygroup.SecurityGroupsTestCase,
test_l3.L3NatTestCaseMixin):
def setUp(self):
service_plugins = {
'router':
'neutron.tests.unit.extensions.test_l3.TestL3NatServicePlugin'}
l3_plugin = test_l3.TestL3NatServicePlugin()
sec_plugin = test_securitygroup.SecurityGroupTestPlugin()
ext_mgr = extensions.PluginAwareExtensionManager(
EXTENSIONS_PATH, {'router': l3_plugin, 'sec': sec_plugin}
)
super(TestRevisionNumberMaintenance, self).setUp(
plugin=PLUGIN_CLASS, service_plugins=service_plugins)
app = config.load_paste_app('extensions_test_app')
self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.session = db_api.get_writer_session()
revision_plugin.RevisionPlugin()
self.net = self._make_network(self.fmt, 'net1', True)['network']
# Mock the default value for INCONSISTENCIES_OLDER_THAN so
# tests won't need to wait for the timeout in order to validate
# the database inconsistencies
self.older_than_mock = mock.patch(
            'neutron.db.ovn_revision_numbers_db.INCONSISTENCIES_OLDER_THAN',
            -1)
self.older_than_mock.start()
self.addCleanup(self.older_than_mock.stop)
self.ctx = context.get_admin_context()
def _create_initial_revision(self, resource_uuid, resource_type,
revision_number=ovn_rn_db.INITIAL_REV_NUM,
may_exist=False):
with db_api.CONTEXT_WRITER.using(self.ctx):
ovn_rn_db.create_initial_revision(
self.ctx, resource_uuid, resource_type,
revision_number=revision_number, may_exist=may_exist)
def test_get_inconsistent_resources(self):
        # Set the initial revision to -1 to force it to be inconsistent
self._create_initial_revision(
self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=-1)
res = ovn_rn_db.get_inconsistent_resources(self.ctx)
self.assertEqual(1, len(res))
self.assertEqual(self.net['id'], res[0].resource_uuid)
def test_get_inconsistent
|
flexVDI/cerbero
|
recipes/custom.py
|
Python
|
lgpl-2.1
| 2,085
| 0.003357
|
# -*- Mode: Python -*- vi:si:et:sw=4:sts=4:ts=4:syntax=python
import os
import shutil
from collections import defaultdict
from cerbero.build import recipe
from cerbero.build.source import SourceType
from cerbero.build.cookbook import CookBook
from cerbero.config import Platform
from cerbero.enums import License
from cerbero.utils import shell, to_unixpath
class GStreamer(recipe.Recipe):
licenses = [License.LGPLv2Plus]
version = '1.13.0.1'
commit = 'origin/master'
def list_gstreamer_1_0_plugins_by_category(config):
cookbook = CookBook(config)
plugins = defaultdict(list)
for r in ['gstreamer-1.0', 'gst-plugins-base-1.0', 'gst-plugins-good-1.0',
'gst-plugins-bad-1.0', 'gst-plugins-ugly-1.0',
'gst-libav-1.0', 'gst-editing-services-1.0', 'gst-rtsp-server-1.0']:
r = cookbook.get_recipe(r)
for attr_name in dir(r):
if attr_name.startswith('files_plugins_'):
cat_name = attr_name[len('files_plugins_'):]
plugins_list = getattr(r, attr_name)
elif attr_name.startswith('platform_files_plugins_'):
cat_name = attr_name[len('platform_files_plugins_'):]
plugins_dict = getattr(r, attr_name)
plugins_list = plugins_dict.get(config.target_platform, [])
else:
continue
for e in plugins_list:
|
if not e.startswith('lib/gstreamer-'):
continue
c = e.split('/')
if len(c) != 3:
continue
e = c[2]
                # we only care about files with the replaceable %(mext)s extension
if not e.endswith ('%(mext)s'):
continue
if e.startswith('libgst'):
e = e[6:-8]
else:
e = e[3:-8]
plugins[cat_name].append(e)
return plugins
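# A worked illustration of the name slicing above (illustration only): for an
# entry such as 'lib/gstreamer-1.0/libgstcoreelements%(mext)s', the basename
# 'libgstcoreelements%(mext)s' starts with 'libgst', so e[6:-8] strips the
# 'libgst' prefix and the 8-character '%(mext)s' suffix:
#
#     >>> 'libgstcoreelements%(mext)s'[6:-8]
#     'coreelements'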
|
google/llvm-propeller
|
lldb/packages/Python/lldbsuite/test/dotest_args.py
|
Python
|
apache-2.0
| 11,181
| 0.003935
|
from __future__ import absolute_import
# System modules
import argparse
import sys
import os
import textwrap
# LLDB modules
from . import configuration
def create_parser():
parser = argparse.ArgumentParser(
description='description',
prefix_chars='+-',
add_help=False)
group = None
# Helper function for boolean options (group will point to the current
# group when executing X)
X = lambda optstr, helpstr, **kwargs: group.add_argument(
optstr, help=helpstr, action='store_true', **kwargs)
group = parser.add_argument_group('Help')
group.add_argument(
'-h',
'--help',
dest='h',
action='store_true',
help="Print this help message and exit. Add '-v' for more detailed help.")
# C and Python toolchain options
group = parser.add_argument_group('Toolchain options')
group.add_argument(
'-A',
'--arch',
metavar='arch',
dest='arch',
help=textwrap.dedent('''Specify the architecture(s) to test. This option can be specified more than once'''))
group.add_argument('-C', '--compiler', metavar='compiler', dest='compiler', help=textwrap.dedent(
'''Specify the compiler(s) used to build the inferior executables. The compiler path can be an executable basename or a full path to a compiler executable. This option can be specified multiple times.'''))
if sys.platform == 'darwin':
group.add_argument('--apple-sdk', metavar='apple_sdk', dest='apple_sdk', default="", help=textwrap.dedent(
'''Specify the name of the Apple SDK (macosx, macosx.internal, iphoneos, iphoneos.internal, or path to SDK) and use the appropriate tools from that SDK's toolchain.'''))
# FIXME? This won't work for different extra flags according to each arch.
group.add_argument(
'-E',
metavar='extra-flags',
        help=textwrap.dedent('''Specify the extra flags to be passed to the toolchain when building the inferior programs to be debugged
                             suggestions: do not lump the "-A arch1 -A arch2" together such that the -E option applies to only one of the architectures'''))
group.add_argument('--dsymutil', metavar='dsymutil', dest='dsymutil', help=textwrap.dedent('Specify which dsymutil to use.'))
group.add_argument('--yaml2obj', metavar='yaml2obj', dest='yaml2obj', help=textwrap.dedent('Specify which yaml2obj binary to use.'))
group.add_argument('--filecheck', metavar='filecheck', dest='filecheck', help=textwrap.dedent('Specify which FileCheck binary to use.'))
# Test filtering options
group = parser.add_argument_group('Test filtering options')
group.add_argument(
'-f',
metavar='filterspec',
action='append',
help=('Specify a filter, which looks like "TestModule.TestClass.test_name". '+
'You may also use shortened filters, such as '+
'"TestModule.TestClass", "TestClass.test_name", or just "test_name".'))
group.add_argument(
'-p',
metavar='pattern',
help='Specify a regexp filename pattern for inclusion in the test suite')
group.add_argument('--excluded', metavar='exclusion-file', action='append', help=textwrap.dedent(
'''Specify a file for tests to exclude. File should contain lists of regular expressions for test files or methods,
with each list under a matching header (xfail files, xfail methods, skip files, skip methods)'''))
group.add_argument(
'-G',
'--category',
metavar='category',
action='append',
dest='categories_list',
help=textwrap.dedent('''Specify categories of test cases of interest. Can be specified more than once.'''))
group.add_argument(
'--skip-category',
metavar='category',
action='append',
dest='skip_categories',
help=textwrap.dedent('''Specify categories of test cases to skip. Takes precedence over -G. Can be specified more than once.'''))
group.add_argument(
'--xfail-category',
metavar='category',
action='append',
dest='xfail_categories',
help=textwrap.dedent('''Specify categories of test cases that are expected to fail. Can be specified more than once.'''))
# Configuration options
group = parser.add_argument_group('Configuration options')
group.add_argument(
'--framework',
metavar='framework-path',
help='The path to LLDB.framework')
group.add_argument(
'--executable',
metavar='executable-path',
help='The path to the lldb executable')
group.add_argument(
'--server',
metavar='server-path',
help='The path to the debug server executable to use')
group.add_argument(
'--out-of-tree-debugserver',
dest='out_of_tree_debugserver',
action='store_true',
help='A flag to indicate an out-of-tree debug server is being used')
group.add_argument(
'--dwarf-version',
metavar='dwarf_version',
dest='dwarf_version',
type=int,
help='Override the DWARF version.')
group.add_argument(
'--setting',
metavar='SETTING=VALUE',
dest='settings',
type=str,
nargs=1,
action='append',
help='Run "setting set SETTING VALUE" before executing any test.')
group.add_argument(
'-s',
metavar='name',
help='Specify the name of the dir created to store the session files of tests with errored or failed status. If not specified, the test driver uses the timestamp as the session dir name')
group.add_argument(
'-S',
'--session-file-format',
default=configuration.session_file_format,
metavar='format',
help='Specify session file name format. See configuration.py for a description.')
group.add_argument(
'-y',
type=int,
metavar='count',
help="Specify the iteration count used to collect our benchmarks. An example is the number of times to do 'thread step-over' to measure stepping speed.")
group.add_argument(
'-#',
type=int,
metavar='sharp',
dest='sharp',
help='Repeat the test suite for a specified number of times')
group.add_argument('--channel', metavar='channel', dest='channels', action='append', help=textwrap.dedent(
"Specify the log channels (and optional categories) e.g. 'lldb all' or 'gdb-remote packets' if no categories are specified, 'default' is used"))
group.add_argument(
'--log-success',
dest='log_success',
action='store_true',
help="Leave logs/traces even for successful test runs (useful for creating reference log files during debugging.)")
group.add_argument(
'--codesign-identity',
metavar='Codesigning identity',
default='lldb_codesign',
help='The codesigning identity to use')
group.add_argument(
'--build-dir',
dest='test_build_dir',
metavar='Test build directory',
default='lldb-test-build.noindex',
help='The root build directory for the tests. It will be removed before running.')
group.add_argument(
'--lldb-module-cache-dir',
dest='lldb_module_cache_dir',
metavar='The clang module cache directory used by LLDB',
help='The clang module cache directory used by LLDB. Defaults to <test build directory>/module-cache-lldb.')
group.add_argument(
'--clang-module-cache-dir',
dest='clang_module_cache_dir',
metavar='The clang module cache directory used by Clang',
help='The clang module cache directory used in the Make files by Clang while building tests. Defaults to <test build directory>/module-cache-clang.')
group.add_argument(
'--lldb-libs-dir',
dest='lldb_libs_dir',
metavar='path',
help='The path to LLDB library directory (containing liblldb)')
group.add_argument(
'--enable-plugin',
dest='enabled_plugins',
action='append',
|
shunw/pythonML_code
|
eg2_real_world_data.py
|
Python
|
mit
| 4,961
| 0.01794
|
from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.metrics import classification_report_imbalanced
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids
from imblearn.under_sampling import NearMiss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
def scatter_plot_2d(x_ls, y_ls):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y_ls))])
# plot class samples
for idx, c1 in enumerate(np.unique(y_ls)):
plt.scatter(x = x_ls[y_ls == c1, 0], y = x_ls[y_ls == c1, 1],
alpha = .8, c = cmap(idx),
marker = markers[idx], label = c1)
# plt.show()
def deci_bdry_plot_2d(x_ls, y_ls, classifier, resolution = .02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y_ls))])
# plot the decision surface
x1_min, x1_max = x_ls[:, 0].min() - 1, x_ls[:, 0].max() + 1
x2_min, x2_max = x_ls[:, 1].min() - 1, x_ls[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha = .4, cmap = cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, c1 in enumerate(np.unique(y_ls)):
plt.scatter(x = x_ls[y_ls == c1, 0], y = x_ls[y_ls == c1, 1],
alpha = .8, c = cmap(idx),
marker = markers[idx], label = c1)
# plt.show()
def multi_class_under_sampling():
'''
EXAMPLE: Multiclass classification with under-sampling
'''
RANDOM_STATE = 42
iris = load_iris()
X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
# print (X[:, [1, 2]])
# print (type(y))
X_train, X_test, y_train, y_test = train_test_split(X[:, [1, 2]], y, random_state = RANDOM_STATE)
# print ('Training target statistics: {}'.format(Counter(y_train)))
# print ('Testing target statistics: {}'.format(Counter(y_test)))
nm = NearMiss(version = 1, random_state = RANDOM_STATE)
X_resample_nm, y_resample_nm = nm.fit_sample(X_train, y_train)
cc = ClusterCentroids(random_state = 0)
X_resample_cc, y_resample_cc = cc.fit_sample(X_train, y_train)
'''plot two in one frame'''
fig, (ax0, ax1) = plt.subplots(ncols = 2)
# ax0, ax1 = axes.flatten()
ax0 = scatter_plot_2d(X_resample_nm, y_resample_nm)
ax1 = scatter_plot_2d(X_resample_nm, y_resample_nm)
# fig.tight_layout()
plt.show()
# pipeline_nm = make_pipeline(NearMiss(version = 1, random_state = RANDOM_STATE), LinearSVC(random_state = RANDOM_STATE))
# pipeline_nm.fit(X_train, y_train)
# pipeline_cc = make_pipeline(ClusterCentroids(random_state = 0), LinearSVC(random_state = RANDOM_STATE))
# pipeline_cc.fit(X_train, y_train)
# print (classification_report_imbalanced(y_test, pipeline_nm.predict(X_test)))
# deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_nm)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.scatter_plot(X[:, [1, 2]], y, pipeline)
# print (classification_report_imbalanced(y_test, pipeline.predict(X_test)))
    pipeline_1 = make_pipeline(NearMiss(version = 1, random_state = RANDOM_STATE), LinearSVC(random_state = RANDOM_STATE))
pipeline_1.fit(X_train, y_train)
ax2 = fig.add_subplot(212)
ax2.scatter_plot(X[:, [1, 2]], y, pipeline_1)
plt.show()
def wendy_try_iris():
'''
EXAMPLE: Multiclass classification with under-sampling
'''
RANDOM_STATE = 42
iris = load_iris()
    # X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
X = pd.DataFrame(iris.data, columns = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width'])
y = pd.DataFrame(iris.target, columns = ['Species'])
df = X
df['Species'] = y
'''pair plot for the features'''
# sns.set(style='whitegrid', context='notebook')
# cols = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width']
# sns.pairplot(df, vars = cols, size=2.5, hue = 'Species')
# plt.show()
'''dimension reduction'''
# print (classification_report_imbalanced(y_test, pipeline_cc.predict(X_test)))
# deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_cc)
if __name__ == '__main__':
wendy_try_iris()
|
Sarthak30/Codeforces
|
fox_and_snake.py
|
Python
|
gpl-2.0
| 361
| 0.113573
|
a,b = map(int,raw_input().split())
i=0
while(i<a):
j=0
c=[]
if(i%2==0):
while(j<b):
c.append('#')
j=j+1
print (''.join(c))
else:
k = int(i/2)
if (k%2==0):
while(j<(b-1)):
c.append(".")
j=j+1
c.append("#")
print (''.join(c))
else:
c.append('#')
while(j<(b-1)):
c.append(".")
j=j+1
print (''.join(c))
i=i+1
| |
SublimeText-Markdown/TableEditor
|
table_plugin.py
|
Python
|
gpl-3.0
| 18,180
| 0.000935
|
# table_plugin.py - sublime plugins for pretty print text table
# Copyright (C) 2012 Free Software Foundation, Inc.
# Author: Valery Kocubinsky
# Package: SublimeTableEditor
# Homepage: https://github.com/vkocubinsky/SublimeTableEditor
# This file is part of SublimeTableEditor.
# SublimeTableEditor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# SublimeTableEditor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SublimeTableEditor. If not, see <http://www.gnu.org/licenses/>.
import sublime
import sublime_plugin
import re
try:
from . import table_lib as tlib
from . import table_base as tbase
except ValueError:
import table_lib as tlib
import table_base as tbase
class TableContext:
def __init__(self, view, sel, syntax):
self.view = view
(sel_row, sel_col) = self.view.rowcol(sel.begin())
self.syntax = syntax
self.first_table_row = self._get_first_table_row(sel_row, sel_col)
self.last_table_row = self._get_last_table_row(sel_row, sel_col)
self.table_text = self._get_table_text(self.first_table_row, self.last_table_row)
self.visual_field_num = self._visual_field_num(sel_row, sel_col)
self.row_num = sel_row - self.first_table_row
self.table_pos = tbase.TablePos(self.row_num, self.visual_field_num)
self.table = self.syntax.table_parser.parse_text(self.table_text)
self.table_driver = self.syntax.table_driver
self.field_num = self.table_driver.visual_to_internal_index(self.table, self.table_pos).field_num
def _get_table_text(self, first_table_row, last_table_row):
begin_point = self.view.line(self.view.text_point(first_table_row, 0)
).begin()
end_point = self.view.line(self.view.text_point(last_table_row, 0)
).end()
return self.view.substr(sublime.Region(begin_point, end_point))
def _get_last_table_row(self, sel_row, sel_col):
row = sel_row
last_table_row = sel_row
last_line = self.view.rowcol(self.view.size())[0]
while (row <= last_line and self._is_table_row(row)):
last_table_row = row
row = row + 1
return last_table_row
def _get_first_table_row(self, sel_row, sel_col):
row = sel_row
first_table_row = sel_row
while (row >= 0 and self._is_table_row(row)):
first_table_row = row
row = row - 1
return first_table_row
def _is_table_row(self, row):
text = self._get_text(row)
return self.syntax.table_parser.is_table_row(text)
def _visual_field_num(self, sel_row, sel_col):
line_text = self._get_text(sel_row)
line = self.syntax.line_parser.parse(line_text)
return line.field_num(sel_col)
def _get_text(self, row):
point = self.view.text_point(row, 0)
region = self.view.line(point)
text = self.view.substr(region)
return text
class AbstractTableCommand(sublime_plugin.TextCommand):
def detect_syntax(self):
if self.view.settings().has("table_editor_syntax"):
syntax_name = self.view.settings().get("table_editor_syntax")
else:
syntax_name = self.auto_detect_syntax_name()
table_configuration = tbase.TableConfiguration()
border_style = (self.view.settings().get("table_editor_border_style", None)
or self.view.settings().get("table_editor_style", None))
if border_style == "emacs":
table_configuration.hline_out_border = '|'
table_configuration.hline_in_border = '+'
elif border_style == "grid":
table_configuration.hline_out_border = '+'
table_configuration.hline_in_border = '+'
elif border_style == "simple":
table_configuration.hline_out_border = '|'
table_configuration.hline_in_border = '|'
if self.view.settings().has("table_editor_custom_column_alignment"):
table_configuration.custom_column_alignment = self.view.settings().get("table_editor_custom_column_alignment")
if self.view.settings().has("table_editor_keep_space_left"):
table_configuration.keep_space_left = self.view.settings().get("table_editor_keep_space_left")
if self.view.settings().has("table_editor_align_number_right"):
table_configuration.align_number_right = self.view.settings().get("table_editor_align_number_right")
if self.view.settings().has("table_editor_detect_header"):
table_configuration.detect_header = self.view.settings().get("table_editor_detect_header")
if self.view.settings().has("table_editor_intelligent_formatting"):
table_configuration.intelligent_formatting = self.view.settings().get("table_editor_intelligent_formatting")
syntax = tlib.create_syntax(syntax_name, table_configuration)
return syntax
def auto_detect_syntax_name(self):
view_syntax = self.view.settings().get('syntax')
if (view_syntax == 'Packages/Markdown/MultiMarkdown.tmLanguage' or
view_syntax == 'Packages/Markdown/Markdown.tmLanguage'):
return "MultiMarkdown"
elif view_syntax == 'Packages/Textile/Textile.tmLanguage':
return "Textile"
elif (view_syntax == 'Packages/RestructuredText/reStructuredText.tmLanguage'):
return "reStructuredT
|
ext"
else:
return "Simple"
def merge(self, edit, ctx):
table = ctx.table
new_lines = table.render_lines()
first_table_row = ctx.first_table_row
last_table_row = ctx.last_table_row
rows = range(first_table_row, last_table_row + 1)
for row, new_text in zip(rows, new_lines):
            region = self.view.line(self.view.text_point(row, 0))
old_text = self.view.substr(region)
if old_text != new_text:
self.view.replace(edit, region, new_text)
#case 1: some lines inserted
if len(rows) < len(new_lines):
row = last_table_row
for new_text in new_lines[len(rows):]:
end_point = self.view.line(self.view.text_point(row, 0)).end()
self.view.insert(edit, end_point, "\n" + new_text)
row = row + 1
#case 2: some lines deleted
elif len(rows) > len(new_lines):
for row in rows[len(new_lines):]:
region = self.view.line(self.view.text_point(row, 0))
self.view.erase(edit, region)
def create_context(self, sel):
return TableContext(self.view, sel, self.detect_syntax())
def run(self, edit):
new_sels = []
for sel in self.view.sel():
new_sel = self.run_one_sel(edit, sel)
new_sels.append(new_sel)
self.view.sel().clear()
for sel in new_sels:
self.view.sel().add(sel)
self.view.show(sel, False)
def run_one_sel(self, edit, sel):
ctx = self.create_context(sel)
try:
msg, table_pos = self.run_operation(ctx)
self.merge(edit, ctx)
sublime.status_message("Table Editor: {0}".format(msg))
return self.table_pos_sel(ctx, table_pos)
except tbase.TableException as err:
sublime.status_message("Table Editor: {0}".format(err))
return self.table_pos_sel(ctx, ctx.table_pos)
def visual_field_sel(self, ctx, row_num, visual_field_num):
if ctx.table.empty():
pt = self.view.text_point(ctx.first_table_row, 0)
else:
pos = tbase.TablePos(row_num, visual_field_num)
c

kiae-grid/panda-bigmon-core | core/htcondor/urls.py | Python | apache-2.0 | 526 | 0.013308
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
### #FIXME admin.autodiscover()
import views as htcondor_views
#from ..api.htcondorapi import views as htcondorapi_views
urlpatterns = patterns('',
### HTCondor Jobs
url(r'^/$', htcondor_views.list3HTCondorJobs, name='listHTCondorJobs'),
url(r'^/(?P<globaljobid>[-A-Za-z0-9_.#]+)/$', htcondor_views.htcondorJobDetails, name='HTCondorJobDetails'),
)

nanomolina/JP | src/odontology/register/apps.py | Python | apache-2.0 | 132 | 0
from __future__ import unicode_literals
from django.apps import AppConfig
class RegisterConfig(AppConfig):
name = 'register'

kstilwell/tcex | tcex/logger/rotating_file_handler_custom.py | Python | apache-2.0 | 3,432 | 0.001166
"""API Handler Class"""
# standard library
import gzip
import os
import shutil
from logging.handlers import RotatingFileHandler
from typing import Optional
class RotatingFileHandlerCustom(RotatingFileHandler):
"""Logger handler for ThreatConnect Exchange File logging."""
def __init__(
self,
filename: str,
mode: Optional[str] = 'a',
maxBytes: Optional[int] = 0,
backupCount: Optional[int] = 0,
encoding: Optional[str] = None,
delay: Optional[bool] = False,
):
"""Customize RotatingFileHandler to create full log path.
Args:
filename: The name of the logfile.
mode: The write mode for the file.
maxBytes: The max file size before rotating.
backupCount: The maximum # of backup files.
encoding: The log file encoding.
delay: If True, then file opening is deferred until the first call to emit().
"""
if encoding is None and os.getenv('LANG') is None:
encoding = 'UTF-8'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename), exist_ok=True)
RotatingFileHandler.__init__(self, filename, mode, maxBytes, backupCount, encoding, delay)
# set namer
self.namer = self.custom_gzip_namer
self.rotator = self.custom_gzip_rotator
@staticmethod
def custom_gzip_namer(name):
"""Namer for rotating log handler with gz extension.
Args:
name: The current name of the logfile.
"""
return name + '.gz'
@staticmethod
def custom_gzip_rotator(source: str, dest: str) -> None:
"""Rotate and compress log file.
Args:
source: The source filename.
dest: The destination filename.
"""
with open(source, 'rb') as f_in:
with gzip.open(dest, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(source)
# class RotatingFileHandlerFormatter(logging.Formatter):
# """Custom logging formatter that allows a different format depending on the logging level."""
#
# def __init__(self):
# """Initialize formatter parent."""
# super().__init__(fmt='%(levelno)d: %(msg)s', datefmt=None, style='%')
#
# def format(self, record):
# """Format file handle log event according to logging level.
#
# Args:
# record (obj): The record to be logged.
# """
# # Replace the original format with one customized by logging level
# self._style._fmt = self.standard_format
# if record.levelno < 10: # <= logging.DEBUG
# self._style._fmt = self.trace_format
#
#         # Call the original formatter class to do the grunt work
#         result = logging.Formatter.format(self, record)
#
# return result
#
# @property
# def standard_format(self):
# """Return the standard log format"""
# return (
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s '
# '(%(filename)s:%(funcName)s:%(lineno)d:%(threadName)s)'
# )
#
# @property
# def trace_format(self):
# """Return the standard log format"""
# return (
# '%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s:%(lineno)d] %(message)s '
# '(%(filename)s:%(threadName)s)'
# )
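
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The log path, size
# threshold, and logger name are illustrative only; rotated backups are
# compressed to .gz files by the custom namer/rotator defined above.
if __name__ == '__main__':
    import logging  # needed only for this demo

    demo_logger = logging.getLogger('rotating_file_handler_demo')
    demo_handler = RotatingFileHandlerCustom(
        'log/demo.log',  # the parent directory is created automatically
        maxBytes=10 * 1024 * 1024,  # rotate after roughly 10 MB
        backupCount=5,  # keep five compressed backups
    )
    demo_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    demo_logger.addHandler(demo_handler)
    demo_logger.setLevel(logging.INFO)
    demo_logger.info('this record ends up in log/demo.log')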

petrus-v/server-tools | disable_openerp_online/__openerp__.py | Python | agpl-3.0 | 1,917 | 0
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Remove openerp.com bindings",
"version": "1.1",
"author": "Therp BV",
"complexity": "normal",
"description": """
This module deactivates all bindings to openerp.com that
come with the standard code:
* update notifier code is deactivated and the function is overwritten
* apps and updates menu items in settings are removed
* help and account menu items in user menu are removed
* prevent lookup of OPW for current database uuid and resulting"""
""" 'unsupported' warning
""",
"category": "",
"depends": [
        'base',
'mail',
],
"data": [
'data/ir_ui_menu.xml',
'data/ir_cron.xml',
],
"js": [
'static/src/js/disable_openerp_online.js',
],
"css": [
],
"qweb": [
'static/src/xml/base.xml',
],
"auto_install": False,
"installable": True,
"external_dependencies": {
'python': [],
},
}

a1ezzz/wasp-general | wasp_general/network/web/request.py | Python | lgpl-3.0 | 4,630 | 0.023542
# -*- coding: utf-8 -*-
# wasp_general/network/web.py
#
# Copyright (C) 2016 the wasp-general authors and contributors
# <see AUTHORS file>
#
# This file is part of wasp-general.
#
# Wasp-general is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wasp-general is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with wasp-general. If not, see <http://www.gnu.org/licenses/>.
# noinspection PyUnresolvedReferences
from wasp_general.version import __author__, __version__, __credits__, __license__, __copyright__, __email__
# noinspection PyUnresolvedReferences
from wasp_general.version import __status__
import re
from wasp_general.verify import verify_type, verify_value
from wasp_general.network.web.proto import WWebSessionProto, WWebRequestProto
from wasp_general.network.web.headers import WHTTPHeaders
from wasp_general.network.web.re_statements import http_method_name, http_path, http_version
class WWebRequest(WWebRequestProto):
""" :class:`.WWebRequestProto` implementation. Class represent HTTP-request descriptor.
Call :meth:`.WWebRequest.ro` method to create unchangeable copy
"""
request_line_re = re.compile(
'^(' + http_method_name + ') +(' + http_path + ')( +HTTP/(' + http_version + '))?$'
)
"""
Check for HTTP request line. See RFC 2616, Section 5.1
"""
@verify_type(session=WWebSessionProto, method=str, path=str, headers=(WHTTPHeaders, None))
@verify_type(request_data=(bytes, None))
@verify_value(method=lambda x: len(x) > 0)
@verify_value(path=lambda x: len(x) > 0)
def __init__(self, session, method, path, headers=None, request_data=None):
"""
Create new request descriptor
:param session: request origin
:param method: called HTTP-method
:param path: called HTTP-path
"""
WWebRequestProto.__init__(self)
self.__session = session
self.__method = method.upper()
self.__path = path
self.__headers = headers
self.__request_data = request_data
self.__ro_flag = False
def session(self):
""" Return origin session
:return: WWebSessionProto
"""
return self.__session
def method(self):
""" Return requested method
:return: str
"""
return self.__method
def path(self):
""" Return requested path
:return: str
"""
return self.__path
def headers(self):
""" Return request headers
:return: WHTTPHeaders
"""
return self.__headers
@verify_type(headers=WHTTPHeaders)
def set_headers(self, headers):
""" Set headers for request
:param headers: headers to set
:return: None
"""
if self.__ro_flag:
raise RuntimeError('Read-only object changing attempt')
self.__headers = headers
def request_data(self):
""" Return request data
:return: bytes
"""
return self.__request_data
@verify_type(request_data=bytes)
def set_request_data(self, request_data):
""" Set payload data for request
:param request_data: data to set
:return: None
"""
if self.__ro_flag:
raise RuntimeError('Read-only object changing attempt')
self.__request_data = request_data
@classmethod
@verify_type('paranoid', session=WWebSessionProto)
@verify_type(request_line=str)
	def parse_request_line(cls, session, request_line):
		""" Parse given request line like 'GET /foo' or 'POST /zzz HTTP/1.0'
:param session: origin session
:param request_line: line to parse
:return: WWebRequest
"""
		r = cls.request_line_re.search(request_line)
if r is not None:
method, path, protocol_sentence, protocol_version = r.groups()
return WWebRequest(session, method, path)
raise ValueError('Invalid request line')
@verify_type('paranoid', http_code=str)
def parse_headers(self, http_code):
""" Parse http-code (like 'Header-X: foo\r\nHeader-Y: bar\r\n') and retrieve (save) HTTP-headers
:param http_code: code to parse
:return: None
"""
if self.__ro_flag:
raise RuntimeError('Read-only object changing attempt')
self.__headers = WHTTPHeaders.import_headers(http_code)
def ro(self):
""" Create read-only copy
:return: WWebRequest
"""
request = WWebRequest(
self.session(), self.method(), self.path(),
headers=self.headers().ro(), request_data=self.request_data()
)
request.__ro_flag = True
return request
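
# ---------------------------------------------------------------------------
# Quick illustration (not part of the original module) of the request-line
# regular expression defined on WWebRequest above; the sample request line and
# variable names are arbitrary.
if __name__ == '__main__':
    sample = WWebRequest.request_line_re.search('GET /index.html HTTP/1.1')
    if sample is not None:
        sample_method, sample_path, _, sample_version = sample.groups()
        print(sample_method, sample_path, sample_version)  # e.g. GET /index.html 1.1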

sparcs-kaist/heartbeat-server | apps/core/migrations/0006_auto_20161113_0341.py | Python | mit | 645 | 0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-12 18:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20161113_0317'),
]
operations = [
migrations.RenameField(
model_name='backuptarget',
            old_name='path',
new_name='path_template',
),
migrations.AddField(
model_name='backuplog',
name='path',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]

grahamc/markment | tests/unit/test_html_rendering.py | Python | mit | 7,784 | 0.000257
# -*- coding: utf-8 -*-
# <markment - markdown-based documentation generator for python>
# Copyright (C) <2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from markment import Markment
from markment.engine import MarkmentRenderer
from lxml import html as lhtml
from .base import MARKDOWN
def test_prefix_link_when_needed():
"MarkmentRenderer#prefix_link_if_needed should prefix if link is relative"
renderer = MarkmentRenderer()
renderer.url_prefix = 'http://awesome.com'
result = renderer.prefix_link_if_needed('bar.png')
result.should.equal('http://awesome.com/bar.png')
def test_prefix_link_when_not_needed():
"MarkmentRenderer#prefix_link_if_needed should NOT prefix if link is absolute"
renderer = MarkmentRenderer()
renderer.url_prefix = 'http://awesome.com'
result = renderer.prefix_link_if_needed('http://ok.com/bar.png')
result.should.equal('')
def test_prefix_link_when_not_needed_provided():
    "MarkmentRenderer#prefix_link_if_needed should NOT prefix when no url_prefix is provided"
renderer = MarkmentRenderer()
result = renderer.prefix_link_if_needed('bar.png')
result.should.equal('')
def test_anchors_in_1st_level_headers():
"Markment should put anchors in 1st level headers"
MD = MARKDOWN("""
# API Reference
some content
""")
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
headers = dom.cssselect("h1")
headers.should.have.length_of(1)
h1 = headers[0]
h1.attrib.should.have.key("name").being.equal("api-reference")
h1.attrib.should.have.key("id").being.equal("api-reference")
links = h1.getchildren()
links.should.have.length_of(1)
a = links[0]
a.text.should.equal("API Reference")
a.attrib.should.have.key("href").equal("#api-reference")
def test_anchors_in_2nd_level_headers():
"Markment should put anchors in 2nd level headers"
MD = MARKDOWN("""
# API Reference
## Rendering content
""")
    mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
headers = dom.cssselect("h2")
headers.should.have.length_of(1)
h2 = headers[0]
h2.attrib.should.have.key("name").being.equal("rendering-content")
h2.attrib.should.have.key("id").being.equal("rendering-content")
links = h2.getchildren()
links.should.have.length_of(1)
a = links[0]
    a.text.should.equal("Rendering content")
a.attrib.should.have.key("href").equal("#rendering-content")
def test_code_block():
"Markment should render code blocks"
MD = MARKDOWN("""
# API Reference
This is good
```python
import os
os.system('ls /')
```
This is not good
```python
import os
os.system('sudo rm -rf /')
```
""")
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
code_blocks = dom.cssselect("div.highlight pre")
code_blocks.should.have.length_of(2)
code1, code2 = code_blocks
code1.attrib.should.have.key("name").equal("api-reference-example-1")
code2.attrib.should.have.key("name").equal("api-reference-example-2")
def test_code_block_guesses_lexer():
"Markment should render code blocks even without a language specified"
MD = MARKDOWN("""
# API Reference
This is good
```
import os
os.system('ls /')
```
This is not good
```python
import os
os.system('sudo rm -rf /')
```
""")
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
code_blocks = dom.cssselect("div.highlight pre")
code_blocks.should.have.length_of(2)
code1, code2 = code_blocks
code1.attrib.should.have.key("name").equal("api-reference-example-1")
code2.attrib.should.have.key("name").equal("api-reference-example-2")
def test_image_relative():
"Markment should render images with relative path"
MD = MARKDOWN("""
# Awesome project

""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
images = dom.cssselect("img")
images.should.have.length_of(1)
img = images[0]
img.attrib.should.have.key("src").equal("http://falcao.it/logo.png")
img.attrib.should.have.key("alt").equal("LOGO")
def test_image_relative_with_callback():
"Markment should render images with relative path"
MD = MARKDOWN("""
# Awesome project

[Documentation](docs.md)
""")
def process_url(path):
if path.lower().endswith("md"):
return "http://markdown.com/{0}".format(path)
else:
return "http://images.com/{0}".format(path)
mm = Markment(MD, url_prefix=process_url)
dom = lhtml.fromstring(mm.rendered)
images = dom.cssselect("img")
images.should.have.length_of(1)
img = images[0]
img.attrib.should.have.key("src").equal("http://images.com/logo.png")
img.attrib.should.have.key("alt").equal("LOGO")
links = dom.cssselect("a")
links.should.have.length_of(2)
a = links[-1]
a.attrib.should.have.key("href").equal("http://markdown.com/docs.md")
def test_image_absolute():
"Markment should render images with absolute path"
MD = MARKDOWN("""
# Awesome project

""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
images = dom.cssselect("img")
images.should.have.length_of(1)
img = images[0]
img.attrib.should.have.key("src").equal("http://octomarks.io/logo.png")
img.attrib.should.have.key("alt").equal("LOGO")
def test_link_relative():
"Markment should render links with relative path"
MD = MARKDOWN("""
[LOGO](file.md)
""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
links = dom.cssselect("a")
links.should.have.length_of(1)
a = links[0]
a.attrib.should.have.key("href").equal("http://falcao.it/file.md")
a.text.should.equal('LOGO')
def test_link_absolute():
"Markment should render links with absolute path"
MD = MARKDOWN("""
[LOGO](http://octomarks.io/file.md)
""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
links = dom.cssselect("a")
links.should.have.length_of(1)
a = links[0]
a.attrib.should.have.key("href").equal("http://octomarks.io/file.md")
a.text.should.equal('LOGO')
def test_markment_doesnt_fail_if_has_no_headers():
"Markment should find and index 3rd level headers"
MD = MARKDOWN("""
```python
poor code, doesn't have a title
```
""")
mm = Markment(MD)
mm.index().should.equal([])
def test_markment_header_accepts_unicode_characters():
"Markment supports unicode (at least in the headers :)"
MD = MARKDOWN('''
# Curdling
## Curdle /ˈkərdl/
''')
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
headers = dom.cssselect("h2 a")
headers.should_not.be.empty
h2 = headers[0]
h2.text.should.equal('Curdle /ˈkərdl/')

b-long/ezbake-platform-services | efe/frontend_app/modules/ezRPStaticFileStore.py | Python | apache-2.0 | 9,011 | 0.003884
# Copyright (C) 2013-2015 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import array
import math
import logging
import logging.handlers
from pyaccumulo import Accumulo, Mutation, Range
class EzRPStaticStore(object):
'''
Class to save and retrieve static content from Accumulo.
cf = "static" For all rows
cq = "hash" Stores the hash_value of Static File
cq = "nofchunks" Stores the number of Chunks needed to store Static File
cq = "chunk_000" .. "chunk_nnn" Stores the Chunks of Static File
'''
def __init__(self, host="localhost", port=42424, user='root', password='secret', chunk_size=int(5*1048576), logger=None):
self.__host = host
self.__port = port
self.__user = user
self.__password = password
self.__table = 'ezfrontend'
self.__cf = 'static'
self.__connection = None
if logger is not None:
self.__log = logger
else:
self.__log = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.__log.addHandler(logging.NullHandler())
        self.__chunk_size = int(chunk_size)
self._connect(self.__host, self.__port, self.__user, self.__password)
def _connect(self, host, port, user, password):
try:
self.__connection = Accumulo(host, port, user, password)
self.__log.debug('Connected to StaticFile Store')
except Exception as e:
self.__log.exception('Error while connecting to StaticFile Store: %s' % str(e))
raise Exception('Error while connecting to StaticFile Store: %s' % str(e))
def _ensureTableExists(self):
'''
Make sure that the table exists before any other operation.
Reconnect to Accumulo if the Connection is reset.
'''
if not self.__connection.table_exists(self.__table):
self.__log.info('table "{table}" does not exist in StaticFile Store. Creating the table'.format(table=self.__table))
self.__connection.create_table(self.__table)
if not self.__connection.table_exists(self.__table):
            self.__log.error('Unable to ensure StaticFile Store table "{table}" exists'.format(table=self.__table))
raise Exception('StaticFile Store: Unable to ensure table "{table}" exists'.format(table=self.__table))
def _ensureNoDuplicates(self, usrFacingUrlPrefix):
'''
Ensure a single copy of file for a given usrFacingUrlPrefix
'''
if self._getHash(usrFacingUrlPrefix) is not None:
self.deleteFile(usrFacingUrlPrefix)
def _putNofChunks(self, usrFacingUrlPrefix, length):
'''
Put the number of chunks the static contents is stored
'''
chunks = int(math.ceil(length / float(self.__chunk_size)))
writer = self.__connection.create_batch_writer(self.__table)
m = Mutation(usrFacingUrlPrefix)
m.put(cf=self.__cf, cq="nofchunks", val=str(chunks))
writer.add_mutation(m)
writer.close()
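    # Worked example (illustrative, not in the original source): with the default
    # chunk size of 5 * 1048576 = 5242880 bytes, an 11 MiB value (11534336 bytes)
    # is stored as ceil(11534336 / 5242880) = 3 chunks under the column qualifiers
    # chunk_0000000000 .. chunk_0000000002.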
def _getNofChunks(self, usrFacingUrlPrefix):
'''
Get the number of chunks the static contents is stored
'''
scan_range = Range(srow=usrFacingUrlPrefix, scf=self.__cf, scq="nofchunks",
erow=usrFacingUrlPrefix, ecf=self.__cf, ecq="nofchunks")
for entry in self.__connection.scan(self.__table, scanrange=scan_range):
return int(entry.val)
return 0
def _getChunks(self, data):
'''
Break the blob into CHUNK_SIZE.
less than maxFrameSize in Accumulo proxy.properties
'''
data_length = len(data)
for i in range(0, data_length + 1, self.__chunk_size):
yield data[i:i + self.__chunk_size]
def _putHash(self, usrFacingUrlPrefix, hash_str):
'''
Puts the Hash for usrFacingUrlPrefix
'''
writer = self.__connection.create_batch_writer(self.__table)
m = Mutation(usrFacingUrlPrefix)
m.put(cf=self.__cf, cq="hash", val=hash_str)
writer.add_mutation(m)
writer.close()
def _getHash(self, usrFacingUrlPrefix):
scan_range = Range(srow=usrFacingUrlPrefix, scf=self.__cf, scq="hash",
erow=usrFacingUrlPrefix, ecf=self.__cf, ecq="hash")
for entry in self.__connection.scan(self.__table, scanrange=scan_range):
return str(entry.val)
else:
return None
def reConnection(self):
self._connect(self.__host, self.__port, self.__user, self.__password)
def putFile(self, usrFacingUrlPrefix, hash_str, data):
self._ensureTableExists()
self._ensureNoDuplicates(usrFacingUrlPrefix)
self._putHash(usrFacingUrlPrefix, hash_str)
data_length = len(data)
self._putNofChunks(usrFacingUrlPrefix, data_length)
writer = self.__connection.create_batch_writer(self.__table)
for i, chunk in enumerate(self._getChunks(data)):
m = Mutation(usrFacingUrlPrefix)
m.put(cf=self.__cf, cq="chunk_{number:010d}".format(number=i), val=chunk)
writer.add_mutation(m)
self.__log.debug('added static file for "{url}" with hash "{hash}" of length "{length}"'.format(url=usrFacingUrlPrefix, hash=hash_str, length=data_length))
writer.close()
def getFile(self, usrFacingUrlPrefix):
'''
Assembles all the chunks for this row
'''
self._ensureTableExists()
data = array.array('c') # Create a byte array
chunks = self._getNofChunks(usrFacingUrlPrefix)
chunks_read = 0
for i in range(chunks):
cq = 'chunk_{number:010d}'.format(number=i)
for entry in self.__connection.scan(self.__table, None, cols=[[self.__cf, cq]]):
if entry.row == usrFacingUrlPrefix and entry.cq.startswith("chunk_"):
chunks_read += 1
data.extend(entry.val)
# This code gets following error while retrieving over 96MB. Data stops at first chunk_000
# # java.lang.OutOfMemoryError: Java heap space
# -XX:OnOutOfMemoryError="kill -9 %p"
# Executing /bin/sh -c "kill -9 32597"...
# [1]+ Exit 137 sudo -u accumulo /opt/accumulo/current/bin/accumulo proxy -p /opt/accumulo/current/conf/proxy.properties
# startChunk = "chunk_{number:010d}".format(number=0)
# endChunk = "chunk_{number:010d}".format(number=chunks)
# scan_range = Range(srow=usrFacingUrlPrefix, scf=self.__cf, scq=startChunk,
# erow=usrFacingUrlPrefix, ecf=self.__cf, ecq=endChunk)
# for entry in self.__connection.scan(self.__table, scanrange=scan_range):
        # #self.__log.info("getFile: row = {0} cq= {1}".format(entry.row, entry.cq))
# if entry.cq.startswith("chunk_"):
# self.__log.info("getFile: row = {0} cq= {1}".format(entry.row, entry.cq))
        # chunks_read += 1
# data.extend(entry.val)
self.__log.debug('retrieved static file for {url}'.format(url=usrFacingUrlPrefix))
if chunks_read != chunks:
self.__log.error("did not read all the chunks from StaticFile Store")
return data.tostring() if data.buffer_info()[1] > 0 else None
def deleteFile(self, usrFacingUrlPrefix):
self._ensureTableExists()
writer = self.__connection.create_batch_writer(self.__table)
chunks = se

austensatterlee/VOSIMSynth | scripts/add_file.py | Python | gpl-3.0 | 5,486 | 0.006744
from __future__ import print_function
import re
HEADER_TEMPLATE=r"""/*
Copyright 2016, Austen Satterlee
This file is part of VOSIMProject.
VOSIMProject is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VOSIMProject is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VOSIMProject. If not, see <http://www.gnu.org/licenses/>.
*/
{guard}
"""
IFNDEF_TEMPLATE = r"""
#ifndef __{guardname}__
#define __{guardname}__
#endif
"""
PRAGMA_ONCE_TEMPLATE = r"""
#pragma once
"""
SOURCE_TEMPLATE="#include \"{filename:}.h\""
def find_valid_directories(source_dir, include_dir):
sofar = []
def _walker(ret_list, dirname, fnames):
if (source_dir in fnames) and (include_dir in fnames):
ret_list.append(dirname)
# m_dirname = re.sub(r'\binclude\b|\bsrc\b', '', dirname)
# if m_dirname in sofar:
# ret_list.append(dirname)
# else:
# sofar.append(m_dirname)
valid_directories = []
os.path.walk(".", _walker, valid_directories)
return valid_directories
if __name__=="__main__":
import argparse as ap
import os,sys,datetime
parser = ap.ArgumentParser(formatter_class=ap.ArgumentDefaultsHelpFormatter)
parser.add_argument("-n", "--dry-run", action="store_true",
help="Don't perform any actions, just print would be done")
parser.add_argument("-f", "--force", action="store_true",
help="Overwrite existing files")
parser.add_argument("--guard",choices=["pragma","ifndef"], default="pragma",
help="Choose the type of header guard to use")
subparsers = parser.add_subparsers(title="commands")
parser_auto_add = subparsers.add_parser("auto-add",
help="Add source and include files to their respective directories. Automatically detect source and\
include directories given a base directory")
parser_auto_add.add_argument("directory", nargs='?', type=str, help="Directory that contains 'src' and 'include' dirs")
parser_auto_add.add_argument("filenames", nargs='*', type=str, help="Name of the new files to add (without extension)")
parser_auto_add.add_argument("--list", "-l", action="store_true", help="List valid directories and exit")
parser_auto_add.set_defaults(command="auto-add")
parser_add = subparsers.add_parser("add", help="Add source and include files to the specified directories")
parser_add.add_argument("source_dir", type=str)
parser_add.add_argument("include_dir", type=str)
parser_add.add_argument("filenames", nargs='+', type=str)
parser_add.set_defaults(command="add")
parsed = parser.parse_args()
if parsed.command=="auto-add":
if parsed.list:
            print('\n'.join(find_valid_directories('src', 'include')))
sys.exit(1)
if not parsed.directory:
sys.stderr.write("ERROR: Please provide a directory\n")
sys.exit(1)
if not parsed.filenames:
sys.stderr.write("ERROR: Please provide at least one filename\n")
sys.exit(1)
directory = os.path.normpath(parsed.directory)
        dir_contents = os.listdir(directory)
if ('src' not in dir_contents) or ('include' not in dir_contents):
            raise RuntimeError("'src' and 'include' directories not found")
include_dir = os.path.join(directory,"include")
src_dir = os.path.join(directory,"src")
if parsed.command=="add":
include_dir = os.path.normpath(parsed.include_dir)
src_dir = os.path.normpath(parsed.source_dir)
# Check that directories exist
if not os.path.exists(src_dir):
        sys.stderr.write("ERROR: Source directory '%s' does not exist\n" % src_dir)
sys.exit(1)
if not os.path.exists(include_dir):
sys.stderr.write("ERROR: Include directory '%s' does not exist\n" % include_dir)
sys.exit(1)
for filename in parsed.filenames:
include_fname = os.path.join(include_dir, filename+".h")
src_fname = os.path.join(src_dir, filename+".cpp")
if not parsed.force and (os.path.exists(include_fname) or os.path.exists(src_fname)):
sys.stderr.write("ERROR: '%s' or '%s' already exists!\n" % (include_fname,src_fname))
sys.exit(1)
        guard_str = PRAGMA_ONCE_TEMPLATE if parsed.guard=="pragma" else IFNDEF_TEMPLATE.format(guardname=filename.upper())
include_contents = HEADER_TEMPLATE.format(
filename=filename,
guard=guard_str,
date=datetime.date.today().strftime("%m/%Y")
)
src_contents = SOURCE_TEMPLATE.format(filename=filename)
if not parsed.dry_run:
with open(include_fname,"w") as fp:
fp.write(include_contents)
sys.stdout.write("Added header file to {}\n".format(include_fname))
if not parsed.dry_run:
with open(src_fname,"w") as fp:
fp.write(src_contents)
sys.stdout.write("Added source file to {}\n".format(src_fname))
sys.exit()
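
# Example invocations (illustrative; the script name and directory layout depend
# on the project):
#   python add_file.py auto-add units NewUnit
#   python add_file.py --guard ifndef add src include NewUnit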

CERT-W/certitude | components/interface/web.py | Python | gpl-2.0 | 40,631 | 0.003323
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
CERTitude: the seeker of IOC
Copyright (c) 2016 CERT-W
Contact: cert@wavestone.com
Contributors: @iansus, @nervous, @fschwebel
CERTitude is under licence GPL-2.0:
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
if __name__ == "__main__" and __package__ is None:
    raise Exception('Error: run this script from main.py, not directly')
# Imports
# Lots of them...
#
import atexit
import base64
import datetime
import json
import logging
import os
import ssl
import subprocess
import sys
try:
import win32event
import win32security
except:
pass
from flask import Flask, render_template, request, session, redirect, url_for, Response, abort, escape
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import netaddr
from config import LISTEN_ADDRESS, LISTEN_PORT, BOKEH_LISTEN_ADDRESS, BOKEH_LISTEN_PORT
from config import IOC_MODE, DEBUG, USE_SSL, SSL_KEY_FILE, SSL_CERT_FILE, CERTITUDE_DATABASE, MIN_SUBMIT_INTERVAL
from helpers.queue_models import Task
from helpers.results_models import Result, IOCDetection
from helpers.misc_models import User, ConfigurationProfile, WindowsCredential, XMLIOC, Batch, GlobalConfig
from helpers.yara_models import YaraRule
from helpers.helpers import hashPassword, checksum, verifyPassword
import helpers.crypto as crypto
import components.scanner.openioc.openiocparser as openiocparser
import helpers.iocscan_modules as ioc_modules
import helpers.hashscan_modules as hash_modules
import xml.etree.ElementTree as ET
from functools import wraps
# Bokeh future
# from bokeh.embed import autoload_server
# from bokeh.client import pull_session
from plyara.plyara import PlyaraParser
# Set up logger
loggingserver = logging.getLogger('api')
# Create database
engine = create_engine(CERTITUDE_DATABASE, echo=False)
dbsession = sessionmaker(bind=engine)()
def genCSRFToken():
return base64.b64encode(crypto.randomBytes(20)).replace('=', '').replace('+', '').replace('/', '')
CSRF_TOKEN_INDEX = '_csrft'
STATIC_ENDPOINT = 'static'
def getCSRFToken():
if not CSRF_TOKEN_INDEX in session:
        session[CSRF_TOKEN_INDEX] = genCSRFToken()
return session[CSRF_TOKEN_INDEX]
''' APPLICATION CONFIGURATION '''
app = Flask(__name__, static_folder=STATIC_ENDPOINT)
app.secret_key = os.urandom(24)
app.jinja_env.globals['csrf_token'] = getCSRFToken
app.jinja_env.globals['csrf_token_name'] = CSRF_TOKEN_INDEX
app.config['UPLOAD_FOLDER'] = 'upload'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 5 * 60
ALLOWED_EXTENSIONS = ['txt']
app.config['IOCS_FOLDER'] = os.path.join('components', 'iocscan', '.', 'ioc')
app.config['RESULT_FILE'] = os.path.join('components', 'interface', 'static', 'data', 'results.csv')
app.config['CERTITUDE_OUTPUT_FOLDER'] = 'results'
app.config['PROCESSED_FOLDER'] = 'processed'
RESULT_FILE_HEADER = 'Title:HostId,Title:Hostname,Lookup:Success,Lookup:IOCScanned,Lookup:HashScanned,Lookup:IP,Lookup:Subnet,Malware,Compromise'
IP_REGEX = '(([0-9]|[1-9][0-9]|1[0-9]{2}|2([0-4][0-9]|5[0-5]))\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2([0-4][0-9]|5[0-5]))'
# ''' Decorator for auth '''
def requires_auth(f):
"""
Wrapper to check on each page the user authentication
"""
@wraps(f)
def decorated(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
return redirect(app.jinja_env.globals['url_for']('login'))
return decorated
# ''' Bokeh configuration '''
bokeh_process = None
# Preventing Flask from running Bokeh twice
# source : https://stackoverflow.com/questions/9449101/how-to-stop-flask-from-initialising-twice-in-debug-mode
if not DEBUG or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
bokeh_process = subprocess.Popen([
'bokeh',
'serve', 'crossbokeh.py',
'--address', BOKEH_LISTEN_ADDRESS,
'--port', str(BOKEH_LISTEN_PORT),
'--allow-websocket-origin', '%s:%d' % (BOKEH_LISTEN_ADDRESS, BOKEH_LISTEN_PORT),
], stdout=subprocess.PIPE)
@atexit.register
def kill_server():
if bokeh_process is not None:
bokeh_process.kill()
# ''' CSRF Protection '''
@app.before_request
def csrf_protect():
if request.method == 'POST':
token = None if CSRF_TOKEN_INDEX not in session else session[CSRF_TOKEN_INDEX]
arg = request.form.get(CSRF_TOKEN_INDEX)
if not token or token != arg:
print 'Received %s, expected %s' % (arg, token)
abort(400)
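
# Template side (illustrative, not from the original source): a form rendered by
# Jinja can embed the token through the globals registered above, e.g.
#   <input type="hidden" name="{{ csrf_token_name }}" value="{{ csrf_token() }}">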
# -############################-#
# Pages routing and controlers #
# -############################-#
# INDEX
@app.route('/')
@requires_auth
def index():
return redirect(app.jinja_env.globals['url_for']('scan'))
# SESSION MANAGEMENT
@app.route('/login', methods=['GET', 'POST'])
def login():
error = ''
if request.method == 'POST':
# Get user from username
userList = dbsession.query(User).filter_by(username=request.form['username']).limit(1)
matchingUser = userList.first()
# Check password
if (matchingUser is not None) and (matchingUser.password == hashPassword(request.form['password'])):
# Since there is an "active" status...
if matchingUser.active:
session['logged_in'] = True
session['user_id'] = matchingUser.id
return redirect(app.jinja_env.globals['url_for']('index'))
else:
return render_template('session-login.html', error='User account is disabled')
error = 'User might not exist or password is incorrect'
return render_template('session-login.html', errors=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
return redirect(app.jinja_env.globals['url_for']('index'))
# USER MANAGEMENT
# Lists users
@app.route('/users')
@requires_auth
def users():
allUsers = dbsession.query(User).order_by(User.id.asc())
return render_template('user-list.html', users=allUsers)
# {En,Dis}ables an account
@app.route('/users/<int:userid>/switchactive')
@requires_auth
def userSwitchActive(userid):
u = dbsession.query(User).filter_by(id=userid).first()
if u is None:
return redirect(app.jinja_env.globals['url_for']('users'))
u.active = not u.active
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('users'))
# Add a new user
# MASTER_KEY is encrypted for the new user
# Clear text MASTER_KEY is retrieved using the current use's credentials
#
@app.route('/user/add', methods=['GET', 'POST'])
@requires_auth
def userAdd():
if request.method == 'GET':
return render_template('user-add.html')
else:
success = True
errors = []
user_password = request.form['user_password']
user = dbsession.query(User).filter_by(id=session['user_id']).first()
# Checks current user password
if user is None or hashPassword(user_password) != user.password:
success = False
errors.append('Your password is incorrect')
# Someone has messed with the database
if success:
mk_cksum = dbsession.query(GlobalConfig).filter_by(key='master_key_checksum').first()
if not mk_cksum:
success = False
errors.append('Database is broken, please create a new one !')
if success:
keyFromPas

codeboy/coddy-sitetools | sitetools/coddy_site/procesors.py | Python | bsd-3-clause | 486 | 0.004115
# -*- coding: utf-8 -*-
from django.core import urlresolvers
def custom_processor(request):
proc_data = dict()
resolver = urlresolvers.get_resolver(None)
patterns = sorted(
        (key, val[0][0][0]) for key, val in resolver.reverse_dict.iteritems() if isinstance(key, basestring))
    proc_data['pat'] = patterns
    proc_data['app'] = 'Common app'
proc_data['ip_address'] = request.META['REMOTE_ADDR']
proc_data['user'] = request.user
return proc_data
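
# To enable this context processor it would typically be registered in the Django
# settings (illustrative; the dotted path depends on how the package is installed,
# and the old-style TEMPLATE_CONTEXT_PROCESSORS setting is assumed here):
#
#   TEMPLATE_CONTEXT_PROCESSORS += (
#       'sitetools.coddy_site.procesors.custom_processor',
#   )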

rouxcode/django-cms-plugins | cmsplugins/maps/migrations/0003_googlemap_cms_page.py | Python | mit | 601 | 0.001664
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2016-12-06 09:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
        ('cms', '0016_auto_20160608_1535'),
('maps', '0002_auto_20160926_1157'),
]
operations = [
migrations.AddField(
model_name='googlemap',
name='cms_page',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='cms.Page'),
),
]

nathanbjenx/cairis | cairis/test/test_UseCaseContribution.py | Python | apache-2.0 | 3,094 | 0.007434
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
from subprocess import call
import cairis.core.BorgFactory
from cairis.core.Borg import Borg
from cairis.core.ReferenceSynopsis import ReferenceSynopsis
from cairis.core.ReferenceContribution import ReferenceContribution
from cairis.core.ARM import DatabaseProxyException
from cairis.mio.ModelImport import importModelFile
__author__ = 'Shamal Faily'
class UseCaseContributionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cairis.core.BorgFactory.initialise()
importModelFile(os.environ['CAIRIS_SRC'] + '/test/webinos.xml',1)
def setUp(self):
f = open(os.environ['CAIRIS_SRC'] + '/test/usecase_contributions.json')
d = json.load(f)
f.close()
self.csData = d['characteristic_synopses'][0]
self.rcData = d['usecase_contributions'][0]
def tearDown(self):
pass
def testAddContribution(self):
ics = ReferenceSynopsis(-1,self.csData['theReference'],self.csData['theSynopsis'],self.csData['theDimension'],self.csData['theActorType'],self.csData['theActor'])
b = Borg()
b.dbProxy.addCharacteristicSynopsis(ics)
irc = ReferenceContribution(self.rcData['theSource'],self.rcData['theDestination'],self.rcData['theMeansEnd'],self.rcData['theContribution'])
b.dbProxy.addUseCaseContribution(irc)
orcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
orc,rType = orcs[self.rcData['theDestination']]
self.assertEqual(orc.source(), irc.source())
self.assertEqual(orc.destination(), irc.destination())
self.assertEqual(orc.meansEnd(), irc.meansEnd())
self.assertEqual(orc.contribution(), irc.contribution())
def testUpdateContribution(self):
b = Borg()
orcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
orc,rType = orcs[self.rcData['theDestination']]
orc.theContribution = 'Break'
b.dbProxy.updateUseCaseContribution(orc)
    urcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
urc,rType = urcs[self.rcData['theDestination']]
self.assertEqual(orc.source(), urc.source())
self.assertEqual(orc.destination(), urc.destination())
self.assertEqual(orc.meansEnd(), urc.meansEnd())
self.assertEqual(orc.contribution(), urc.contribution())
if __name__ == '__main__':
  unittest.main()

Eforcers/inbox-cleaner | src/lib/gdata/apps/audit/service.py | Python | mit | 9,512 | 0.006308
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to audit user data.
AuditService: Set auditing."""
__author__ = 'jlee@pbu.edu'
from base64 import b64encode
import gdata.apps
import gdata.apps.service
import gdata.service
class AuditService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Audit service."""
def _serviceUrl(self, setting_id, domain=None, user=None):
if domain is None:
domain = self.domain
if user is None:
return '/a/feeds/compliance/audit/%s/%s' % (setting_id, domain)
else:
return '/a/feeds/compliance/audit/%s/%s/%s' % (setting_id, domain, user)
def updatePGPKey(self, pgpkey):
"""Updates Public PGP Key Google uses to encrypt audit data
Args:
pgpkey: string, ASCII text of PGP Public Key to be used
Returns:
A dict containing the result of the POST operation."""
uri = self._serviceUrl('publickey')
b64pgpkey = b64encode(pgpkey)
properties = {}
properties['publicKey'] = b64pgpkey
return self._PostProperties(uri, properties)
def createEmailMonitor(self, source_user, destination_user, end_date,
begin_date=None, incoming_headers_only=False,
outgoing_headers_only=False, drafts=False,
drafts_headers_only=False, chats=False,
chats_headers_only=False):
"""Creates a email monitor, forwarding the source_users emails/chats
Args:
source_user: string, the user whose email will be audited
destination_user: string, the user to receive the audited email
end_date: string, the date the audit will end in
"yyyy-MM-dd HH:mm" format, required
begin_date: string, the date the audit will start in
"yyyy-MM-dd HH:mm" format, leave blank to use current time
incoming_headers_only: boolean, whether to audit only the headers of
mail delivered to source user
outgoing_headers_only: boolean, whether to audit only the headers of
mail sent from the source user
drafts: boolean, whether to audit draft messages of the source user
drafts_headers_only: boolean, whether to audit only the headers of
mail drafts saved by the user
chats: boolean, whether to audit archived chats of the source user
chats_headers_only: boolean, whether to audit only the headers of
archived chats of the source user
Returns:
A dict containing the result of the POST operation."""
uri = self._serviceUrl('mail/monitor', user=source_user)
properties = {}
properties['destUserName'] = destination_user
if begin_date is not None:
properties['beginDate'] = begin_date
properties['endDate'] = end_date
if incoming_headers_only:
properties['incomingEmailMonitorLevel'] = 'HEADER_ONLY'
else:
properties['incomingEmailMonitorLevel'] = 'FULL_MESSAGE'
if outgoing_headers_only:
properties['outgoingEmailMonitorLevel'] = 'HEADER_ONLY'
else:
properties['outgoingEmailMonitorLevel'] = 'FULL_MESSAGE'
if drafts:
if drafts_headers_only:
properties['draftMonitorLevel'] = 'HEADER_ONLY'
else:
properties['draftMonitorLevel'] = 'FULL_MESSAGE'
if chats:
if chats_headers_only:
properties['chatMonitorLevel'] = 'HEADER_ONLY'
else:
properties['chatMonitorLevel'] = 'FULL_MESSAGE'
return self._PostProperties(uri, properties)
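
  # Illustrative call sequence (not from the original source). Real use needs a
  # provisioned Google Apps domain and admin credentials, and assumes the usual
  # gdata ClientLogin flow inherited from gdata.service:
  #
  #   service = AuditService(email='admin@example.com', password='***',
  #                          domain='example.com')
  #   service.ProgrammaticLogin()
  #   service.createEmailMonitor('source.user', 'auditing.user',
  #                              end_date='2016-12-31 23:59')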
  def getEmailMonitors(self, user):
    """Gets the email monitors for the given user
Args:
user: string, the user to retrieve email monitors for
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('mail/monitor', user=user)
return self._GetPropertiesList(uri)
def deleteEmailMonitor(self, source_user, destination_user):
"""Deletes the email monitor for the given user
Args:
source_user: string, the user who is being monitored
      destination_user: string, the user who receives the monitored emails
Returns:
Nothing
"""
uri = self._serviceUrl('mail/monitor', user=source_user+'/'+destination_user)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def createAccountInformationRequest(self, user):
"""Creates a request for account auditing details
Args:
user: string, the user to request account information for
Returns:
A dict containing the result of the post operation."""
uri = self._serviceUrl('account', user=user)
properties = {}
#XML Body is left empty
try:
return self._PostProperties(uri, properties)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def getAccountInformationRequestStatus(self, user, request_id):
"""Gets the status of an account auditing request
Args:
      user: string, the user whose account auditing details were requested
request_id: string, the request_id
Returns:
A dict containing the result of the get operation."""
uri = self._serviceUrl('account', user=user+'/'+request_id)
try:
return self._GetProperties(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
def getAllAccountInformationRequestsStatus(self):
"""Gets the status of all account auditing requests for the domain
Args:
None
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('account')
return self._GetPropertiesList(uri)
def deleteAccountInformationRequest(self, user, request_id):
"""Deletes the request for account auditing information
Args:
user: string, the user whose account auditing details were requested
request_id: string, the request_id
Returns:
Nothing
"""
uri = self._serviceUrl('account', user=user+'/'+request_id)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def createMailboxExportRequest(self, user, begin_date=None, end_date=None, include_deleted=False, search_query=None, headers_only=False):
"""Creates a mailbox export request
Args:
user: string, the user whose mailbox export is being requested
begin_date: string, date of earliest emails to export, optional, defaults to date of account creation
format is 'yyyy-MM-dd HH:mm'
end_date: string, date of latest emails to export, optional, defaults to current date
format is 'yyyy-MM-dd HH:mm'
include_deleted: boolean, whether to include deleted emails in export, mutually exclusive with search_query
search_query: string, gmail style search query, matched emails will be exported, mutually exclusive with include_deleted
Returns:
A dict containing the result of the post operation."""
uri = self._serviceUrl('mail/export', user=user)
properties = {}
if begin_date is not None:
properties['beginDate'] = begin_date
if end_date is not None:
properties['endDate'] = end_date
if include_deleted is not None:
properties['includeDeleted'] = gdata.apps.service._bool2str(include_deleted)
if search_query is not None:
properties['searchQuery'] = search_query
if headers_only is True:
properties['packageContent'] = 'HEADER_ONLY'
else:
properties[

enthought/etsproxy | enthought/logger/util.py | Python | bsd-3-clause | 50 | 0
# proxy module
from apptools.logger.util import *

fweidemann14/x-gewinnt | game/test_game.py | Python | gpl-3.0 | 3,095 | 0.008401
import game as game
import pytest
import sys
sys.path.insert(0, '..')
def trim_board(ascii_board):
return '\n'.join([i.strip() for i in ascii_board.splitlines()])
t = trim_board
def test_new_board():
game.Board(3,3).ascii() == t("""
...
...
...
""")
game.Board(4,3).ascii() == t("""
....
....
....
""")
game.Board(3,4).ascii() == t("""
...
...
...
...
""")
def test_game():
board = game.Board(3,3,win=3)
assert board.count_tokens == 0
assert board.game_status == 'active'
assert board.turn_color == None
# drop first token
token = board.drop('x',0)
assert board.game_status == 'active'
assert token.position == (0,0)
assert token.color == 'x'
assert board.ascii() == t("""
...
...
x..
""")
assert board.count_tokens == 1
assert board.turn_color == 'o'
# drop second token
token = board.drop('o',0)
assert board.game_status == 'active'
assert token.position == (0,1)
assert token.color == 'o'
assert board.ascii() == t("""
...
o..
x..
""")
assert board.count_tokens == 2
assert board.turn_color == 'x'
# dropping the wrong color should raise an error
with pytest.raises(Exception):
token = board.drop('o',1)
# drop third token
token = board.drop('x',1)
assert board.game_status == 'active'
assert token.position == (1,0)
assert token.color == 'x'
board.ascii() == t("""
...
o..
xx.
""")
assert board.count_tokens == 3
assert board.turn_color == 'o'
# drop fourth token
token = board.drop('o',0)
assert board.game_status == 'active'
assert token.position == (0,2)
assert token.color == 'o'
board.ascii() == t("""
o..
o..
xx.
""")
assert board.count_tokens == 4
# drop fifth token
token = board.drop('x',2)
assert board.game_status == 'over'
assert board.won_by == 'x'
assert token.position == (2,0)
assert token.color == 'x'
board.ascii() == t("""
o..
o..
xxx
""")
assert board.count_tokens == 5
def test_load_board():
"""
The Board class should provide a load method to load a predefined board.
the load method should be implemented as a static method like this:
>>> class Test:
>>> @staticmethod
>>> def a_static_factory():
>>> t = Test()
>>> # do something with t and return it
>>> return t
the load function accepts a board layout. It retrieves the dimensions of the boar
|
d
and loads the provided data into the board.
"""
board = game.Board.load(t("""
o..
o..
xxx
"""))
def test_axis_strings():
board = game.Board.load(t("""
o..
o..
xxx
"""))
# get the axis strings
|
in this order: | \ / -
axis_strings = board.axis_strings(0,0)
assert axis_strings[0] == 'xoo'
assert axis_strings[1] == 'x'
assert axis_strings[2] == 'x..'
assert axis_strings[3] == 'xxx' # the winner :-)
assert board.won_by == 'x'

ivanprjcts/equinox-spring16-API | equinox_spring16_api/equinox_api/migrations/0005_application_new_att.py | Python | lgpl-3.0 | 463 | 0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-11 11:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('equinox_api', '0004_operation_description'),
]
operations = [
migrations.AddField(
            model_name='application',
name='new_att',
field=models.BooleanField(default=True),
),
]

dunkhong/grr | grr/server/grr_response_server/databases/db_foreman_rules_test.py | Python | apache-2.0 | 2,967 | 0.005056
#!/usr/bin/env python
"""Mixin tests for storing Foreman rules in the relational db."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import rdfvalue
from grr_response_server import foreman_rules
from grr.test_lib import test_lib
class DatabaseTestForemanRulesMixin(object):
"""An abstract class for testing db.Database implementations.
This mixin adds methods to test the handling of foreman rules.
"""
def _GetTestRule(self, hunt_id="H:123456", expires=None):
now = rdfvalue.RDFDatetime.Now()
expiration_time = expires or now + rdfvalue.Duration.From(2, rdfvalue.WEEKS)
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule",
hunt_id=hunt_id)
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="INSTALL_TIME",
operator=foreman_rules.ForemanIntegerClientRule.Operator
.LESS_THAN,
value=now))
])
return rule
def testForemanRuleWrite(self):
rule = self._GetTestRule()
self.db.WriteForemanRule(rule)
read = self.db.ReadAllForemanRules()
self.assertLen(read, 1)
self.assertEqual(read[0], rule)
def testForemanRuleRemove(self):
rule1 = self._GetTestRule("H:123456")
self.db.WriteForemanRule(rule1)
rule2 = self._GetTestRule("H:654321")
self.db.WriteForemanRule(rule2)
rule3 = self._GetTestRule("H:ABCDEF")
self.db.WriteForemanRule(rule3)
read = self.db.ReadAllForemanRules()
self.assertLen(read, 3)
self.db.RemoveForemanRule("H:654321")
read = self.db.ReadAllForemanRules()
self.assertLen(read, 2)
self.assertEqual(
sorted(read, key=lambda rule: rule.hunt_id), [rule1, rule3])
self.db.RemoveForemanRule("H:123456")
read = self.db.ReadAllForemanRules()
self.assertLen(read, 1)
self.assertEqual(read[0], rule3)
def testForemanRuleExpire(self):
    for i, ex in enumerate([100, 200, 300, 400, 500, 600]):
      expires = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(ex)
      rule = self._GetTestRule("H:00000%d" % i, expires=expires)
self.db.WriteForemanRule(rule)
self.assertLen(self.db.ReadAllForemanRules(), 6)
with test_lib.FakeTime(110):
self.db.RemoveExpiredForemanRules()
self.assertLen(self.db.ReadAllForemanRules(), 5)
with test_lib.FakeTime(350):
self.db.RemoveExpiredForemanRules()
self.assertLen(self.db.ReadAllForemanRules(), 3)
with test_lib.FakeTime(590):
self.db.RemoveExpiredForemanRules()
self.assertLen(self.db.ReadAllForemanRules(), 1)
# This file is a test library and thus does not require a __main__ block.

fixbugs/py-fixbugs-tools | test/75f6.py | Python | gpl-3.0 | 37 | 0.027027
#0y0r1+syp0ry1+0267*y0r-7i+Psr+1ge
ewerybody/a2
|
ui/a2widget/demo/key_value_table_demo.py
|
Python
|
gpl-3.0
| 1,931
| 0
|
import json
import pprint
from a2qt import QtWidgets
from a2widget.key_value_table import KeyValueTable
from a2widget.a2text_field import A2CodeField
_DEMO_DATA = {
'Name': 'Some Body',
    'Surname': 'Body',
'Street. Nr': 'Thingstreet 8',
'Street': 'Thingstreet',
'Nr': '8',
'PLZ': '12354',
'City': 'Frankfurt am Main',
'Phone+': '+1232222222',
'Phone': '2222222',
'Country': 'Germany',
}
class Demo(QtWidgets.QMainWindow):
def __init__(self):
super(Demo, self).__init__()
w = QtWidgets.QWidget(self)
self.setCentralWidget(w)
lyt = QtWidgets.QVBoxLayout(w)
self.key_value_table = KeyValueTable(self)
self.key_value_table.changed.connect(self.table_to_code)
lyt.addWidget(self.key_value_table)
btn = QtWidgets.QPushButton('GET DATA')
btn.clicked.connect(self.get_data)
lyt.addWidget(btn)
self.text_field = A2CodeField(self)
self.text_field.text_changed.connect(self.code_to_table)
lyt.addWidget(self.text_field)
btn = QtWidgets.QPushButton('SET DATA')
btn.clicked.connect(self.set_data)
lyt.addWidget(btn)
self.text_field.setText(json.dumps(_DEMO_DATA, indent=2))
self.set_data()
def table_to_code(self):
data = self.key_value_table.get_data()
self.text_field.setText(json.dumps(data, indent=2))
def code_to_table(self):
data = json.loads(self.text_field.text())
self.key_value_table.set_silent(data)
def get_data(self):
data = self.key_value_table.get_data()
print(data)
pprint.pprint(data, sort_dicts=False)
def set_data(self):
data = json.loads(self.text_field.text())
self.key_value_table.set_data(data)
def show():
app = QtWidgets.QApplication([])
win = Demo()
win.show()
app.exec()
if __name__ == '__main__':
show()

whitews/BAMA_Analytics | BAMA_Analytics/urls.py | Python | bsd-2-clause | 338 | 0.002959
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^', include('authenticate.urls')),
(r'^', include('analytics.urls')),
    (r'^admin/', include(admin.site.urls)),
(r'^api/', include('rest_framework.urls', namespace='rest_framework')),
)
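# Hedged note (not part of the original file): django.conf.urls.patterns() was removed
# in Django 1.10; on newer versions the equivalent configuration would be a plain list:
#   urlpatterns = [
#       url(r'^', include('authenticate.urls')),
#       url(r'^', include('analytics.urls')),
#       url(r'^admin/', include(admin.site.urls)),
#       url(r'^api/', include('rest_framework.urls', namespace='rest_framework')),
#   ]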
|
snark/ignorance
|
docs/conf.py
|
Python
|
isc
| 8,405
| 0.005354
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ignorance documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import ignorance
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ignorance'
copyright = u'2015, Steve Cook'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ignorance.__version__
# The full version, including alpha/beta/rc tags.
release = ignorance.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ignorancedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'ignorance.tex',
u'Ignorance Documentation',
u'Steve Cook', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ignorance',
u'Ignorance Documentation',
[u'Steve Cook'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ignorance',
u'Ignorance Documentation',
u'Steve Cook',
'ignorance',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#
|
FordyceLab/AcqPack
|
acqpack/utils.py
|
Python
|
mit
| 6,206
| 0.004351
|
import csv
import json
import numpy as np
import pandas as pd
def read_delim(filepath):
"""
Reads delimited file (auto-detects delimiter + header). Returns list.
:param filepath: (str) location of delimited file
:return: (list) list of records w/o header
"""
f = open(filepath, 'r')
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
has_header = csv.Sniffer().has_header(f.read(1024))
f.seek(0)
reader = csv.reader(f, dialect)
if has_header:
        next(reader)
ret = [line for line in reader]
return ret
def read_delim_pd(filepath):
"""
Reads delimited file (auto-detects delimiter + header). Returns pandas DataFrame.
:param filepath: (str) location of delimited file
:return: (DataFrame)
"""
f = open(filepath)
has_header = None
if csv.Sniffer().has_header(f.read(1024)):
has_header = 0
f.seek(0)
return pd.read_csv(f, header=has_header, sep=None, engine='python')
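# Hypothetical usage sketch (the file name is illustrative only, not part of the module):
#   df = read_delim_pd('positions.csv')  # delimiter and header row are auto-detected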
def lookup(table, lookup_cols, lookup_vals, output_cols=None, output_recs=None):
"""
Looks up records where lookup_cols == lookup_vals.
Optionally returns only specified output_cols and/or specified output_recs.
:param table: (DataFrame) the pandas DataFrame to use as a lookup table
:param lookup_cols: (str | list)
:param lookup_vals: (val | list)
    :param output_cols: (str | list) column(s) to keep in the returned records; all columns if None
    :param output_recs: (int | list) positional indices of the matching records to return; all matches if None
    :return: (DataFrame) records where lookup_cols == lookup_vals, restricted per output_cols/output_recs
"""
if type(lookup_cols) == str:
lookup_cols = [lookup_cols]
lookup_vals = [lookup_vals]
temp_df = pd.DataFrame(data=lookup_vals, columns=lookup_cols, copy=False)
output = table.merge(temp_df, copy=False)
if output_cols is not None:
if type(output_cols) == str:
output_cols = [output_cols]
output = output[output_cols]
if output_recs is not None:
output = output.iloc[output_recs]
return output
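# Hypothetical usage sketch (added for illustration; the table, column names, and values
# below are made up and not part of the original module):
def _lookup_example():
    table = pd.DataFrame({'name': ['A01', 'A02'], 'x': [0.0, 9.0], 'y': [0.0, 0.0]})
    # Return the x/y columns of the record(s) whose 'name' equals 'A01'.
    return lookup(table, 'name', 'A01', output_cols=['x', 'y'])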
def generate_position_table(num_rc, space_rc, offset=(0.0,0.0,0.0), to_clipboard=False):
"""
Generates a position table for a plate. Assumes that 'x' and 'c' are aligned and that
'y' and 'r' are aligned. These axes can be reflected by negating the corresponding 'space_rc';
translations can be applied via 'offset'. All entries are indexed by 'n' (newspaper order)
and 's' (serpentine order). Other columns may be added as needed, but Autosampler.goto()
requires 'x', 'y', and 'z' to function properly.
:param num_rc: (tup) number of rows and columns (num_rows, num_cols)
:param space_rc: (tup) spacing for rows and columns [mm] (spacing_rows, spacing_cols)
:param offset: (tup) 3-tuple of floats to be added to x,y,z [mm]
:param to_clipboard: (bool) whether to copy the position_table to the OS clipboard
:return: (DataFrame)
"""
# TODO: instead of offset, full affine option? can use negative space rc to reflect,
# but can't remap x -> y
temp = list()
headers = ['n', 's', 'r', 'c', 'name', 'x', 'y', 'z']
for r in range(num_rc[0]):
for c in range(num_rc[1]):
n = c + r * num_rc[1]
s = ((r + 1) % 2) * (c + r * num_rc[1]) + (r % 2) * ((r + 1) * num_rc[1] - (c + 1))
name = chr(64 + r + 1) + '{:02d}'.format(c + 1)
x = float(c * space_rc[1] + offset[0])
y = float(r * space_rc[0] + offset[1])
z = float(offset[2])
temp.append([n, s, r, c, name, x, y, z])
position_table = pd.DataFrame(temp, columns=headers)
if to_clipboard:
position_table.to_clipboard(index=False)
return position_table
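# Hypothetical usage sketch (added for illustration; the plate geometry is an assumption,
# not part of the original module): a 96-well layout, 8 rows x 12 columns at a 9 mm pitch.
def _position_table_example():
    plate = generate_position_table(num_rc=(8, 12), space_rc=(9.0, 9.0), offset=(0.0, 0.0, 0.0))
    # 'n' is newspaper order, 's' is serpentine order, and 'name' runs A01..H12.
    return plate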
def spacing(num_rc, p1, p2):
    """Element-wise spacing between adjacent positions, |p2 - p1| / (num_cols - 1, num_rows - 1),
    given the grid size num_rc = (num_rows, num_cols) and two extreme positions p1 and p2."""
    r, c = map(float, num_rc)
    return tuple(abs(np.nan_to_num(np.subtract(p2, p1) / (c - 1, r - 1))))
def load_mm_positionlist(filepath):
"""
Takes a MicroManager position list and converts it to a pandas DataFrame. Will load z-coordinates if
available.
:param filepath: (str)
:return: (DataFrame) position list with headers = "r, c, name, x, y, [z]"
"""
with open(filepath) as f:
data = json.load(f)
df_rcn = pd.io.json.json_normalize(data, ['POSITIONS'])[['GRID_ROW', 'GRID_COL', 'LABEL']]
df_pos = pd.io.json.json_normalize(data, ['POSITIONS', 'DEVICES'])[['DEVICE', 'X', 'Y']]
df_xy = df_pos.query("DEVICE=='XYStage'")[['X','Y']].reset_index(drop=True)
    df = pd.concat([df_rcn, df_xy], axis=1)
# check for z-axis
ds_z = df_pos.query("DEVICE=='ZStage'")['X'].reset_index(drop=True)
if len(ds_z)>0:
df['z'] = ds_z
    rename = {'GRID_ROW': 'r',
              'GRID_COL': 'c',
'LABEL': 'name',
'X': 'x',
'Y': 'y'}
df.rename(columns=rename, inplace=True)
return df
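# Hypothetical usage sketch (the file name is illustrative only, not part of the module):
#   pos = load_mm_positionlist('PositionList.pos')  # columns: r, c, name, x, y[, z]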
def generate_grid(c0, c1, l_img, p):
"""
Based on two points, creates a 2D-acquisition grid similar to what MicroManager would produce.
:param c0: (arr) first point; numpy 1d array of len 2
:param c1: (arr) second point; numpy 1d array of len 2
:param l_img: (float)
:param p: (float) desired percent overlap
:return: (DataFrame) position_list in the same format as load_mm_positionlist
"""
# TODO: does generate_grid subsume generate_position_table?
# n -> number of stage positions on an axis
n = 1 + np.ceil(np.abs(c1 - c0) / ((1 - p) * l_img)) # ct,ct
n = n.astype('int')
# l_acq = total_movement + l_img
# l_acq = l_img * (n - n*p + p) # um,um
sign = np.sign(c1 - c0)
# could also use cartesian product (itertools.product OR np.mgrid, stack)
# https://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
position_list = pd.DataFrame(columns=['r', 'c', 'name', 'x', 'y'], )
    for j in range(n[1]):  # iter y
        y = sign[1] * j * l_img * (1 - p) + c0[1]
        for i in range(n[0]) if not (j % 2) else reversed(range(n[0])):  # iter x (serp)
x = sign[0] * i * l_img * (1 - p) + c0[0]
r = j
c = i
name = '1-Pos_{:03}_{:03}'.format(c, r)
position_list.loc[len(position_list)] = [r, c, name, x, y]
position_list[['r', 'c']] = position_list[['r', 'c']].astype(int)
return position_list
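# Hypothetical usage sketch (added for illustration; the coordinates, image size, and
# overlap below are made-up values, not part of the original module):
def _generate_grid_example():
    c0 = np.array([0.0, 0.0])
    c1 = np.array([2000.0, 1000.0])
    # 500 um field of view with 10% overlap between neighbouring tiles.
    return generate_grid(c0, c1, l_img=500.0, p=0.1)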
|
shanot/imp
|
modules/core/test/test_surface_tethered_chain.py
|
Python
|
gpl-3.0
| 3,454
| 0.00029
|
import math
import random
import numpy as np
import IMP
import IMP.core
import IMP.test
def _get_beta(N, b):
return 3. / (2. * N * b**2)
def _get_score(z, N, b):
beta = _get_beta(N, b)
return beta * z**2 - math.log(2 * beta * z)
def _get_derv(z, N, b):
beta = _get_beta(N, b)
return 2 * beta * z - 1. / float(z)
def _get_linear_score(z, N, b):
slope = _get_linear_derv(N, b)
intercept = 5.258546595708 - .5 * math.log(_get_beta(N, b))
return slope * z + intercept
def _get_linear_derv(N, b):
return -141.407214101686 * _get_beta(N, b)**.5
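# Note (added for clarity, not part of the original tests): with beta = 3 / (2*N*b**2),
# the score beta*z**2 - log(2*beta*z) has derivative 2*beta*z - 1/z, which vanishes at
# z = 1/sqrt(2*beta). That is exactly the zmin value asserted in test_special_values
# below, where the derivative at zmin is also checked to be ~0.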
class Tests(IMP.test.TestCase):
"""Tests for SurfaceTetheredChain."""
def test_init(self):
"""Test correct initialization."""
func = IMP.core.SurfaceTetheredChain(10, 8)
func.set_was_used(True)
def test_evaluate(self):
"""Test evaluates to correct scores and derivatives."""
for i in range(100):
N = random.randint(1, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
max_z = 2 * N * b
beta = _get_beta(N, b)
min_z = .01 / (2 * beta)**.5
z_range = np.linspace(min_z, max_z, 100)
for z in z_range:
corr_score = _get_score(z, N, b)
corr_derv = _get_derv(z, N, b)
score, deriv = func.evaluate_with_derivative(z)
scoreonly = func.evaluate(z)
self.assertAlmostEqual(scoreonly, corr_score, delta=1e-4)
self.assertAlmostEqual(score, corr_score, delta=1e-4)
self.assertAlmostEqual(deriv, corr_derv, delta=1e-4)
def test_evaluate_linear(self):
"""Test linear region evaluates to correct scores and derivatives."""
for i in range(100):
N = random.randint(3, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
beta = _get_beta(N, b)
min_z = .01 / (2 * beta)**.5
z_range = np.linspace(-1, min_z, 100)
corr_derv = _get_linear_derv(N, b)
for z in z_range:
corr_score = _get_linear_score(z, N, b)
score, deriv = func.evaluate_with_derivative(z)
scoreonly = func.evaluate(z)
self.assertAlmostEqual(scoreonly / corr_score, 1, delta=1e-6)
self.assertAlmostEqual(score / corr_score, 1, delta=1e-6)
self.assertAlmostEqual(deriv / corr_derv, 1, delta=1e-6)
def test_special_values(self):
"""Test special distance values are correctly calculated."""
for i in range(10):
N = random.randint(3, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
beta = _get_beta(N, b)
zmin = 1. / (2 * beta)**.5
            zmean = .5 * (math.pi / beta)**.5
            self.assertAlmostEqual(func.get_distance_at_minimum(), zmin,
delta=1e-6)
self.assertAlmostEqual(func.evaluate_with_derivative(zmin)[1], 0.,
delta=1e-6)
self.assertAlmostEqual(func.get_average_distance(), zmean,
delta=1e-6)
if __name__ == '__main__':
IMP.test.main()
|
ltowarek/budget-supervisor
|
third_party/saltedge/swagger_client/models/simplified_attempt.py
|
Python
|
mit
| 31,285
| 0.000064
|
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SimplifiedAttempt(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_mode': 'str',
'api_version': 'str',
'automatic_fetch': 'bool',
'daily_refresh': 'bool',
'categorization': 'str',
'created_at': 'datetime',
'custom_fields': 'object',
'device_type': 'str',
'remote_ip': 'str',
'exclude_accounts': 'list[str]',
'user_present': 'bool',
'customer_last_logged_at': 'datetime',
'fail_at': 'datetime',
'fail_error_class': 'str',
        'fail_message': 'str',
'fetch_scopes': 'list[str]',
'finished': 'bool',
'finished_recent': 'bool',
'from_date': 'date',
'id': 'str',
'interactive': 'bool',
'locale': 'str',
'partial': 'bool',
        'store_credentials': 'bool',
'success_at': 'datetime',
'to_date': 'datetime',
'updated_at': 'datetime',
'show_consent_confirmation': 'bool',
'include_natures': 'list[str]',
'last_stage': 'Stage'
}
attribute_map = {
'api_mode': 'api_mode',
'api_version': 'api_version',
'automatic_fetch': 'automatic_fetch',
'daily_refresh': 'daily_refresh',
'categorization': 'categorization',
'created_at': 'created_at',
'custom_fields': 'custom_fields',
'device_type': 'device_type',
'remote_ip': 'remote_ip',
'exclude_accounts': 'exclude_accounts',
'user_present': 'user_present',
'customer_last_logged_at': 'customer_last_logged_at',
'fail_at': 'fail_at',
'fail_error_class': 'fail_error_class',
'fail_message': 'fail_message',
'fetch_scopes': 'fetch_scopes',
'finished': 'finished',
'finished_recent': 'finished_recent',
'from_date': 'from_date',
'id': 'id',
'interactive': 'interactive',
'locale': 'locale',
'partial': 'partial',
'store_credentials': 'store_credentials',
'success_at': 'success_at',
'to_date': 'to_date',
'updated_at': 'updated_at',
'show_consent_confirmation': 'show_consent_confirmation',
'include_natures': 'include_natures',
'last_stage': 'last_stage'
}
def __init__(self, api_mode=None, api_version=None, automatic_fetch=None, daily_refresh=None, categorization='personal', created_at=None, custom_fields=None, device_type=None, remote_ip=None, exclude_accounts=None, user_present=None, customer_last_logged_at=None, fail_at=None, fail_error_class=None, fail_message=None, fetch_scopes=None, finished=None, finished_recent=None, from_date=None, id=None, interactive=None, locale=None, partial=None, store_credentials=None, success_at=None, to_date=None, updated_at=None, show_consent_confirmation=None, include_natures=None, last_stage=None): # noqa: E501
"""SimplifiedAttempt - a model defined in Swagger""" # noqa: E501
self._api_mode = None
self._api_version = None
self._automatic_fetch = None
self._daily_refresh = None
self._categorization = None
self._created_at = None
self._custom_fields = None
self._device_type = None
self._remote_ip = None
self._exclude_accounts = None
self._user_present = None
self._customer_last_logged_at = None
self._fail_at = None
self._fail_error_class = None
self._fail_message = None
self._fetch_scopes = None
self._finished = None
self._finished_recent = None
self._from_date = None
self._id = None
self._interactive = None
self._locale = None
self._partial = None
self._store_credentials = None
self._success_at = None
self._to_date = None
self._updated_at = None
self._show_consent_confirmation = None
self._include_natures = None
self._last_stage = None
self.discriminator = None
if api_mode is not None:
self.api_mode = api_mode
if api_version is not None:
self.api_version = api_version
if automatic_fetch is not None:
self.automatic_fetch = automatic_fetch
if daily_refresh is not None:
self.daily_refresh = daily_refresh
if categorization is not None:
self.categorization = categorization
if created_at is not None:
self.created_at = created_at
if custom_fields is not None:
self.custom_fields = custom_fields
if device_type is not None:
self.device_type = device_type
if remote_ip is not None:
self.remote_ip = remote_ip
if exclude_accounts is not None:
self.exclude_accounts = exclude_accounts
if user_present is not None:
self.user_present = user_present
if customer_last_logged_at is not None:
self.customer_last_logged_at = customer_last_logged_at
if fail_at is not None:
self.fail_at = fail_at
if fail_error_class is not None:
self.fail_error_class = fail_error_class
if fail_message is not None:
self.fail_message = fail_message
if fetch_scopes is not None:
self.fetch_scopes = fetch_scopes
if finished is not None:
self.finished = finished
if finished_recent is not None:
self.finished_recent = finished_recent
if from_date is not None:
self.from_date = from_date
if id is not None:
self.id = id
if interactive is not None:
self.interactive = interactive
if locale is not None:
self.locale = locale
if partial is not None:
self.partial = partial
if store_credentials is not None:
self.store_credentials = store_credentials
if success_at is not None:
self.success_at = success_at
if to_date is not None:
self.to_date = to_date
if updated_at is not None:
self.updated_at = updated_at
if show_consent_confirmation is not None:
self.show_consent_confirmation = show_consent_confirmation
if include_natures is not None:
self.include_natures = include_natures
if last_stage is not None:
self.last_stage = last_stage
@property
def api_mode(self):
"""Gets the api_mode of this SimplifiedAttempt. # noqa: E501
the API mode of the customer that queried the API. # noqa: E501
:return: The api_mode of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._api_mode
@api_mode.setter
def api_mode(self, api_mode):
"""Sets the api_mode of this SimplifiedAttempt.
the API mode of the customer that queried the API. # noqa: E501
:param api_mode: The api_mode of this SimplifiedAttempt. # noqa: E501
:type: str
"""
allowed_values = ["app", "service"] # noqa: E501
if api_mode not in allowed_values:
raise ValueError(
"Invalid value for `api_mode` ({0}), must be one of {1}" # noqa: E501
.format(api_mode, allowed_values)
)
self._api_mode = api_mode
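    # Illustrative note (added here, not part of the generated client): the allowed_values
    # check above means assigning anything other than "app" or "service" to .api_mode
    # raises ValueError, e.g. SimplifiedAttempt().api_mode = "sandbox" would fail.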
@property
def api_version(self):
"""Gets the api_version of this SimplifiedAttempt. # noqa: E501
|
toway/towaymeetups
|
mba/alembic/versions/20150201_e567bd5c0b_make_banner_url_long.py
|
Python
|
gpl-3.0
| 479
| 0.004175
|
"""make banner url longger
Revision ID: e567bd5c0b
Revises: 23ab90c01600
Create Date: 2015-02-01 13:15:59.075956
"""
# revision identifiers, used by Alembic.
revision = 'e567bd5c0b'
down_revision = '23ab90c01600'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('banners', 'link_url',
type_=sa.String(200),
existing_type=sa.String(100),
nullable=True)
def downgrade():
pass
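# Hedged sketch (not part of the original migration): a symmetric downgrade, if ever
# needed, could shrink the column back to its previous length:
#   op.alter_column('banners', 'link_url',
#                   type_=sa.String(100),
#                   existing_type=sa.String(200),
#                   nullable=True)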
|