| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
amilcarsj/analytic
|
analytic/al_strategies.py
|
Python
|
gpl-3.0
| 12,074
| 0.004307
|
import math
import numpy as np
from collections import defaultdict
from analytic import trajectory_manager
import scipy.sparse as ss
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import http_get_solr_data
class RandomBootstrap(object):
"""Class - used if strategy selected is rand"""
def __init__(self, seed):
"""Instantiate :mod:`al.instance_strategies.RandomBootstrap`
**Parameters**
* seed (*int*) - trial number.
"""
self.randS = RandomStrategy(seed)
def bootstrap(self, pool, y=None, k=1):
"""
**Parameters**
* pool (*int*) - range of numbers within length of pool
* y - None or possible pool
* k (*int*) - 1 or possible bootstrap size
**Returns**
* randS.chooseNext(pool, k=k) - choose next pool
"""
return self.randS.chooseNext(pool, k=k)
class BootstrapFromEach(object):
"""Class - used if not bootstrapped"""
def __init__(self, seed):
"""Instantiate :mod:`al.instance_strategies.BootstrapFromEach`
**Parameters**
* seed (*int*) - trial number.
"""
self.randS = RandomStrategy(seed)
def bootstrap(self, pool, y, k=1):
"""
**Parameters**
* pool (*int*) - range of numbers within length of pool
* y - None or possible pool
* k (*int*) - 1 or possible bootstrap size
**Returns**
* chosen array of indices
"""
data = defaultdict(lambda: [])
for i in pool:
data[y[i]].append(i)
chosen = []
num_classes = len(data.keys())
for label in data.keys():
candidates = data[label]
indices = self.randS.chooseNext(candidates, k=k // num_classes)
chosen.extend(indices)
return chosen
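# For example, with pool = range(5), y = [0, 0, 1, 1, 2] and k = 3, one index is drawn
# at random from each of the three classes (k // num_classes candidates per class).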
class BaseStrategy(object):
"""Class - Base strategy"""
def __init__(self, seed=0):
"""Instantiate :mod:`al.instance_strategies.BaseStrategy`
**Parameters**
* seed (*int*) - 0 or trial number.
"""
self.randgen = np.random.RandomState(seed)
def chooseNext(self, pool, X=None, model=None, k=1, current_train_indices = None, current_train_y = None):
pass
class RandomStrategy(BaseStrategy):
"""Class - used if strategy is rand, inherits from :mod:`al.instance_strategies.BaseStrategy`"""
def chooseNext(self, pool, X=None, model=None, k=1, current_train_indices = None, current_train_y = None):
"""Overide method BaseStrategy.chooseNext
**Parameters**
* pool (*int*) - range of numbers within length of pool
* X - None or pool.toarray()
* model - None
* k (*int*) - 1 or step size
* current_train_indices - None or array of trained indices
* current_train_y - None or train_indices specific to y_pool
**Returns**
* [list_pool[i] for i in rand_indices[:k]] - array of random permutations given pool
"""
list_pool = list(pool)
#print 'list', [list_pool[i] for i in len(list_pool)]
rand_indices = self.randgen.permutation(len(pool))
return [list_pool[i] for i in rand_indices[:k]]
class UncStrategy(BaseStrategy):
"""Class - used if strategy selected is unc, inherits from :mod:`al.instance_strategies.BaseStrategy`"""
def __init__(self, seed=0, sub_pool=None):
"""Instantiate :mod:`al.instance_strategies.UncStrategy`
**Parameters**
* seed (*int*) - 0 or trial number.
* sub_pool - None or sub_pool parameter
"""
super(UncStrategy, self).__init__(seed=seed)
self.sub_pool = sub_pool
def chooseNext(self, pool, X=None, model=None, k=1, current_train_indices=None, current_train_y=None):
"""Overide method BaseStrategy.chooseNext
**Parameters**
* pool (*int*) - range of numbers within length of pool
* X - None or pool.toarray()
* model - None
* k (*int*) - 1 or step size
* current_train_indices - None or array of trained indices
* current_train_y - None or train_indices specific to y_pool
**Returns**
* [candidates[i] for i in uis[:k]]
"""
if self.sub_pool:
rand_indices = self.randgen.permutation(len(pool))
array_pool = np.array(list(pool))
candidates = array_pool[rand_indices[:self.sub_pool]]
else:
candidates = list(pool)
if ss.issparse(X):
if not ss.isspmatrix_csr(X):
X = X.tocsr()
#print "X", len(X), X
#print "candidates", len(candidates), candidates
#print "X[candidates]", X[candidates-1]
probs = model.predict_proba(X[candidates])
uncerts = np.min(probs, axis=1)
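# The smallest predicted class probability is largest when the model spreads probability
# across classes, so sorting it in descending order ranks the least confident candidates first.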
uis = np.argsort(uncerts)[::-1]
chosen = [candidates[i] for i in uis[:k]]
return chosen
class QBCStrategy(BaseStrategy):
"""Class - used if strategy selected is qbc, inherits from :mod:`al.instance_strategies.BaseStrategy`"""
# def __init__(self, classifier, classifier_args, seed=0, sub_pool=None, num_committee=10):
def __init__(self, classifier_name, seed=0, sub_pool=None, num_committee=10):
"""Instantiate :mod:`al.instance_strategies.QBCStrategy`
**Parameters**
* classifier_name - Represents the classifier that will be used for each committee member (default: MultinomialNB).
* seed (*int*) - 0 or trial number.
* sub_pool - None or sub_pool parameter
* num_committee - number of committee members (default: 10)
"""
super(QBCStrategy, self).__init__(seed=seed)
self.sub_pool = sub_pool
self.num_committee = num_committee
self.classifier = classifier_name
# self.classifier_args = classifier_args
def vote_entropy(self, sample):
""" Computes vote entropy.
**Parameters**
* sample
**Returns**
* out (*int*)
"""
votes = defaultdict(lambda: 0.0)
size = float(len(sample))
for i in sample:
votes[i] += 1.0
out = 0
for i in votes:
aux = (float(votes[i] / size))
out += ((aux * math.log(aux, 2)) * -1.)
return out
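# Illustrative values for vote_entropy: a unanimous committee has zero entropy and an
# even two-way split has maximal entropy, e.g. vote_entropy([0, 0, 0, 0]) == 0.0 and
# vote_entropy([0, 1, 0, 1]) == 1.0 (base-2 logarithm).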
def chooseNext(self, pool, X=None, model=None, k=1, current_train_indices=None, current_train_y=None):
|
"""Overide method BaseStrategy.chooseNext
**Parameters**
* pool (*int*) - range of numbers within length of pool
* X - None or pool.toarray()
* model - None
* k (*int*) - 1 or step size
* current_train_indices - None or array of tr
|
ained indices
* current_train_y - None or train_indices specific to y_pool
**Returns**
* [candidates[i] for i in dis[:k]]
"""
if self.sub_pool:
rand_indices = self.randgen.permutation(len(pool))
array_pool = np.array(list(pool))
candidates = array_pool[rand_indices[:self.sub_pool]]
else:
candidates = list(pool)
if ss.issparse(X):
if not ss.isspmatrix_csr(X):
X = X.tocsr()
# Create bags
comm_predictions = []
for c in range(self.num_committee):
# Make sure that we have at least one of each label in each bag
bfe = BootstrapFromEach(seed=c)
num_labels = len(np.unique(current_train_y))
initial = bfe.bootstrap(range(len(current_train_indices)), current_train_y, num_labels)
r_inds = self.randgen.randint(0, len(current_train_indices), size=len(current_train_indices) - num_labels)
r_inds = np.hstack((r_inds, np.array(initial)))
bag = [current_train_indices[i] for i in r_inds]
bag_y =
|
vzer/ToughRADIUS
|
toughradius/console/control/control.py
|
Python
|
agpl-3.0
| 905
| 0.003315
|
#!/usr/bin/env python
# coding:utf-8
import sys, os
from twisted.internet import reactor
from bottle import Bottle
from bottle import request
from bottle import response
from bottle import redirect
from bottle import static_file
from bottle import abort
from hashlib import md5
from urlparse import urljoin
from toughradius.console.base import *
from toughradius.console.libs import utils
import time
import bottle
import decimal
import datetime
import functools
import subprocess
import platform
app = Bottle()
@app.route('/static/:path#.+#')
def route_static(path, render):
|
static_path = os.path.join(os.path.split(os.path.split(__file__)[0])[0], 'static')
return static_file(path, root=static_path)
@app.get('/', apply=auth_ctl)
def control_index(render):
return render("index")
@app.route('/dashboard', apply=auth_ct
|
l)
def index(render):
return render("index", **locals())
|
sdgdsffdsfff/jumpserver
|
apps/perms/api/user_permission/common.py
|
Python
|
gpl-2.0
| 3,904
| 0.000512
|
# -*- coding: utf-8 -*-
#
import uuid
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView, Response
from rest_framework.generics import (
ListAPIView, get_object_or_404, RetrieveAPIView
)
from common.permissions import IsOrgAdminOrAppUser, IsOrgAdmin
from common.utils import get_logger
from ...utils import (
AssetPermissionUtilV2
)
from ...hands import User, Asset, SystemUser
from ... import serializers
from ...models import Action
from .mixin import UserAssetPermissionMixin
logger = get_logger(__name__)
__all__ = [
'RefreshAssetPermissionCacheApi',
'UserGrantedAssetSystemUsersApi',
'ValidateUserAssetPermissionApi',
'GetUserAssetPermissionActionsApi',
]
class GetUserAssetPermissionActionsApi(UserAssetPermissionMixin,
RetrieveAPIView):
permission_classes = (IsOrgAdminOrAppUser,)
serializer_class = serializers.ActionsSerializer
def get_obj(self):
user_id = self.request.query_params.get('user_id', '')
user = get_object_or_404(User, id=user_id)
return user
def get_object(self):
asset_id = self.request.query_params.get('asset_id', '')
system_id = self.request.query_params.get('system_user_id', '')
try:
asset_id = uuid.UUID(asset_id)
system_id = uuid.UUID(system_id)
except ValueError:
return Response({'msg': False}, status=403)
asset = get_object_or_404(Asset, id=asset_id)
system_user = get_object_or_404(SystemUser, id=system_id)
system_users_actions = self.util.get_asset_system_users_with_actions(asset)
actions = system_users_actions.get(system_user)
return {"actions": actions}
class ValidateUserAssetPermissionApi(UserAssetPermissionMixin, APIView):
permission_classes = (IsOrgAdminOrAppUser,)
def get_obj(self):
user_id = self.request.query_params.get('user_id', '')
user = get_object_or_404(User, id=user_id)
return user
def get(self, request, *args, **kwargs):
asset_id = request.query_params.get('asset_id', '')
system_id = request.query_params.get('system_user_id', '')
action_name = request.query_params.get('action_name', '')
try:
asset_id = uuid.UUID(asset_id)
system_id = uuid.UUID(system_id)
except ValueError:
return Response({'msg': False}, status=403)
asset = get_object_or_404(Asset, id=asset_id)
system_user = get_object_or_404(SystemUser, id=system_id)
system_users_actions = self.util.get_asset_system_users_with_actions(
asset)
actions = system_users_actions.get(system_user)
if action_name in Action.value_to_choices(actions):
return Response({'msg': True}, status=200)
return Response({'msg': False}, status=403)
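# Illustrative request (the URL route is defined elsewhere and assumed here):
# GET .../validate/?user_id=<uuid>&asset_id=<uuid>&system_user_id=<uuid>&action_name=connect
# returns {"msg": true} with HTTP 200 when the action is granted, otherwise {"msg": false} and 403.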
class RefreshAssetPermissionCacheApi(RetrieveAPIView):
permission_classes = (IsOrgAdmin,)
def retrieve(self, request, *args, **kwargs):
AssetPermissionUtilV2.expire_all_user_tree_cache()
return Response({'msg': True}, status=200)
class UserGrantedAssetSystemUsersApi(UserAssetPermissionMixin, ListAPIView):
permission_classes = (IsOrgAdm
|
inOrAppUser,)
serializer_class = serializers.AssetSystemUserSerializer
only_fields = serializers.AssetSystemUserSerializer.Meta.only_fields
def get_queryset(self):
asset_id = self.kwargs.get('asset_id')
asset = get_object_or_404(Asset, id=asset_id)
|
system_users_with_actions = self.util.get_asset_system_users_with_actions(asset)
system_users = []
for system_user, actions in system_users_with_actions.items():
system_user.actions = actions
system_users.append(system_user)
system_users.sort(key=lambda x: x.priority)
return system_users
|
algorhythms/LeetCode
|
658 Find K Closest Elements.py
|
Python
|
mit
| 2,116
| 0.007089
|
#!/usr/bin/python3
"""
Given a sorted array, two integers k and x, find the k closest elements to x in
the array. The result should also be sorted in ascending order. If there is a
tie, the smaller elements are always preferred.
Example 1:
Input: [1,2,3,4,5], k=4, x=3
Output: [1,2,3,4]
Example 2:
Input: [1,2,3,4,5], k=4, x=-1
Output: [1,2,3,4]
Note:
The value k is positive and will always be smaller than the length of the sorted array.
Length of the given array is positive and will not exceed 10^4.
Absolute value of elements in the array and x will not exceed 10^4.
"""
from typing import List
from bisect import bisect_left
from collections import deque
class Solution:
def findClosestElements(self, A: List[int], k: int, x: int) -> List[int]:
"""
binary search without two pointers scanning
"""
n = len(A)
lo = 0
hi = n - k
while lo < hi:
mid = (lo + hi) // 2
if abs(x - A[mid]) > abs(A[mid + k] - x):
# better to have A[mid+k] rather than A[mid]
|
lo = mid + 1
else:
hi = mid
return A[lo:lo+k]
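# Worked example matching the first assertion below: A=[1,2,3,4,5], k=4, x=3 gives
# lo=0, hi=1; at mid=0, |3-1| == |5-3| == 2, so hi becomes 0 and A[0:4] == [1,2,3,4].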
def
|
findClosestElements2(self, A: List[int], k: int, x: int) -> List[int]:
"""
input sorted array
two pointers
"""
n = len(A)
idx = bisect_left(A, x)
ret = deque()
i = idx - 1
j = idx
while k:
if 0 <= i < n and 0 <= j < n:
if abs(A[i] - x) <= abs(A[j] - x):
ret.appendleft(A[i])
i -= 1
else:
ret.append(A[j])
j += 1
elif 0 <= i < n:
ret.appendleft(A[i])
i -= 1
elif 0 <= j < n:
ret.append(A[j])
j += 1
else:
raise
k -= 1
return list(ret)
if __name__ == "__main__":
assert Solution().findClosestElements([1,2,3,4,5], 4, 3) == [1,2,3,4]
assert Solution().findClosestElements([1,2,3,4,5], 4, -1) == [1,2,3,4]
|
eschava/broadlink-mqtt
|
test.py
|
Python
|
mit
| 1,269
| 0.002364
|
# noinspection PyMethodMayBeStatic
class TestDevice:
def __init__(self,
|
cf):
self.type = cf.get('device_test_type', 'test')
self.host = ('test', 80)
self.mac = [1, 2, 3, 4, 5, 6]
def auth(self):
pass
# RM2/RM4
def check_temperature(self):
return 23.5
# RM4
def check_humidity(self):
return 56
def enter_learning(self):
pass
def check_data(self):
|
payload = bytearray(5)
payload[0] = 0xAA
payload[1] = 0xBB
payload[2] = 0xCC
payload[3] = 0xDD
payload[4] = 0xEE
return payload
def send_data(self, data):
pass
def check_sensors(self):
return {'temperature': 23.5, 'humidity': 36, 'light': 'dim', 'air_quality': 'normal', 'noise': 'noisy'}
def check_sensors_raw(self):
return {'temperature': 23.5, 'humidity': 36, 'light': 1, 'air_quality': 3, 'noise': 2}
def get_percentage(self):
return 33
def open(self):
pass
def get_state(self):
return {'pwr': 1, 'pwr1': 1, 'pwr2': 0, 'maxworktime': 60, 'maxworktime1': 60, 'maxworktime2': 0, 'idcbrightness': 50}
def check_power(self):
return {'s1': True, 's2': False, 's3': True, 's4': False}
|
osbjmg/evt
|
bin/evt.py
|
Python
|
mit
| 14,898
| 0.009666
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#import iso8602
#import iso-8601
import pprint
import cgi
import cgitb
import json
from slackclient import SlackClient
import datetime
import pytz
import re
import os
import sys
#### ToDo ####
# Assume wallClockTime is not None scenario time is not negative, go to the next day
# Convert a full date and time to user time
cgitbLogDir = os.environ.get('SLACKBOTS_J3B_CGI_LOGDIR')
cgitb.enable(display=0, logdir=cgitbLogDir, format='text') # display=1 if we want to print errors, they will log to cgitbLogDir always though
try :
EXPECTED_TOKEN = os.environ.get('SLACKBOTS_J3B_TOKEN') # token for eve /evt command
EXPECTED_TEAM_ID = os.environ.get('SLACKBOTS_J3B_TEAM') # team ID eve /evt command
BOT_ID = os.environ.get('SLACKBOTS_J3B_BOTID') # time helper bot ID for eve, j3b
BOT_TOKEN = os.environ.get('SLACKBOTS_J3B_BOT_TOKEN') # time helper bot token for j3b
except KeyError:
print 'One or more environment variables are not properly set.'
sys.exit(1)
slack_client = SlackClient(BOT_TOKEN)
def getUserTimezone(user) :
api_call = slack_client.api_call("users.info", user=user)
if api_call.get('ok'):
profile = api_call.get('user')
name = profile['name']
if profile['tz'] :
tz = profile['tz']
else :
tz = None
tz_label = profile['tz_label']
tz_offset_sec = profile['tz_offset']
# testnow = some time to represent a time between DST in EMEA and the US
now = datetime.datetime.now(tz=pytz.utc) # now is tz "aware"
return tz, tz_label, tz_offset_sec, now, name
def handle_command(command, channel, user) :
# basic options
# help
# provide desired time hh:mm, hh, hhmm, optional timezone override if none upon lookup
# spit back current and future local time
# spit back hh:mm until that time
# provide offset in hh:mm, or dd:hh:mm, russian NN-NN from now
# check timezone - /time --mytz
# debug/verbose to check inputs and maybe allow all options, or not?
# option: private/direct message quiet/silent
# option: time until dt
# use str replacement %s
# find libs for time and isdst()
# note mil time
"""
1 non arg just now
2 next time in the future in my tz ( if no tz, say so and gib utc)
3 next time in future in a specific offset
4 some hh:mm in the future (even day)
5 help
important: option to show the DST boundary dates in different TZs and current DST status
silent option
"""
wallClockTime = None
now_strings = { '--now', 'now', '-n'}
help_strings = {'--help', 'help', 'halp', '-h'}
verbose_strings = {'-v', '--verbose', 'verbose'}
offset_strings = {'-o', '--offset'}
tz_specified_strings = {'-tz', '--timezone', 'tz'}
check_mytz_strings = {'--mytz', '--sme','--check', '-c'}
if command :
# regex to match 200, 0300, 11:00, 16-18 time formats.
requestedTime = re.compile('.*?([0-9]{1,2}[:\-\.]*[0-9]{2})') # explain: match anything, 0 or more times [non-greedy], return one or two numbers, 0 or more dividers [: or -], another 2 numbers.
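# e.g. it extracts '200' from '200', '0300' from '0300', '11:00' from '11:00' and '16-18' from '16-18'.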
for cmd in command :
if re.match(requestedTime, cmd) :
wallClockTime = re.match(requestedTime, cmd).group(1)
# if no input, give current time
if (command is None or ((command is not None) and (set(command).intersection(now_strings)))):
tz_city, tz, offset, now, name = getUserTimezone(user)
if now.hour == 11 and now.minute < 31 :
dtWarn = '_*POSSIBLE DOWNTIME WARNING*_\n'
else :
dtWarn = ''
response_type = 'in_channel'
mainText = dtWarn + 'Current EVE Time: '+ str(now.strftime("*%H:%M* %Y-%m-%d"))
if tz_city is not None:
attachmentText = name + "'s current time: " + now.astimezone(pytz.timezone(tz_city)).strftime("%H:%M | %I:%M %p %Y-%m-%d") + ' (UTC' + str(offset/60/60) + '/' + tz + ')'
else : # some users do not have a tz in slack, but usually have the tz_label and offset, here I have to trust slack converted properly
attachmentText = name + "'s current time: " + (now + datetime.timedelta(seconds=offset)).strftime("%H:%M | %I:%M %p %Y-%m-%d") + ' (UTC' + str(offset/60/60) + '/' + tz + ')'
# consider making these all function calls in the event we need to break?
elif wallClockTime is not None: # a time is given, let's determine time until /from EVT (no date assumes next time we hit this time on the clock, no negatives for now)
# first, create a time string
timeString = ''
timeValid = True
threeOrFourDigits = re.compile('^([0-9]{1,2})[:\-\.]*([0-9]{2})')
outputList = re.findall(threeOrFourDigits, wallClockTime)
for item in outputList :
for stringInTuple in item :
timeString += stringInTuple
# validate time string
if len(timeString) == 4 :
hours = timeString[0:2]
minutes = timeString[2:]
elif len(timeString) == 3 :
hours = timeString[0]
minutes = timeString[1:]
if not (0 <= int(hours) <= 23) :
timeValid = False
if not ( 0 <= int(minutes) <= 59 )
|
:
timeValid = False
# convert time
# present time in a nifty manner, left/right eve time, your time, difference, colors
# see elephants formatting, color
t
|
z_city, tz, offset, now, name = getUserTimezone(user)
response_type = 'in_channel'
if timeValid is True :
hours=int(hours)
minutes=int(minutes)
reqdEveTime = now.replace(hour=hours, minute=minutes)
difference = int((reqdEveTime - now).total_seconds())
differenceH, remainder = divmod(difference, 3600)
differenceM, s = divmod (remainder, 60)
if difference < 0 :
sign = ''
fromNow = 'ago'
else :
sign = '+'
fromNow = 'from now'
theDeltaSecs = datetime.timedelta(seconds=difference)
newTimeUTC = now + theDeltaSecs
if newTimeUTC.hour == 11 and newTimeUTC.minute < 31 :
dtWarn = '_*POSSIBLE DOWNTIME WARNING*_\n'
else :
dtWarn = ''
if tz_city is not None:
mainText = dtWarn + 'Requested time: '+ reqdEveTime.strftime("%H:%M %Y-%m-%d") + ' EVE Time\n ' \
+ '*' + sign + str(differenceH) + 'h '+ str(differenceM) + 'm* ' + fromNow + ': ' \
+ newTimeUTC.astimezone(pytz.timezone(tz_city)).strftime("%H:%M | %I:%M %p %Y-%m-%d") + ' (UTC' + str(offset/60/60) + '/' + tz + ') \n'
attachmentText = name + "'s current time: " + now.astimezone(pytz.timezone(tz_city)).strftime("%H:%M | %I:%M %p %Y-%m-%d") + ' (UTC' + str(offset/60/60) + '/' + tz + ')\n' \
'EVE current time: ' + now.strftime("%H:%M %Y-%m-%d")
else : # some users do not have a tz in slack, but usually have the tz_label and offset, here I have to trust slack converted properly
mainText = dtWarn + 'Requested time: '+ reqdEveTime.strftime("%H:%M %Y-%m-%d") + ' EVE Time\n ' \
+ '*' + sign + str(differenceH) + 'h '+ str(differenceM) + 'm* ' + fromNow + ': ' \
+ (newTimeUTC + datetime.timedelta(seconds=offset)).strftime("%H:%M | %I:%M %p %Y-%m-%d") + ' (UTC' + str(offset/60/60) + '/' + tz + ') \n'
attachmentText = name + "'s current time: " + (now + datetime.timedelta(seconds=offset)).strftime("%H:%M | %I:%M %p %Y-%m-%d") + ' (UTC' + str(offset/60/60) + '/' + tz + ')\n' \
'EVE current time: ' + now.strftime("%H:%M %Y-%m-%d")
else :
response_type = 'ephemeral'
mainText = 'Error: requested an invalid EVE time (' + hours + ':' + minutes + ')'
attachmentText = ''
# verbose, debug and give current time OR try to determine if we are doing another variant
elif set(command).intersection(verbose_strings) :
tz_city, tz, offset, n
|
shuxin/androguard
|
androguard/decompiler/dad/decompile.py
|
Python
|
apache-2.0
| 18,369
| 0.001633
|
from __future__ import print_function
import sys
from builtins import input
from builtins import map
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import next
from builtins import object
from builtins import range
from builtins import str
sys.path.append('./')
import logging
import struct
from collections import defaultdict
import androguard.core.androconf as androconf
import androguard.decompiler.dad.util as util
from androguard.core.analysis import analysis
from androguard.core.bytecodes import apk, dvm
from androguard.decompiler.dad.ast import (
JSONWriter, parse_descriptor, literal_string, literal_hex_int,
dummy)
from androguard.decompiler.dad.control_flow import identify_structures
from androguard.decompiler.dad.dataflow import (
build_def_use, place_declarations, dead_code_elimination,
register_propagation, split_variables)
from androguard.decompiler.dad.graph import construct, simplify, split_if_nodes
from androguard.decompiler.dad.instruction import Param, ThisParam
from androguard.decompiler.dad.writer import Writer
from androguard.util import read
def auto_vm(filename):
ret = androconf.is_android(filename)
if ret == 'APK':
return dvm.DalvikVMFormat(apk.APK(filename).get_dex())
elif ret == 'DEX':
return dvm.DalvikVMFormat(read(filename))
elif ret == 'DEY':
return dvm.DalvikOdexVMFormat(read(filename))
return None
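# For example, auto_vm('app.apk') builds a DalvikVMFormat from the APK's embedded dex,
# auto_vm('classes.dex') parses the dex file directly, and an unrecognised file returns None.
# (File names here are illustrative.)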
# No separate DvField class currently
def get_field_ast(field):
triple = field.get_class_name()[1:-1], field.get_name(
), field.get_descriptor()
expr = None
if field.init_value:
val = field.init_value.value
expr = dummy(str(val))
if val is not None:
if field.get_descriptor() == 'Ljava/lang/String;':
expr = literal_string(val)
elif field.proto == 'B':
expr = literal_hex_int(struct.unpack('<b', struct.pack("B", val))[0])
return {
'triple': triple,
'type': parse_descriptor(field.get_descriptor()),
'flags': util.get_access_field(field.get_access_flags()),
'expr': expr,
}
class DvMethod(object):
def __init__(self, methanalysis):
method = methanalysis.get_method()
self.method = method
self.start_block = next(methanalysis.get_basic_blocks().get(), None)
self.cls_name = method.get_class_name()
self.name = method.get_name()
self.lparams = []
self.var_to_name = defaultdict()
self.writer = None
self.graph = None
self.ast = None
self.access = util.get_access_method(method.get_access_flags())
desc = method.get_descriptor()
self.type = desc.split(')')[-1]
self.params_type = util.get_params_type(desc)
self.triple = method.get_triple()
self.exceptions = methanalysis.exceptions.exceptions
code = method.get_code()
if code is None:
logger.debug('No code : %s %s', self.name, self.cls_name)
else:
start = code.registers_size - code.ins_size
if 'static' not in self.access:
self.var_to_nam
|
e[start] = ThisParam(start, self.cls_name)
self.lparams.append(start)
start += 1
num_param = 0
for ptype in self.params_type:
param = start + num_param
self.lparams.append(param)
|
self.var_to_name[param] = Param(param, ptype)
num_param += util.get_type_size(ptype)
if not __debug__:
from androguard.core import bytecode
bytecode.method2png('/tmp/dad/graphs/%s#%s.png' % \
(self.cls_name.split('/')[-1][:-1], self.name), methanalysis)
def process(self, doAST=False):
logger.debug('METHOD : %s', self.name)
# Native methods... no blocks.
if self.start_block is None:
logger.debug('Native Method.')
if doAST:
self.ast = JSONWriter(None, self).get_ast()
else:
self.writer = Writer(None, self)
self.writer.write_method()
return
graph = construct(self.start_block, self.var_to_name, self.exceptions)
self.graph = graph
if not __debug__:
util.create_png(self.cls_name, self.name, graph, '/tmp/dad/blocks')
use_defs, def_uses = build_def_use(graph, self.lparams)
split_variables(graph, self.var_to_name, def_uses, use_defs)
dead_code_elimination(graph, def_uses, use_defs)
register_propagation(graph, def_uses, use_defs)
# FIXME var_to_name need to contain the created tmp variables.
# This seems to be a workaround, we add them into the list manually
for var, i in def_uses:
if not isinstance(var, int):
self.var_to_name[var] = var.upper()
place_declarations(graph, self.var_to_name, def_uses, use_defs)
del def_uses, use_defs
# After the DCE pass, some nodes may be empty, so we can simplify the
# graph to delete these nodes.
# We start by restructuring the graph by splitting the conditional nodes
# into a pre-header and a header part.
split_if_nodes(graph)
# We then simplify the graph by merging multiple statement nodes into
# a single statement node when possible. This also deletes empty nodes.
simplify(graph)
graph.compute_rpo()
if not __debug__:
util.create_png(self.cls_name, self.name, graph,
'/tmp/dad/pre-structured')
identify_structures(graph, graph.immediate_dominators())
if not __debug__:
util.create_png(self.cls_name, self.name, graph,
'/tmp/dad/structured')
if doAST:
self.ast = JSONWriter(graph, self).get_ast()
else:
self.writer = Writer(graph, self)
self.writer.write_method()
def get_ast(self):
return self.ast
def show_source(self):
print(self.get_source())
def get_source(self):
if self.writer:
return str(self.writer)
return ''
def get_source_ext(self):
if self.writer:
return self.writer.str_ext()
return []
def __repr__(self):
# return 'Method %s' % self.name
return 'class DvMethod(object): %s' % self.name
class DvClass(object):
def __init__(self, dvclass, vma):
name = dvclass.get_name()
if name.find('/') > 0:
pckg, name = name.rsplit('/', 1)
else:
pckg, name = '', name
self.package = pckg[1:].replace('/', '.')
self.name = name[:-1]
self.vma = vma
self.methods = dvclass.get_methods()
self.fields = dvclass.get_fields()
self.code = []
self.inner = False
access = dvclass.get_access_flags()
# If interface we remove the class and abstract keywords
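# In the Dalvik/Java access-flag encoding, 0x200 is ACC_INTERFACE and 0x400 is ACC_ABSTRACT.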
if 0x200 & access:
prototype = '%s %s'
if access & 0x400:
access -= 0x400
else:
prototype = '%s class %s'
self.access = util.get_access_class(access)
self.prototype = prototype % (' '.join(self.access), self.name)
self.interfaces = dvclass.get_interfaces()
self.superclass = dvclass.get_superclassname()
self.thisclass = dvclass.get_name()
logger.info('Class : %s',
|
code-for-india/sahana_shelter_worldbank
|
private/templates/India/config.py
|
Python
|
mit
| 167,905
| 0.007278
|
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from datetime import timedelta
from gluon import current, Field, URL
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_EMPTY_OR, IS_NOT_EMPTY
from s3.s3fields import S3Represent
from s3.s3resource import S3FieldSelector
from s3.s3utils import S3DateTime, s3_auth_user_represent_name, s3_avatar_represent, s3_unicode
from s3.s3validators import IS_INT_AMOUNT, IS_LOCATION_SELECTOR2, IS_ONE_OF
from s3.s3widgets import S3LocationSelectorWidget2
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
"""
Template settings for DRM Portal
"""
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
settings.auth.registration_requires_approval = True
settings.auth.registration_requires_verification = False
settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -----------------------------------------------------------------------------
# Security Policy
#settings.security.policy = 6 # Realms
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
def drmp_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
tablename = table._tablename
if tablename == "cms_post":
# Give the Post the Realm of the author's Organisation
db = current.db
utable = db.auth_user
otable = current.s3db.org_organisation
if "created_by" in row:
query = (utable.id == row.created_by) & \
(otable.id == utable.organisation_id)
else:
query = (table.id == row.id) & \
(utable.id == table.created_by) & \
(otable.id == utable.organisation_id)
org = db(query).select(otable.pe_id,
limitby=(0, 1)).first()
if org:
return org.pe_id
# Follow normal rules
return 0
settings.auth.realm_entity = drmp_realm_entity
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate = ["India"]
settings.base.system_name = T("India Disaster Risk Management Information System")
settings.base.system_name_short = T("DRMIS")
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "India"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en-gb", "English"),
])
# Default Language
settings.L10n.default_language = "en-gb"
# Default timezone for users
settings.L10n.utc_offset = "UTC +0530"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["IN"]
# Until we add support to LocationSelector2 to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Hide unnecessary Toolbar items
settings.gis.nav_controls = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"CHF" : T("Swiss Francs"),
"EUR" : T("Euros"),
"GBP" : T("Great British Pounds"),
"USD" : T("United States Dollars"),
}
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# -----------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Charts",
"widgets": [{"method": "report", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
settings.org.site_label = "Office/Shelter/Hospital"
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Links to Filtered Components for Donors & Partners
#settings.project.org
|
anisation_roles = {
# 1: T("Host National Society"),
# 2: T("Partner"),
# 3: T("Donor"),
# #4: T("Customer"), # T("Beneficiary")?
# #5: T("Supplier"),
# 9: T("Partner National Society"),
#}
# -----------------------------------------------------------------------------
# Notifications
# Template for the
|
sub
|
irvingprog/pilas
|
pilas/test/test_interface.py
|
Python
|
lgpl-3.0
| 736
| 0.002717
|
import pilas
def test_todos_los_objetos_de_interfaz_se_pueden_crear():
pilas.iniciar()
deslizador = pilas.interfaz.Deslizador()
assert deslizador
assert deslizador.progreso == 0
boton = pilas.interfaz.Boton()
assert boton
|
ingreso = pilas.interfaz.IngresoDeTexto()
assert ingreso
try:
pilas.interfaz.ListaSeleccion()
except TypeError:
assert True # This exception is expected because an argument is mandatory
lista = pilas.interfaz.ListaSeleccion([('uno')])
assert lista
try:
pilas.interfaz.Selector()
except TypeError:
assert True # the 'texto' argument is mandatory.
selector = pilas.interfaz.Selector("ho
|
la")
assert selector
|
dnjohnstone/hyperspy
|
hyperspy/tests/component/test_gaussian2d.py
|
Python
|
gpl-3.0
| 2,985
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import math
import numpy as np
from numpy.testing import assert_allclose
from hyperspy.components2d import Gaussian2D
sigma2fwhm = 2 * np.sqrt(2 * np.log(2))
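# For a Gaussian, FWHM = 2 * sqrt(2 * ln 2) * sigma, i.e. roughly 2.3548 * sigma.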
def test_function():
g = Gaussian2D()
g.A.value = 14
g.sigma_x.value = 1.
g.sigma_y.value = 2.
g.centre_x.value = -5.
g.centre_y.value = -5.
assert_allclose(g.function(-5, -5), 1.1140846)
assert_allclose(g.function(-2, -3), 0.007506643)
assert g._is2D
assert g._position_x == g.centre_x
assert g._position_y == g.centre_y
def test_util_fwhm_set():
g1 = Gaussian2D()
g1.fwhm_x = 0.33
g1.fwhm_y = 0.33
g1.A.value = 1.0
assert_allclose(g1.fwhm_x, g1.sigma_x.value * sigma2fwhm)
assert_allclose(g1.fwhm_y, g1.sigma_y.value * sigma2fwhm)
def test_util_fwhm_get():
g1 = Gaussian2D(sigma_x=0.33, sigma_y=0.33)
g1.A.value = 1.0
assert_allclose(g1.fwhm_x, g1.sigma_x.value * sigma2fwhm)
assert_allclose(g1.fwhm_y, g1.sigma_y.value * sigma2fwhm)
def test_util_fwhm_getset():
g1 = Gaussian2D()
g1.fwhm_x = 0.33
g1.fwhm_y = 0.33
assert g1.fwhm_x == 0.33
assert g1.fwhm_y == 0.33
def test_properties():
g = Gaussian2D(add_rotation=True)
angle
|
= np.radians(20)
g.rotation_angle.value = angle
assert_allclose(g.rotation_angle_wrapped, angle)
angle = np.radians(380)
g.rotation_angle.value = angle
assert_allclose(g.rotation_angle_wrapped, math.fmod(angle, 2 * np.pi))
g = Gaussian2D(add_rotation=True)
g.sigma_x.value = 0.5
g.sigma_y.value = 0.1
assert g.ellipticity == 5.0
assert g.rotation_angle.value == 0
assert g.sigma_major == 0.5
assert g.sigma_minor == 0.
|
1
angle = np.radians(20)
g.rotation_angle.value = angle
assert_allclose(g.rotation_angle_wrapped, angle)
assert_allclose(g.rotation_major_axis, angle)
g = Gaussian2D(add_rotation=True)
g.sigma_x.value = 0.1
g.sigma_y.value = 0.5
assert g.ellipticity == 5.0
assert g.rotation_angle.value == 0
assert g.sigma_major == 0.5
assert g.sigma_minor == 0.1
angle = np.radians(20)
g.rotation_angle.value = angle
assert_allclose(g.rotation_angle_wrapped, angle)
assert_allclose(g.rotation_major_axis, angle - np.pi / 2)
|
bqbn/addons-server
|
src/olympia/lib/tests/test_cache.py
|
Python
|
bsd-3-clause
| 1,418
| 0.001445
|
# -*- coding: utf-8 -*-
from django.utils import translation
from django.core.cache import cache
from olympia.lib.cache import memoize, memoize_key, make_key
def test_make_key():
with translation.override('en-US'):
assert make_key('é@øel') == 'é@øel:en-us'
with translation.override('de'):
assert make_key('é@øel') == 'é@øel:de'
with translation.override('de'):
assert make_key('é@øel', with_locale=False) == 'é@øel'
with translation.override(
|
'en-US'):
assert make_key('é@øel', normalize=True) == '2798e65bbe384320c9da7930e93e63fb'
assert (
make_key('é@øel', with_locale=
|
False, normalize=True)
== 'a83feada27737072d4ec741640368f07'
)
with translation.override('fr'):
assert make_key('é@øel', normalize=True) == 'bc5208e905c8dfcc521e4196e16cfa1a'
def test_memoize_key():
assert memoize_key('foo', ['a', 'b'], {'c': 'e'}) == (
'memoize:foo:9666a2a48c17dc1c308fb327c2a6e3a8'
)
def test_memoize():
@memoize('f')
def add(*args):
return sum(args)
cache_key = memoize_key('f', 1, 2)
assert add(1, 2) == cache.get(cache_key)
def test_memcached_unicode():
"""Regression test for
https://github.com/linsomniac/python-memcached/issues/79
"""
cache.set('këy', 'Iñtërnâtiônàlizætiøn2')
assert cache.get('këy') == 'Iñtërnâtiônàlizætiøn2'
|
etos/django
|
tests/model_fields/test_floatfield.py
|
Python
|
bsd-3-clause
| 1,149
| 0
|
from django.db import transaction
from django.test import TestCase
from .models import FloatModel
class TestFloatField(TestCase):
def test_float_validates_object(self):
instance = FloatModel(size=2.5)
# Try setting float field to unsaved object
instance.size = instance
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Set value to valid and save
instance.size = 2.5
instance.save()
self.assertTrue(instance.id)
# Set field to object on saved insta
|
nce
instance.size = instance
msg = (
'Tried to update field model_fields.FloatModel.size with a model '
'instance, %r. Use a value '
'compatible with FloatField.'
) % instance
with transaction.atomic():
with self.assertRaisesMessage(TypeError, msg):
instance.save()
# Try setting field to object on retrieved object
obj = Flo
|
atModel.objects.get(pk=instance.id)
obj.size = obj
with self.assertRaises(TypeError):
obj.save()
|
r0k3/arctic
|
tests/unit/date/test_util.py
|
Python
|
lgpl-2.1
| 3,570
| 0.003641
|
import pytest
import pytz
from datetime import datetime as dt
from arctic.date import datetime_to_ms, ms_to_datetime, mktz, to_pandas_closed_closed, DateRange, OPEN_OPEN, CLOSED_CLOSED
from arctic.date._mktz import DEFAULT_TIME_ZONE_NAME
from arctic.date._util import to_dt
@pytest.mark.parametrize('pdt', [
dt(2007, 3, 25, 1, tzinfo=mktz('Europe/London')),
dt(2004, 10, 31, 23, 3, tzinfo=mktz('Europe/London')),
dt(1990, 4, 5, 0, 0, tzinfo=mktz('Europe/London')),
dt(2007, 3, 25, 1, tzinfo=mktz('EST')),
dt(2004, 10, 31, 23, 3, tzinfo=mktz('EST')),
dt(1990, 4, 5, 0, 0, tzinfo=mktz('EST')),
]
)
def test_datetime_to_ms_and_back(pdt):
i = datetime_to_ms(pdt)
pdt = pdt.astimezone(mktz())
pdt2 = ms_to_datetime(i)
assert pdt == pdt2
def test_datetime_to_ms_and_back_microseconds():
pdt = dt(2012, 8, 1, 12, 34, 56, 999999, tzinfo=mktz(DEFAULT_TIME_ZONE_NAME))
i = datetime_to_ms(pdt)
pdt2 = ms_to_datetime(i)
assert pdt != pdt2
assert pdt.year == pdt2.year
assert pdt.month == pdt2.month
assert pdt.day == pdt2.day
assert pdt.hour == pdt2.hour
assert pdt.minute == pdt2.minute
assert pdt.second == pdt2.second
# Microsecond precision loss inevitable.
assert pdt.microsecond // 1000 == pdt2.microsecond // 1000
def test_daterange_closedclosed_None():
assert to_pandas_closed_closed(None) is None
def test_daterange_closedclosed():
date_range = DateRange(dt(2013, 1, 1, tzinfo=mktz('Europe/London')),
dt(2014, 2, 1, tzinfo=mktz('Europe/London')), OPEN_OPEN)
expected = DateRange(dt(2013, 1, 1, 0, 0, 0, 1000, tzinfo=mktz('Europe/London')),
dt(2014, 1, 31, 23, 59, 59, 999000, tzinfo=mktz('Europe/London')),
CLOSED_CLOSED)
act = to_pandas_closed_closed(date_range)
assert act == expected
def test_daterange_closedclosed_no_tz():
date_range = DateRange(dt(2013, 1, 1),
dt(2014, 2, 1), OPEN_OPEN)
expected = DateRange(dt(2013, 1, 1, 0, 0, 0, 1000, tzinfo=mktz()),
dt(2014, 1, 31, 23, 59, 59, 999000, tzinfo=mktz()),
CLOSED_CLOSED)
act = to_pandas_closed_closed(date_range)
assert act == expected
def test_to_dt_0():
assert to_dt(0) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_0_default():
assert to_dt(0, mktz('UTC')) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_dt_no_tz():
with pytest.raises(ValueError):
assert to_dt(dt(1970, 1, 1)) == dt(1970, 1, 1, tzinfo=mktz())
def test_to_dt_dt_no_tz_default():
assert to_dt(dt(1970, 1, 1), mktz('UTC')) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_dt_tz():
assert to_dt(dt(1970, 1, 1, tzinfo=mktz('UTC'))) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_dt_tz_default():
assert
|
to_dt(dt(1970, 1, 1, tzinfo=mktz('UTC')), mktz('Europe/London')) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_daterange_raises():
with pytest.raises(ValueError):
assert(DateRange(dt(2013, 1, 1), dt(2000, 1, 1)))
def test_daterange_eq():
dr = DateRange(dt(2013, 1, 1))
assert((dr == None) == False)
assert(dr == dr)
def test_daterange_lt():
dr = DateRange(dt(2013, 1, 1))
dr2 = DateRange(dt(2001, 1, 1))
assert(dr2 < dr)
dr.start = None
assert(
|
(dr2 < dr) == False)
|
magnusgasslander/sqlalchemy-bigquery
|
sqlalchemy_bigquery/gcp/authorize/authorize.py
|
Python
|
mit
| 3,503
| 0.003426
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import httplib2
from oauth2client.client import GoogleCredentials
from oauth2client.service_account import ServiceAccountCredentials
"""
Copied from airflow integration
"""
class GoogleCloudBaseHook(object):
"""
A base hook for Google cloud-related hooks. Google cloud has a shared REST
API client that is built in the same way no matter which service you use.
This class helps construct and authorize the credentials needed to then
call apiclient.discovery.build() to actually discover and build a client
for a Google cloud service.
The class also contains some miscellaneous helper functions.
All hooks derived from this base hook use the 'Google Cloud Platform' connection
type. Two ways of authentication are supported:
Default credentials: Only specify 'Project Id'. Then you need to have executed
``gcloud auth`` on the Airflow worker machine.
JSON key file: Specify 'Project Id', 'Key Path' and 'Scope'.
Legacy P12 key files are not supported.
"""
def __init__(self, conn_id, delegate_to=None):
"""
:param conn_id: The connection ID to use when fetching connection info.
:type conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: string
"""
self.conn_id = conn_id
self.delegate_to = delegate_to
self.extras = self.get_connection(conn_id).extra_dejson
def _
|
authorize(self, scope = 'https://www.googleapis.com/auth/bigquery', key_path = None):
"""
Returns an authorized HTTP object to be used to build a Google cloud
service hook connection.
"""
kwargs = {}
if not key_path:
logging.info('Getting connection using `gcloud
|
auth` user, since no key file '
'is defined for hook.')
credentials = GoogleCredentials.get_application_default()
else:
if not scope:
raise Exception('Scope should be defined when using a key file.')
scopes = [s.strip() for s in scope.split(',')]
logging.info('Using scopes: %s', scopes)
if key_path.endswith('.json'):
logging.info('Getting connection using a JSON key file.')
credentials = ServiceAccountCredentials.from_json_keyfile_name(key_path, scopes)
elif key_path.endswith('.p12'):
raise Exception('Legacy P12 key files are not supported, '
'use a JSON key file.')
else:
raise Exception('Unrecognised extension for key file.')
http = httplib2.Http()
return credentials.authorize(http)
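# Hypothetical usage sketch (connection id, key path and the discovery call are assumptions,
# not part of this module):
# hook = GoogleCloudBaseHook(conn_id='bigquery_default')
# http = hook._authorize(key_path='/path/to/service-account.json')
# service = googleapiclient.discovery.build('bigquery', 'v2', http=http)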
|
Jidgdoi/PacmanPy
|
src/GhostAI.py
|
Python
|
gpl-2.0
| 5,502
| 0.035474
|
# -*- coding:utf-8 -*-
# Cyril Fournier
# 19/01/2016
import random
import threading
import Queue
import time
import UtilsAndGlobal as UAG
# ==============================
# === Class Ghost ===
# ==============================
class Ghost():
"""
Object representing a ghost.
"""
def __init__(self, ID, state, color=''):
self.character = UAG.CellCharacterGhost
self.ID = ID
self.state = state
self.color = color or "\033[1;3%sm" %random.choice([1,2,4,7])
self.mvt =
|
UAG.MovementUp
self.countdownFear = 0.0
def __repr__(self):
return "%s%s\033[0m: %s" %(self.color, self.ID, "Alive" if self.state == 1 else "Afraid" if self.state == 2
|
else "Dead")
def setNewDirection(self, direction):
"""
Set new direction for the ghost.
"""
self.mvt = direction
def booh(self):
"""
Change ghost's state to GhostAfraid and make him turn back.
"""
if self.state == UAG.GhostAlive:
self.state = UAG.GhostAfraid
# Turn back
if self.mvt == UAG.MovementUp: self.mvt = UAG.MovementDown
elif self.mvt == UAG.MovementDown: self.mvt = UAG.MovementUp
elif self.mvt == UAG.MovementRight: self.mvt = UAG.MovementLeft
else: self.mvt = UAG.MovementRight
# Start FearTime countdown
self.countdownFear = time.time() + UAG.FearTime
else:
print "The ghost %s is not alive, he can't be afraid." %self.ID
def notAfraidAnymoreBitch(self):
"""
Ghost is not afraid anymore.
"""
if self.state == UAG.GhostAfraid:
self.state = UAG.GhostAlive
self.countdownFear = 0.0
else: print "The ghost %s is not afraid, he cann't be not afraid anymore." %self.ID
def die(self):
"""
Change ghost's state to GhostDead.
"""
if self.state == UAG.GhostAfraid:
self.state = UAG.GhostDead
self.countdownFear = 0.0
else: print "The ghost %s is not afraid, he can't die." %self.ID
def resurect(self):
"""
Change ghost's state to GhostAlive and make him turn back.
"""
if self.state != UAG.GhostAlive:
self.state = UAG.GhostAlive
# Turn back
if self.mvt == UAG.MovementUp: self.mvt = UAG.MovementDown
elif self.mvt == UAG.MovementDown: self.mvt = UAG.MovementUp
elif self.mvt == UAG.MovementRight: self.mvt = UAG.MovementLeft
else: self.mvt = UAG.MovementRight
else:
print "The ghost %s is not dead or afraid, he can't resurect." %self.ID
def respawn(self):
"""
Re-initialize ghost.
"""
self.state = UAG.GhostAlive
self.countdownFear = 0.0
self.mvt = UAG.MovementUp
# ===============================
# === Class GhostAI ===
# ===============================
class GhostAI(threading.Thread):
"""
Object controlling the Artificial Intelligence of Ghosts.
"""
# ----------------------------------
# --- Built-in functions
# ----------------------------------
def __init__(self, threadID, threadName, queue, queueLock, speed, nbGhost):
# Init thread module in this class
threading.Thread.__init__(self)
self.threadID = threadID
self.threadName = threadName
self.queue = queue
self.queueLock = queueLock
self.speed = speed
self.dAuthorizedMoves = {UAG.MovementUp:[UAG.MovementUp, UAG.MovementRight, UAG.MovementLeft],
UAG.MovementDown:[UAG.MovementDown, UAG.MovementRight, UAG.MovementLeft],
UAG.MovementRight:[UAG.MovementRight, UAG.MovementUp, UAG.MovementDown],
UAG.MovementLeft:[UAG.MovementLeft, UAG.MovementUp, UAG.MovementDown]}
self.dGhosts = self._initGhost(nbGhost)
# ----------------------------------
# --- Private functions
# ----------------------------------
def _initGhost(self, n):
"""
Initialize Ghost objects.
"""
return {i:Ghost(i, 1, "\033[5;3%sm" %i) for i in range(n)}
# ----------------------------------
# --- Set functions
# ----------------------------------
def fearThem(self):
"""
The Pacman took a power, set all ghosts states to GhostAfraid.
"""
for g in self.dGhosts.values(): g.booh()
# ----------------------------------
# --- Move functions
# ----------------------------------
def randomMove(self, direction, lCellAuthorizedMoves):
"""
Return the new direction for a ghost.
'direction': current ghost direction
'lCellAuthorizedMoves': list of authorized moves from the current cell
"""
lDirection = list( set(self.dAuthorizedMoves[direction]) & set(lCellAuthorizedMoves) )
# If it's a dead-end, go back
if lDirection: return random.choice(lDirection)
else: return lCellAuthorizedMoves[0]
def predatorMove(self, direction, lCellAuthorizedMoves, lCellPacmanDistance):
"""
Return the new direction for a ghost, which is predating the Pacman.
'direction': current ghost direction
'lCellAuthorizedMoves': list of authorized moves from the current cell
'lCellPacmanDistance': list of (distance, direction) pairs for the cell's neighbors.
"""
predatorDirection = min(lCellPacmanDistance, key=lambda x: x[0])
# Return predator move
if predatorDirection[0] <= UAG.GhostSmell:
return predatorDirection[1]
# Return random move
return self.randomMove(direction, lCellAuthorizedMoves)
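# Illustrative call (values are made up): with direction=UAG.MovementUp,
# lCellAuthorizedMoves=[UAG.MovementUp, UAG.MovementLeft] and
# lCellPacmanDistance=[(2, UAG.MovementLeft), (5, UAG.MovementUp)], the ghost turns left
# when 2 <= UAG.GhostSmell and otherwise falls back to randomMove.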
# ----------------------------------
# --- Run
# ----------------------------------
def run(self):
while not UAG.ExitFlag:
# print "[GhostAI] 1 - Ghosts ask to move"
self.queueLock.acquire()
for g in self.dGhosts.values():
query = [UAG.CellCharacterGhost, g]
# print "[GhostAI] 2 - Ghost %s put movement in queue" %g.ID
self.queue.put(query)
self.queueLock.release()
time.sleep(self.speed)
|
CuBoulder/atlas
|
atlas/instance_operations.py
|
Python
|
mit
| 24,722
| 0.003276
|
"""
atlas.instance_operations
~~~~
Commands that run on servers to deploy instances.
Instance methods:
Create - Local - All symlinks are in place, DB exists, NFS mount is attached
Update - Local - Update code or configuration;
Delete - Local - Remove instance symlinks, delete settings file, delete NFS files.; Remote - Delete database.
# TODO After `util` and `ops` servers are combined
# Repair - Local - Check that only intended code exists in instance, add any missing code. If extra code is found, raise an exception and open a ticket.
# Install - Remote - Drupal install command runs
# Update - Remote - Run optional clear caches, rebuild registry, and/or updb.
# Backup - Remote - Create a database and NFS files backup of the instance.
# Restore - Local - Restore files on an new instance; Remote - Restore database on instance.
"""
import logging
import os
import re
import stat
from grp import getgrnam
from shutil import copyfile, rmtree
from pwd import getpwuid
from jinja2 import Environment, PackageLoader
from atlas import utilities
from atlas.config import (ENVIRONMENT, INSTANCE_ROOT, WEB_ROOT, CORE_WEB_ROOT_SYMLINKS,
NFS_MOUNT_FILES_DIR, NFS_MOUNT_LOCATION, SAML_AUTH,
SERVICE_ACCOUNT_USERNAME, SERVICE_ACCOUNT_PASSWORD, VARNISH_CONTROL_KEY,
SMTP_PASSWORD, WEBSERVER_USER_GROUP, ATLAS_LOCATION, SITE_DOWN_PATH,
SSH_USER, SERVICENOW_KEY, EXPRESS_SITE_METRICS_SECRET)
from atlas.config_servers import (SERVERDEFS, ATLAS_LOGGING_URLS, API_URLS,
VARNISH_CONTROL_TERMINALS, BASE_URLS)
# Setup a sub-logger. See tasks.py for longer comment.
log = logging.getLogger('atlas.instance_operations')
def instance_create(instance, nfs_preserve=False):
"""Create symlink structure, settings file, and NFS space for an instance.
Arguments:
instance {dict} -- complete instance dict from POST request
"""
log.info('Instance | Provision | Instance ID - %s', instance['_id'])
log.debug('Instance | Provision | Instance ID - %s | Instance - %s', instance['_id'], instance)
# Setup path variables
instance_code_path_sid = '{0}/{1}/{1}'.format(INSTANCE_ROOT, instance['sid'])
instance_code_path_current = '{0}/{1}/current'.format(INSTANCE_ROOT, instance['sid'])
instance_web_path_sid = '{0}/{1}'.format(WEB_ROOT, instance['sid'])
log.debug('Instance | Provision | Instance sid path - %s', instance_code_path_sid)
# Create structure in INSTANCE_ROOT
if os.path.exists(instance_code_path_sid):
raise Exception('Destination directory already exists')
os.makedirs(instance_code_path_sid)
# Add Core
switch_core(instance)
# Add profile
switch_profile(instance)
# Add packages
switch_packages(instance)
# Add NFS mounted files directory
if NFS_MOUNT_FILES_DIR:
# Setup paths
nfs_files_dir = NFS_MOUNT_LOCATION[ENVIRONMENT] + '/' + instance['sid']
site_files_dir = instance_code_path_sid + '/sites/default/files'
nfs_src = nfs_files_dir + '/files'
# Make dir on mount if it does not exist or we are not preserving a previous mount.
if not os.access(nfs_src, os.F_OK) or not nfs_preserve:
nfs_directories_to_create = [nfs_files_dir, nfs_src, nfs_files_dir + '/tmp']
for directory in nfs_directories_to_create:
os.mkdir(directory)
# Replace default files dir with one from NFS mount
# Will error if there are files in the directory
# Check is path exists, is a directory (matches symlink)
if os.path.exists(site_files_dir) and os.path.isdir(site_files_dir):
# Check
|
for symlink
if not os.path.islink(site_files_dir):
# Check if directory is empty and remove it if it is
if not os.listdir(site_files_dir):
|
os.rmdir(site_files_dir)
else:
# Remove symlink
os.remove(site_files_dir)
os.symlink(nfs_src, site_files_dir)
# Create settings file
switch_settings_files(instance)
# Create symlinks for current in instance root, 'sid' and 'path' (if needed) in web root.
log.info('Instance | Provision | Instance ID - %s | Symlink current - %s | Symlink web - %s',
instance['_id'], instance_code_path_current, instance_web_path_sid)
utilities.relative_symlink(instance_code_path_sid, instance_code_path_current)
utilities.relative_symlink(instance_code_path_current, instance_web_path_sid)
if instance['status'] in ['launched', 'launching']:
switch_web_root_symlinks(instance)
# Correct file permissions
correct_fs_permissions(instance)
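# Resulting on-disk layout (illustrative summary added for clarity; 'a1b2c3' is a
# hypothetical sid and the roots come from atlas.config):
#   {INSTANCE_ROOT}/a1b2c3/a1b2c3   - instance code with core/profile/package symlinks
#   {INSTANCE_ROOT}/a1b2c3/current  - relative symlink to the sid directory above
#   {WEB_ROOT}/a1b2c3               - relative symlink to 'current'
#   {NFS mount}/a1b2c3/files        - Drupal files dir, symlinked from sites/default/files
#                                     (only when NFS_MOUNT_FILES_DIR is enabled)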
def instance_delete(instance, nfs_preserve=False):
"""Delete symlink structure, settings file, and NFS space for an instance.
Arguments:
instance {dict} -- full instance record
"""
log.info('Instance | Delete | Instance ID - %s', instance['_id'])
log.debug('Instance | Delete | Instance ID - %s | Instance - %s', instance['_id'], instance)
# Setup path variables
instance_code_path = '{0}/{1}'.format(INSTANCE_ROOT, instance['sid'])
instance_code_path_current = '{0}/{1}/current'.format(INSTANCE_ROOT, instance['sid'])
instance_web_path_sid = '{0}/{1}'.format(WEB_ROOT, instance['sid'])
instance_web_path_path = '{0}/{1}'.format(WEB_ROOT, instance['path'])
# Remove symlinks and directories
symlinks_to_remove = [instance_code_path_current, instance_web_path_sid, instance_web_path_path]
# Directories to remove
directories_to_remove = [instance_code_path]
if NFS_MOUNT_FILES_DIR:
# Remove dir on mount unless we are preserving it, like when we 'heal' an instance.
if not nfs_preserve:
nfs_files_dir = NFS_MOUNT_LOCATION[ENVIRONMENT] + '/' + instance['sid']
directories_to_remove.append(nfs_files_dir)
        # Remove symlink to files
symlinks_to_remove.append(instance_code_path + '/sites/default/files')
# If the settings file exists, change permissions to allow us to delete the file.
file_destination = "{0}/{1}/{1}/sites/default/settings.php".format(
INSTANCE_ROOT, instance['sid'])
# Check to see if file exists and is writable.
utilities.file_accessable_and_writable(file_destination)
# Remove symlinks
for symlink in symlinks_to_remove:
if os.path.islink(symlink):
log.debug('Instance | Delete | Symlink - %s', symlink)
os.remove(symlink)
# Remove directories
for directory in directories_to_remove:
# Check if it exists
if os.access(directory, os.F_OK):
rmtree(directory)
def switch_core(instance):
"""Switch Drupal core symlinks, if no core symlinks are present add them.
Arguments:
instance {dict} -- full instance record
"""
# Lookup the core we want to use.
core = utilities.get_single_eve('code', instance['code']['core'])
# Setup variables
core_path = utilities.code_path(core)
instance_code_path_sid = '{0}/{1}/{1}'.format(INSTANCE_ROOT, instance['sid'])
# Get a list of files in the Core source directory
core_files = os.listdir(core_path)
# Get a list of files in the Instance target directory
instance_files = os.listdir(instance_code_path_sid)
# Remove any existing symlinks to a core.
for instance_file in instance_files:
full_path = instance_code_path_sid + '/' + instance_file
# Check if path is a symlink.
if os.path.islink(full_path):
# Get the target of the symlink.
symlink_target = os.readlink(full_path)
# Get the name of the directory that contains the symlink target
code_dir = os.path.dirname(symlink_target)
# Check to see if the directory is a Drupal core, if so remove the symlink.
            regex = r'((drupal)\-([\d\.x]+\-*(?:dev|alpha|beta|rc|pl)*[\d]*))$'
            if re.match(regex, code_dir, re.IGNORECASE):
os.remove(full_path)
# Iter
|
torehc/carontepass-v2
|
web/carontepass/access/migrations/0002_user.py
|
Python
|
gpl-3.0
| 962
| 0.002079
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('access', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=60)),
('last_name', models.CharField(max_length=120)),
('rol', models.CharField(default=b'USER', max_length=4, choices=[(b'USER', b'User'), (b'ADMI', b'Administrator')])),
('phone', models.CharField(max_length=18)),
('address', models.CharField(max_length=220)),
('email', models.CharField(max_length=180)),
('group', models.ForeignKey(to='access.Group')),
],
),
]
|
google/jax-cfd
|
jax_cfd/spectral/time_stepping.py
|
Python
|
apache-2.0
| 8,747
| 0.009049
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implicit-explicit time stepping routines for ODEs."""
import dataclasses
from typing import Callable, Sequence, TypeVar
import tree_math
PyTreeState = TypeVar("PyTreeState")
TimeStepFn = Callable[[PyTreeState], PyTreeState]
class ImplicitExplicitODE:
"""Describes a set of ODEs with implicit & explicit terms.
The equation is given by:
∂x/∂t = explicit_terms(x) + implicit_terms(x)
`explicit_terms(x)` includes terms that should use explicit time-stepping and
`implicit_terms(x)` includes terms that should be modeled implicitly.
Typically the explicit terms are non-linear and the implicit terms are linear.
This simplifies solves but isn't strictly necessary.
"""
def explicit_terms(self, state: PyTreeState) -> PyTreeState:
"""Evaluates explicit terms in the ODE."""
raise NotImplementedError
def implicit_terms(self, state: PyTreeState) -> PyTreeState:
"""Evaluates implicit terms in the ODE."""
raise NotImplementedError
def implicit_solve(
self, state: PyTreeState, step_size: float,
) -> PyTreeState:
"""Solves `y - step_size * implicit_terms(y) = x` for y."""
raise NotImplementedError
def backward_forward_euler(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via forward and backward Euler methods.
This method is first order accurate.
Args:
equation: equation to solve.
time_step: time step.
Returns:
Function that performs a time step.
"""
# pylint: disable=invalid-name
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
@tree_math.wrap
def step_fn(u0):
g = u0 + dt * F(u0)
u1 = G_inv(g, dt)
return u1
return step_fn
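# --- Hedged usage sketch (added for illustration; not part of the original
# module). A toy linear-decay equation du/dt = -u with the decay handled
# implicitly; a plain Python float serves as the (pytree) state.
class _ExampleDecay(ImplicitExplicitODE):
  """du/dt = 0 (explicit part) + (-u) (implicit part)."""

  def explicit_terms(self, state):
    return 0.0 * state  # no explicit contribution in this toy problem

  def implicit_terms(self, state):
    return -state

  def implicit_solve(self, state, step_size):
    # Solves y - step_size * (-y) = state, i.e. y = state / (1 + step_size)
    return state / (1 + step_size)

# step = backward_forward_euler(_ExampleDecay(), time_step=0.1)
# step(1.0) == 1.0 / 1.1, i.e. one backward-Euler decay step.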
def crank_nicolson_rk2(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via Crank-Nicolson and 2nd order Runge-Kutta (Heun).
This method is second order accurate.
Args:
equation: equation to solve.
time_step: time step.
Returns:
Function that performs a time step.
Reference:
Chandler, G. J. & Kerswell, R. R. Invariant recurrent solutions embedded in
a turbulent two-dimensional Kolmogorov flow. J. Fluid Mech. 722, 554–595
(2013). https://doi.org/10.1017/jfm.2013.122 (Section 3)
"""
# pylint: disable=invalid-name
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G = tree_math.unwrap(equation.implicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
@tree_math.wrap
def step_fn(u0):
g = u0 + 0.5 * dt * G(u0)
h1 = F(u0)
u1 = G_inv(g + dt * h1, 0.5 * dt)
h2 = 0.5 * (F(u1) + h1)
u2 = G_inv(g + dt * h2, 0.5 * dt)
return u2
return step_fn
def low_storage_runge_kutta_crank_nicolson(
alphas: Sequence[float],
betas: Sequence[float],
gammas: Sequence[float],
equation: ImplicitExplicitODE,
time_step: float,
) -> TimeStepFn:
"""Time stepping via "low-storage" Runge-Kutta and Crank-Nicolson steps.
  These schemes are second order accurate for the implicit terms, but potentially
higher order accurate for the explicit terms. This seems to be a favorable
tradeoff when the explicit terms dominate, e.g., for modeling turbulent
fluids.
Per Canuto: "[these methods] have been widely used for the time-discretization
in applications of spectral methods."
Args:
alphas: alpha coefficients.
betas: beta coefficients.
gammas: gamma coefficients.
equation: equation to solve.
time_step: time step.
Returns:
Function that performs a time step.
Reference:
Canuto, C., Yousuff Hussaini, M., Quarteroni, A. & Zang, T. A.
Spectral Methods: Evolution to Complex Geometries and Applications to
Fluid Dynamics. (Springer Berlin Heidelberg, 2007).
https://doi.org/10.1007/978-3-540-30728-0 (Appendix D.3)
"""
# pylint: disable=invalid-name,non-ascii-name
α = alphas
β = betas
γ = gammas
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G = tree_math.unwrap(equation.implicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
  if len(alphas) - 1 != len(betas) or len(betas) != len(gammas):
    raise ValueError("number of RK coefficients does not match")
@tree_math.wrap
def step_fn(u):
h = 0
for k in range(len(β)):
h = F(u) + β[k] * h
µ = 0.5 * dt * (α[k + 1] - α[k])
u = G_inv(u + γ[k] * dt * h + µ * G(u), µ)
return u
return step_fn
def crank_nicolson_rk3(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via Crank-Nicolson and RK3 ("Williamson")."""
return low_storage_runge_kutta_crank_nicolson(
alphas=[0, 1/3, 3/4, 1],
betas=[0, -5/9, -153/128],
gammas=[1/3, 15/16, 8/15],
equation=equation,
time_step=time_step,
)
def crank_nicolson_rk4(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via Crank-Nicolson and RK4 ("Carpenter-Kennedy")."""
  # pylint: disable=line-too-long
return low_storage_runge_kutta_crank_nicolson(
alphas=[0, 0.1496590219993, 0.3704009573644, 0.6222557631345, 0.9582821306748, 1],
betas=[0, -0.4178904745, -1.192151694643, -1.697784692471, -1.514183444257],
      gammas=[0.1496590219993, 0.3792103129999, 0.8229550293869, 0.6994504559488, 0.1530572479681],
equation=equation,
time_step=time_step,
)
@dataclasses.dataclass
class ImExButcherTableau:
"""Butcher Tableau for implicit-explicit Runge-Kutta methods."""
a_ex: Sequence[Sequence[float]]
a_im: Sequence[Sequence[float]]
b_ex: Sequence[float]
b_im: Sequence[float]
def __post_init__(self):
if len({len(self.a_ex) + 1,
len(self.a_im) + 1,
len(self.b_ex),
len(self.b_im)}) > 1:
raise ValueError("inconsistent Butcher tableau")
def imex_runge_kutta(
tableau: ImExButcherTableau,
equation: ImplicitExplicitODE,
time_step: float,
) -> TimeStepFn:
"""Time stepping with Implicit-Explicit Runge-Kutta."""
# pylint: disable=invalid-name
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G = tree_math.unwrap(equation.implicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
a_ex = tableau.a_ex
a_im = tableau.a_im
b_ex = tableau.b_ex
b_im = tableau.b_im
num_steps = len(b_ex)
@tree_math.wrap
def step_fn(y0):
f = [None] * num_steps
g = [None] * num_steps
f[0] = F(y0)
g[0] = G(y0)
for i in range(1, num_steps):
ex_terms = dt * sum(a_ex[i-1][j] * f[j] for j in range(i) if a_ex[i-1][j])
im_terms = dt * sum(a_im[i-1][j] * g[j] for j in range(i) if a_im[i-1][j])
Y_star = y0 + ex_terms + im_terms
Y = G_inv(Y_star, dt * a_im[i-1][i])
if any(a_ex[j][i] for j in range(i, num_steps - 1)) or b_ex[i]:
f[i] = F(Y)
if any(a_im[j][i] for j in range(i, num_steps - 1)) or b_im[i]:
g[i] = G(Y)
ex_terms = dt * sum(b_ex[j] * f[j] for j in range(num_steps) if b_ex[j])
im_terms = dt * sum(b_im[j] * g[j] for j in range(num_steps) if b_im[j])
y_next = y0 + ex_terms + im_terms
return y_next
return step_fn
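# Hedged example (added for illustration): the classic two-stage, first-order
# IMEX Euler tableau. These coefficients are standard values, not taken from
# this file; with them imex_runge_kutta should reproduce the
# backward_forward_euler step defined above.
_IMEX_EULER_TABLEAU = ImExButcherTableau(
    a_ex=[[1.0]],
    a_im=[[0.0, 1.0]],
    b_ex=[1.0, 0.0],
    b_im=[0.0, 1.0],
)
# step_fn = imex_runge_kutta(_IMEX_EULER_TABLEAU, equation, time_step)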
def imex_rk_sil3(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping with the SIL3 implicit-explicit RK scheme.
This method is second-order accurate for the implicit terms and third-order
accurate for the explicit terms.
Args:
equation: equation to solve.
time_step: time step.
Returns:
Function that per
|
whitepyro/debian_server_setup
|
sickbeard/scene_numbering.py
|
Python
|
gpl-3.0
| 25,288
| 0.003282
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
#
# Created on Sep 20, 2012
# @author: Dermot Buckley <dermot@buckley.ie>
# @copyright: Dermot Buckley
#
import time
import datetime
import traceback
import sickbeard
try:
import json
except ImportError:
from lib import simplejson as json
from sickbeard import logger
from sickbeard import db
from sickbeard.exceptions import ex
def get_scene_numbering(indexer_id, indexer, season, episode, fallback_to_xem=True):
"""
Returns a tuple, (season, episode), with the scene numbering (if there is one),
otherwise returns the xem numbering (if fallback_to_xem is set), otherwise
returns the TVDB and TVRAGE numbering.
(so the return values will always be set)
@param indexer_id: int
@param season: int
@param episode: int
@param fallback_to_xem: bool If set (the default), check xem for matches if there is no local scene numbering
@return: (int, int) a tuple with (season, episode)
"""
if indexer_id is None or season is None or episode is None:
return (season, episode)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(indexer_id))
if showObj and not showObj.is_scene:
return (season, episode)
result = find_scene_numbering(int(indexer_id), int(indexer), season, episode)
if result:
return result
else:
if fallback_to_xem:
xem_result = find_xem_numbering(int(indexer_id), int(indexer), season, episode)
if xem_result:
return xem_result
return (season, episode)
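# Illustrative call (values are hypothetical, added for clarity): for a show with
# indexer id 12345 on indexer 1, get_scene_numbering(12345, 1, 1, 5) returns a
# saved custom scene (season, episode) if one exists, falls back to the XEM
# mapping, and otherwise returns the indexer's own (1, 5).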
def find_scene_numbering(indexer_id, indexer, season, episode):
"""
Same as get_scene_numbering(), but returns None if scene numbering is not set
"""
if indexer_id is None or season is None or episode is None:
return (season, episode)
indexer_id = int(indexer_id)
indexer = int(indexer)
myDB = db.DBConnection()
rows = myDB.select(
"SELECT scene_season, scene_episode FROM scene_numbering WHERE indexer = ? and indexer_id = ? and season = ? and episode = ? and (scene_season or scene_episode) != 0",
[indexer, indexer_id, season, episode])
if rows:
return (int(rows[0]["scene_season"]), int(rows[0]["scene_episode"]))
def get_scene_absolute_numbering(indexer_id, indexer, absolute_number, fallback_to_xem=True):
"""
Returns a tuple, (season, episode), with the scene numbering (if there is one),
otherwise returns the xem numbering (if fallback_to_xem is set), otherwise
returns the TVDB and TVRAGE numbering.
(so the return values will always be set)
@param indexer_id: int
@param absolute_number: int
@param fallback_to_xem: bool If set (the default), check xem for matches if there is no local scene numbering
@return: (int, int) a tuple with (season, episode)
"""
if indexer_id is None or absolute_number is None:
return absolute_number
indexer_id = int(indexer_id)
indexer = int(indexer)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, indexer_id)
if showObj and not showObj.is_scene:
return absolute_number
result = find_scene_absolute_numbering(indexer_id, indexer, absolute_number)
if result:
return result
else:
if fallback_to_xem:
xem_result = find_xem_absolute_numbering(indexer_id, indexer, absolute_number)
if xem_result:
return xem_result
return absolute_number
def find_scene_absolute_numbering(indexer_id, indexer, absolute_number):
"""
Same as get_scene_numbering(), but returns None if scene numbering is not set
"""
if indexer_id is None or absolute_number is None:
return absolute_number
indexer_id = int(indexer_id)
indexer = int(indexer)
myDB = db.DBConnection()
rows = myDB.select(
"SELECT scene_absolute_number FROM scene_numbering WHERE indexer = ? and indexer_id = ? and absolute_number = ? and scene_absolute_number != 0",
[indexer, indexer_id, absolute_number])
if rows:
return int(rows[0]["scene_absolute_number"])
def get_indexer_numbering(indexer_id, indexer, sceneSeason, sceneEpisode, fallback_to_xem=True):
"""
Returns a tuple, (season, episode) with the TVDB and TVRAGE numbering for (sceneSeason, sceneEpisode)
(this works like the reverse of get_scene_numbering)
"""
if indexer_id is None or sceneSeason is None or sceneEpisode is None:
return (sceneSeason, sceneEpisode)
indexer_id = int(indexer_id)
indexer = int(indexer)
myDB = db.DBConnection()
rows = myDB.select(
"SELECT season, episode FROM scene_numbering WHERE indexer = ? and indexer_id = ? and scene_season = ? and scene_episode = ?",
[indexer, indexer_id, sceneSeason, sceneEpisode])
if rows:
return (int(rows[0]["season"]), int(rows[0]["episode"]))
else:
if fallback_to_xem:
return get_indexer_numbering_for_xem(indexer_id, indexer, sceneSeason, sceneEpisode)
return (sceneSeason, sceneEpisode)
def get_indexer_absolute_numbering(indexer_id, indexer, sceneAbsoluteNumber, fallback_to_xem=True, scene_season=None):
"""
    Returns the TVDB and TVRAGE absolute number for the given sceneAbsoluteNumber
    (this works like the reverse of get_scene_absolute_numbering)
"""
if indexer_id is None or sceneAbsoluteNumber is None:
return sceneAbsoluteNumber
indexer_id = int(indexer_id)
indexer = int(indexer)
myDB = db.DBConnection()
if scene_season is None:
rows = myDB.select(
"SELECT absolute_number FROM scene_numbering WHERE indexer = ? and indexer_id = ? and scene_absolute_number = ?",
[indexer, indexer_id, sceneAbsoluteNumber])
else:
rows = myDB.select(
"SELECT absolute_number FROM scene_numbering WHERE indexer = ? and indexer_id = ? and scene_absolute_number = ? and scene_season = ?",
[indexer, indexer_id, sceneAbsoluteNumber, scene_season])
if rows:
return int(rows[0]["absolute_number"])
else:
if fallback_to_xem:
return get_indexer_absolute_numbering_for_xem(indexer_id, indexer, sceneAbsoluteNumber, scene_season)
return sceneAbsoluteNumber
def set_scene_numbering(indexer_id, indexer, season=None, episode=None, absolute_number=None, sceneSeason=None,
sceneEpisode=None, sceneAbsolute=None):
"""
Set scene numbering for a season/episode.
To clear the scene numbering, leave both sceneSeason and sceneEpisode as None.
"""
if indexer_id is None:
return
indexer_id = int(indexer_id)
indexer = int(indexer)
myDB = db.DBConnection()
if season and episode:
myDB.action(
"INSERT OR IGNORE INTO scene_numbering (indexer, indexer_id, season, episode) VALUES (?,?,?,?)",
[indexer, indexer_id, season, episode])
myDB.action(
"UPDATE scene_numbering SET scene_season = ?, scene_episode = ? WHERE indexer = ? and indexer_id = ? and season = ? and episode = ?",
[sceneSeason, sceneEpisode, indexer, indexer_id, season, episode])
elif absolute_number:
myDB.action(
"INSERT OR IGNORE INTO scene_numbering (indexer, indexer_id, absolute_number) VALUES (?,?,?)",
[indexer, indexer_id, absolute_number])
|
Comunitea/CMNT_00098_2017_JIM_addons
|
shipping_container/models/shipping_container.py
|
Python
|
agpl-3.0
| 3,735
| 0.002678
|
# -*- coding: utf-8 -*-
# © 2016 Comunitea - Kiko Sanchez <kiko@comunitea.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.
from odoo import api, fields, models, _
import odoo.addons.decimal_precision as dp
class ShippingContainerType(models.Model):
_name = "shipping.container.type"
name = fields.Char("Container type", required=True)
    volume = fields.Float("Volume", help="Container volume (m3)", required=True)
length = fields.Float("Length", help="Length(m)")
height = fields.Float("Height", help="Height(m)")
width = fields.Float("Width", help="Width(m)")
@api.onchange('length', 'height', 'width')
def onchange_dimensions(self):
if self.length and self.height and self.width:
self.volume = self.length * self.height * self.width
class ShippingContainer(models.Model):
_name = "shippin
|
g.container"
@api.one
def _get_moves(self):
self.move_ids_count = len(self.move_ids)
@api.one
def _get_partners(self):
self.partner_ids = self.picking_ids.partner_id
@api.multi
def _available_volume(self):
for container in self:
volume = container.shipping_container_type_id.volume
weight = 0.00
for move in container.move_ids:
volume -= move.product_id.volume * move.product_uom_qty
weight += move.product_id.weight * move.product_uom_qty
container.available_volume = volume
container.weight = weight
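    # Worked example (illustrative figures added for clarity): a container type of
    # 33.2 m3 whose moves occupy 10.0 m3 and weigh 850 kg ends up with
    # available_volume = 23.2 and weight = 850.0.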
name = fields.Char("Container Ref.", required=True)
date_expected = fields.Date("Date expected", required=True)
date_shipment = fields.Date("Shipment date")
picking_ids = fields.One2many("stock.picking", "shipping_container_id", "Pickings")
company_id = fields. \
Many2one("res.company", "Company", required=True,
default=lambda self:
self.env['res.company']._company_default_get('shipping.container'))
harbor_id = fields.Many2one('res.harbor', string="Harbor", required=True)
move_ids = fields.One2many('stock.move', 'shipping_container_id', string="Moves")
move_ids_count = fields.Integer('Move ids count', compute="_get_moves")
harbor_dest_id = fields.Many2one('res.harbor', string="Dest. harbor")
state = fields.Selection([('loading', 'Loading'),
('transit', 'Transit'),
('destination', 'Destination')],
default='loading')
shipping_container_type_id = fields.Many2one('shipping.container.type', 'Type')
available_volume = fields.Float("Available volume (m3)", compute="_available_volume")
weight = fields.Float("Weight (kgr.)", compute="_available_volume")
incoterm_id = fields.Many2one('stock.incoterms', string='Incoterm')
_sql_constraints = [
('name_uniq', 'unique(name)', 'Container name must be unique')
]
@api.multi
def action_view_move_ids(self):
action = self.env.ref(
'shipping_container.container_picking_tree_action').read()[0]
action['domain'] = [('id', 'in', self.move_ids.ids)]
return action
def set_transit(self):
self.state = 'transit'
def set_destination(self):
self.state = 'destination'
def set_loading(self):
self.state = 'loading'
@api.multi
def write(self, vals):
if vals.get('date_expected', False):
for container in self:
if vals['date_expected'] != container.date_expected:
for pick in container.picking_ids:
pick.min_date = vals['date_expected']
return super(ShippingContainer, self).write(vals)
|
google/tink
|
python/tink/_keyset_reader.py
|
Python
|
apache-2.0
| 2,497
| 0.009211
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reads Keysets from file."""
import abc
from google.protobuf import json_format
from google.protobuf import message
from tink.proto import tink_pb2
from tink import core
class KeysetReader(metaclass=abc.ABCMeta):
"""Reads a Keyset."""
@abc.abstractmethod
def read(self) -> tink_pb2.Keyset:
"""Reads and returns a (cleartext) tink_pb2.Keyset from its source."""
raise NotImplementedError()
@abc.abstractmethod
def read_encrypted(self) -> tink_pb2.EncryptedKeyset:
"""Reads and returns an tink_pb2.EncryptedKeyset from its source."""
raise NotImplementedError()
class JsonKeysetReader(KeysetReader):
"""Reads a JSON Keyset."""
def __init__(self, serialized_keyset: str):
self._serialized_keyset = serialized_keyset
def read(self) -> tink_pb2.Keyset:
try:
return json_format.Parse(self._serialized_keyset, tink_pb2.Keyset())
except json_format.ParseError as e:
raise core.TinkError(e)
def read_encrypted(self) -> tink_pb2.EncryptedKeyset:
try:
return json_format.Parse(self._serialized_keyset,
tink_pb2.EncryptedKeyset())
except json_format.ParseError as e:
raise core.TinkError(e)
class BinaryKeysetReader(KeysetReader):
"""Reads a binary Keyset."""
def __init__(self, serialized_keyset: bytes):
self._serialized_keyset = serialized_keyset
def read(self) -> tink_pb2.Keyset:
if not self._serialized_keyset:
raise core.TinkError('No keyset found')
try:
return tink_pb2.Keyset.FromString(self._serialized_keyset)
except message.DecodeError as e:
      raise core.TinkError(e)
def read_encrypted(self) -> tink_pb2.EncryptedKeyset:
if not self._serialized_keyset:
raise core.TinkError('No keyset found')
try:
      return tink_pb2.EncryptedKeyset.FromString(self._serialized_keyset)
except message.DecodeError as e:
raise core.TinkError(e)
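# Hedged usage sketch (added for illustration; the JSON below is a minimal,
# hypothetical keyset rather than one produced by a real key manager):
#   reader = JsonKeysetReader('{"primaryKeyId": 42, "key": []}')
#   keyset = reader.read()          # -> tink_pb2.Keyset with primary_key_id == 42
#   BinaryKeysetReader(b'').read()  # -> raises core.TinkError('No keyset found')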
|
ahu-odoo/odoo
|
openerp/models.py
|
Python
|
agpl-3.0
| 274,326
| 0.003879
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Object Relational Mapping module:
* Hierarchical structure
* Constraints consistency and validation
* Object metadata depends on its status
* Optimised processing by complex query (multiple actions at once)
* Default field values
* Permissions optimisation
    * Persistent object: DB postgresql
* Data conversion
* Multi-level caching system
* Two different inheritance mechanisms
* Rich set of field types:
- classical (varchar, integer, boolean, ...)
- relational (one2many, many2one, many2many)
- functional
"""
import copy
import datetime
import functools
import itertools
import logging
import operator
import pickle
import pytz
import re
import time
from collections import defaultdict, MutableMapping
from inspect import getmembers
import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
import openerp
from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError
from .osv import fields
from .osv.query import Query
from .tools import lazy_property
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
""" Check if the given name is a valid openerp object name.
The _name attribute in osv and osv_memory object is subject to
some restrictions. This function returns True or False whether
the given name is allowed or not.
TODO: this is an approximation. The goal in this approximation
is to disallow uppercase characters (in some places, we quote
table/column names and in other not, which leads to this kind
of errors:
psycopg2.ProgrammingError: relation "xxx" does not exist).
The same restriction should apply to both osv and osv_memory
objects for consistency.
"""
if regex_object_name.match(name) is None:
return False
return True
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
_logger.error(msg)
raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
'RESTRICT': 'r',
'NO ACTION': 'a',
'CASCADE': 'c',
'SET NULL': 'n',
'SET DEFAULT': 'd',
}
def intersect(la, lb):
return filter(lambda x: x in lb, la)
def same_name(f, g):
""" Test whether functions `f` and `g` are identical or have the same name """
return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def fix_import_export_id_paths(fieldname):
"""
Fixes the id fields in import and exports, and splits field paths
on '/'.
:param str fieldname: name of the field to import/export
:return: split field name
:rtype: list of str
"""
fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
return fixed_external_id.split('/')
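# Illustrative examples (added for clarity; not in the original source):
#   fix_import_export_id_paths('partner_id:id') -> ['partner_id', 'id']
#   fix_import_export_id_paths('partner_id.id') -> ['partner_id', '.id']
#   fix_import_export_id_paths('line_ids/name') -> ['line_ids', 'name']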
def pg_varchar(size=0):
""" Returns the VARCHAR declaration for the provided size:
* If no size (or an empty or negative size is provided) return an
'infinite' VARCHAR
* Otherwise return a VARCHAR(n)
:type int size: varchar size, optional
:rtype: str
"""
if size:
if not isinstance(size, int):
raise TypeError("VARCHAR parameter should be an int, got %s"
% type(size))
if size > 0:
return 'VARCHAR(%d)' % size
return 'VARCHAR'
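# Illustrative examples (added for clarity; not in the original source):
#   pg_varchar(16) -> 'VARCHAR(16)'
#   pg_varchar()   -> 'VARCHAR'    (unbounded varchar)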
FIELDS_TO_PGTYPES = {
fields.boolean: 'bool',
fields.integer: 'int4',
fields.text: 'text',
fields.html: 'text',
fields.date: 'date',
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
"""
:param fields._column f: field to get a Postgres type for
:param type type_override: use the provided type for dispatching instead of the field's own type
:returns: (postgres_identification_type, postgres_type_specification)
:rtype: (str, str)
"""
field_type = type_override or type(f)
if field_type in FIELDS_TO_PGTYPES:
pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
elif issubclass(field_type, fields.float):
if f.digits:
pg_type = ('numeric', 'NUMERIC')
else:
pg_type = ('float8', 'DOUBLE PRECISION')
elif issubclass(field_type, (fields.char, fields.reference)):
pg_type = ('varchar', pg_varchar(f.size))
elif issubclass(field_type, fields.selection):
if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
or getattr(f, 'size', None) == -1:
pg_type = ('int4', 'INTEGER')
else:
pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
elif issubclass(field_type, fields.function):
if f._type == 'selection':
pg_type = ('varchar', pg_varchar())
else:
pg_type = get_pg_type(f, getattr(fields, f._type))
else:
_logger.warning('%s type not supported!', field_type)
pg_type = None
return pg_type
class MetaModel(api.Meta):
""" Metaclass for the models.
This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
If the automatic discovery is not needed, it is possible to set the model's
``_register`` attribute to False.
"""
module_to_models = {}
def __init__(self, name, bases, attrs):
if not self._register:
self._register = True
super(MetaModel, self).__init__(name, bases, attrs)
return
if not hasattr(self, '_module'):
# The (OpenERP) module name can be in the `openerp.addons` namespace
# or not. For instance, module `sale` can be imported as
# `openerp.addons.sale` (the right way) or `sale` (for backward
# compatibility).
module_parts = self.__module__.split('.')
if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
module_name = self.__module__.split('.')[2]
else:
module_name = self.__module__.split('.')[0]
self._module = module_name
        # Remember which models to instantiate for this module.
if not self._custom:
self.module_to_models.setdefault(self._module, []).append(self)
class NewId(object):
""" Pseudo-ids for new records. """
def __nonzero__(self):
return False
IdType =
|
khufkens/daymetpy
|
setup.py
|
Python
|
agpl-3.0
| 955
| 0.024084
|
from distutils.core import setup
setup(
name = 'daymetpy',
packages = ['daymetpy'],
version = '1.0.0',
license = 'AGPL-3',
description = 'A library for accessing Daymet surface weather data',
author = 'Koen Hufkens',
author_email = 'koen.hufkens@gmail.com',
url = 'https://github.com/khufkens/daymetpy',
download_url = 'https://github.com/khufkens/daymetpy/archive/1.0.0.tar.gz',
keywords = ['daymet', 'climatology', 'ORNL','weather'],
classifiers = [
# Status
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
# License
    'License :: OSI Approved :: GNU Affero General Public License v3',
# Python versions supported
    'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
)
|
samsu/neutron
|
tests/unit/ml2/drivers/mechanism_bulkless.py
|
Python
|
apache-2.0
| 872
| 0
|
# Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.ml2 import driver_api as api
class BulklessMechanismDriver(api.MechanismDriver):
"""Test mechanism driver for testing bulk emulation."""
def initialize(self):
self.native_bulk_support = False
|
SXBK/kaggle
|
zillow/xgb.py
|
Python
|
gpl-3.0
| 2,782
| 0.00683
|
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "data/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import xgboost as xgb
import gc
import sklearn
print('Loading data ...')
train = pd.read_csv('data/train_2016.csv')
prop = pd.read_csv('data/properties_2016.csv')
for c, dtype in zip(prop.columns, prop.dtypes):
if dtype == np.float64:
prop[c] = prop[c].astype(np.float32)
df_train = train.merge(prop, how='left', on='parcelid')
x_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode'], axis=1)
y_train = df_train['logerror'].values
print(x_train.shape, y_train.shape)
train_columns = x_train.columns
for c in x_train.dtypes[x_train.dtypes == object].index.values:
x_train[c] = (x_train[c] == True)
del df_train; gc.collect()
split = 90000
x_train, y_train, x_valid, y_valid = x_train[:split], y_train[:split], x_train[split:], y_train[split:]
x_train = x_train.values.astype(np.float32, copy=False)
x_valid = x_valid.values.astype(np.float32, copy=False)
d_train = xgb.DMatrix(x_train, label=y_train)
d_valid = xgb.DMatrix(x_valid, label=y_valid)
del x_train, x_valid; gc.collect()
params = {}
params['eta'] = 0.02
params['objective'] = 'reg:linear'
params['eval_metric'] = 'mae'
params['max_depth'] = 10
params['silent'] = 0
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
clf = xgb.train(params, d_train, 10000, watchlist, early_stopping_rounds=100, verbose_eval=10)
del d_train, d_valid; gc.collect()
print("Prepare for the prediction ...")
sample = pd.read_csv('data/sample_submission.csv')
sample['parcelid'] = sample['ParcelId']
df_test = sample.merge(prop, on='parcelid', how='left')
del sample, prop; gc.collect()
x_test = df_test[train_columns]
del df_test; gc.collect()
for c in x_test.dtypes[x_test.dtypes == object].index.values:
x_test[c] = (x_test[c] == True)
x_test = x_test.values.astype(np.float32, copy=False)
print("Start prediction ...")
d_test = xgb.DMatrix(x_test)
p_test = clf.predict(d_test)
del x_test; gc.collect()
print("Start write result ...")
sub = pd.read_csv('data/sample_submission.csv')
for c in sub.columns[sub.columns != 'ParcelId']:
sub[c] = p_test
sub.to_csv('out/xgb.csv', index=False, float_format='%.4f')
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/common/encoding_utils.py
|
Python
|
agpl-3.0
| 9,031
| 0.001218
|
"""This module implements assorted utility functions for encoding/decoding
of data.
"""
import types
import base64
import json
import zlib
from hashlib import md5
from ..packages import six
# Functions for encoding/decoding JSON. These wrappers are used in order
# to hide the differences between Python 2 and Python 3 implementations
# of the json module functions as well as instigate some better defaults
# for the handling on unknown objects. All but the first argument must
# be supplied as keyword arguments to allow the wrappers to supply
# defaults.
def json_encode(obj, **kwargs):
_kwargs = {}
# This wrapper function needs to deal with a few issues.
#
# The first is that when a byte string is provided, we need to
# ensure that it is interpreted as being Latin-1. This is necessary
# as by default JSON will treat it as UTF-8, which means if an
# invalid UTF-8 byte string is provided, a failure will occur when
# encoding the value.
#
# The json.dumps() function in Python 2 had an encoding argument
# which needs to be used to dictate what encoding a byte string
# should be interpreted as being. We need to supply this and set it
# to Latin-1 to avoid the failures if the byte string is not valid
# UTF-8.
#
# For Python 3, it will simply fail if provided any byte string. To
# be compatible with Python 2, we still want to accept them, but as
# before interpret it as being Latin-1. For Python 3 we can only do
# this by overriding the fallback encoder used when a type is
# encountered that the JSON encoder doesn't know what to do with.
#
# The second issue we want to deal with is allowing generators or
# iterables to be supplied and for them to be automatically expanded
# and treated as lists. This also entails overriding the fallback
# encoder.
#
    # The third is to eliminate white space after separators to trim the
# size of the data being sent.
if type(b'') is type(''):
_kwargs['encoding'] = 'latin-1'
def _encode(o):
if isinstance(o, bytes):
return o.decode('latin-1')
elif isinstance(o, types.GeneratorType):
return list(o)
elif hasattr(o, '__iter__'):
return list(iter(o))
raise TypeError(repr(o) + ' is not JSON serializable')
_kwargs['default'] = _encode
_kwargs['separators'] = (',', ':')
# We still allow supplied arguments to override internal defaults if
# necessary, but the caller must be sure they aren't dependent on
# the new defaults. In particular, if they supply 'default' it will
# override our default fallback encoder.
_kwargs.update(kwargs)
return json.dumps(obj, **_kwargs)
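# Illustrative behaviour (added for clarity; not in the original source):
#   json_encode({'a': (i for i in range(3))})  -> '{"a":[0,1,2]}'   (generator expanded)
#   json_encode(b'\xff')                       -> '"\\u00ff"'       (bytes read as Latin-1)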
def json_decode(s, **kwargs):
# Nothing special to do here at this point but use a wrapper to be
# consistent with encoding and allow for changes later.
return json.loads(s, **kwargs)
# Functions for obfuscating/deobfuscating text string based on an XOR
# cipher.
def xor_cipher_genkey(key, length=None):
"""Generates a byte array for use in XOR cipher encrypt and decrypt
routines. In Python 2 either a byte string or Unicode string can be
provided for the key. In Python 3, it must be a Unicode string. In
either case, characters in the string must be within the ASCII
character range.
"""
return bytearray(key[:length], encoding='ascii')
def xor_cipher_encrypt(text, key):
"""Encrypts the text using an XOR cipher where the key is provided
as a byte array. The key cannot be an empty byte array. Where the
key is shorter than the text to be encrypted, the same key will
continually be reapplied in succession. In Python 2 either a byte
string or Unicode string can be provided for the text input. In
Python 3 only a Unicode string can be provided for the text input.
In either case where a Unicode string is being provided, characters
must have an ordinal value less than 256. The result will be a byte
array.
"""
return bytearray([ord(c) ^ key[i % len(key)] for i, c in enumerate(text)])
def xor_cipher_decrypt(text, key):
"""Decrypts the text using an XOR cipher where the key is provided
as a byte array. The key cannot be an empty byte array. Where the
key is shorter than the text to be encrypted, the same key will
continually be reapplied in succession. The input text must be in
the form of a byte array. The result will in turn also be a byte
array.
"""
return bytearray([c ^ key[i % len(key)] for i, c in enumerate(text)])
def xor_cipher_encrypt_base64(text, key):
"""Encrypts the UTF-8 encoded representation of the text using an
XOR cipher using the key. The key can be a byte array generated
    using xor_cipher_genkey() or an appropriate string of the correct
    type and composition, in which case it will be converted to a byte
array using xor_cipher_genkey(). The key cannot be an empty byte
array or string. Where the key is shorter than the text to be
encrypted, the same key will continually be reapplied in succession.
In Python 2 either a byte string or Unicode string can be provided
for the text input. In the case of a byte string, it will be
interpreted as having Latin-1 encoding. In Python 3 only a Unicode
string can be provided for the text input. Having being encrypted,
the result will then be base64 encoded with the result being a
Unicode string.
"""
if not isinstance(key, bytearray):
key = xor_cipher_genkey(key)
# The input to xor_cipher_encrypt() must be a Unicode string, but
# where each character has an ordinal value less than 256. This
# means that where the text to be encrypted is a Unicode string, we
# need to encode it to UTF-8 and then back to Unicode as Latin-1
# which will preserve the encoded byte string as is. Where the text
# to be encrypted is a byte string, we will not know what encoding
# it may have. What we therefore must do is first convert it to
# Unicode as Latin-1 before doing the UTF-8/Latin-1 conversion. This
# needs to be done as when decrypting we assume that the input will
# always be UTF-8. If we do not do this extra conversion for a byte
# string, we could later end up trying to decode a byte string which
# isn't UTF-8 and so fail with a Unicode decoding error.
if isinstance(text, bytes):
text = text.decode('latin-1')
text = text.encode('utf-8').decode('latin-1')
result = base64.b64encode(bytes(xor_cipher_encrypt(text, key)))
# The result from base64 encoding will be a byte string but since
# dealing with byte strings in Python 2 and Python 3 is quite
# different, it is safer to return a Unicode string for both. We can
# use ASCII when decoding the byte string as base64 encoding only
# produces characters within that codeset.
if six.PY3:
return result.decode('ascii')
return result
def xor_cipher_decrypt_base64(text, key):
"""Decrypts the text using an XOR cipher where the key is provided
as a byte array. The key cannot be an empty byte array. Where the
key is shorter than the text to be encrypted, the same key will
continually be reapplied in succession. The input text must be in
the form of a base64 encoded byte string with a UTF-8 encoding. The
base64 string itself can be either a byte string or Unicode string.
The final result of decrypting the input will be a Unicode string.
"""
if not isinstance(key, bytearray):
key = xor_cipher_genkey(key)
result = xor_cipher_decrypt(bytearray(base64.b64decode(text)), key)
return bytes(result).decode('utf-8')
obfuscate = xor_cipher_encrypt_base64
deobfuscate = xor_cipher_decrypt_base64
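# Hedged usage sketch (added for illustration): the obfuscate/deobfuscate pair is
# a reversible XOR-plus-base64 encoding, not real encryption. Round trip with an
# ASCII key:
#   token = obfuscate('hello world', 'secret')      # base64 text
#   assert deobfuscate(token, 'secret') == 'hello world'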
def unpack_field(field):
"""Decodes data that was compressed before being sent to the collector.
For example, 'pack_data' in a transaction trace, or 'params_data' in a
slow sql trace is run through zlib.compress, base64.standard_b64encode
and json_encode before being sent. This function reverses the compres
|
cinek810/refractiveindex.info
|
plot.py
|
Python
|
gpl-2.0
| 2,040
| 0.042857
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import yaml
from parse import *
import sys
import cmath
import numpy as np
from getData import *
def NtoEps(matVec):
matVec=np.array(matVec)
lamb=matVec[:,0]
n=matVec[:,1]
k=matVec[:,2]
N=n[:]+1j*k[:]
eps=N[:]*N[:]
matEps=[lamb,eps.real,eps.imag]
return matEps
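# Worked example (added for clarity): a single row [lambda, n, k] = [0.5, 2.0, 0.5]
# gives N = 2.0 + 0.5j and eps = N*N = 3.75 + 2.0j, so NtoEps returns
# [array([0.5]), array([3.75]), array([2.])] as [wavelengths, Re(eps), Im(eps)].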
import matplotlib.pyplot as plt
#Plotting Ag
#hag=getData("database/main/Ag/Hagemann.yml");
range=(0.280,0.790)
lambdasAg=np.linspace(range[0],range[1],100)
ag=getData("database/main/Ag/Johnson.yml",lambdasAg);
#print ag
lambdasTiO2=np.linspace(0.280,0.790)
#tio2=getData("database/main/TiO2/Devore-o.yml",lambdasTiO2);
tio2=getData("database/main/SiO2/Malitson.yml",lambdasTiO2);
fig=plt.figure()
plt.plot(lambdasTiO2*1e3,[ x.real for x in tio2],'g-',label=r'Real($n_{SiO_{2}}$)')
plt.legend()
plt.title(r"Współczynnik załamania $SiO_2$ ($n$)");
plt.ylabel('')
plt.xlabel('wavelength [nm]')
plt.xlim([280,750])
plt.savefig("../phd/images/sio2n.png")
fig=plt.figure()
plt.plot(lambdasAg*1e3,[ x.real for x in ag ],'r-',label=r'Real($n_{Ag}$)')
plt.plot(lambdasAg*1e3,[ x.imag for x in ag] ,'b-',label=r'Imag($n_{Ag}$)')
#plt.plot(lambdasTiO2*1e3,[ x.real for x in tio2],'g-',label=r'Real($n_{TiO_{2}}$)')
plt.legend()
plt.title(r"Współczynnik załamania srebra ($n$)");
plt.ylabel('')
plt.xlabel('wavelength [nm]')
plt.xlim([280,750])
plt.ylim([0,3])
plt.savefig("../phd/images/agn.png")
fig=plt.figure()
plt.plot(lambdasAg*1e3,[ (x*x).real for x in ag ],'r-',label=r'Real($\varepsilon_{Ag}$)')
plt.plot(lambdasAg*1e3,[ (x*x).imag for x in ag] ,'b-',label=r'Imag($\varepsilon_{Ag}$)')
#plt.plot(lambdasTiO2*1e3,[ (x*x).real for x in tio2],'g-',label=r'Real($\varepsilon_{TiO_{2}}$)')
plt.legend()
plt.title(r"Współczynnik przenikalnośći elektryczne
|
j ($\varepsilon$)");
plt.ylabel('')
plt.xlabel('wavelength [nm]')
plt.xlim([380,750])
#plt.ylim([-1.5,1.5])
plt.savefig("../phd/images/agtio2eps.png")
########
#GaAs for the THz chapter
plt.show()
|
wazo-pbx/xivo-auth
|
wazo_auth/__init__.py
|
Python
|
gpl-3.0
| 310
| 0
|
# Copyright 2015-2020 The Wazo Authors (see the AUTHORS file)
# SPDX-License-Identifier: GPL-3.0-or-later
from wazo_auth.interfaces import (
BaseAuthenticationBackend,
BaseMetadata,
    DEFAULT_XIVO_UUID,
)
__all__ = [
'BaseAuthenticationBackend',
'BaseMetadata',
'DEFAULT_XIVO_UUID',
]
|
superdesk/Live-Blog
|
plugins/livedesk-sync/livedesk/core/impl/icon_content.py
|
Python
|
agpl-3.0
| 3,125
| 0.0032
|
'''
Created on August 19, 2013
@package: livedesk-sync
@copyright: 2013 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Martin Saturka
Content for icons of collaborators of chained blogs.
'''
import socket
import logging
from urllib.request import urlopen
from ally.api.model import Content
from urllib.error import HTTPError
from ally.exception import InputError, Ref
from ally.internationalization import _
from urllib.request import Request
from urllib.parse import quote, urlsplit, SplitResult, urlunsplit
# --------------------------------------------------------------------
log = logging.getLogger(__name__)
# --------------------------------------------------------------------
class ChainedIconContent(Content):
'''
    Simple remote icon content fetching
'''
__slots__ = ('_url', '_response')
def __init__(self, contentURL, fileName):
'''
Initialize the content.
@param contentURL: string
The URL of the icon to be downloaded.
@param fileName: string
The name of file under that the icon should be saved.
'''
Content.__init__(self, fileName, 'image', 'binary', 0)
(scheme, netloc, path, query, fragment) = urlsplit(contentURL if not isinstance(contentURL, Request) else contentURL.full_url)
if not scheme: scheme = 'http'
parsed = SplitResult(scheme, netloc, quote(path), quote(query), fragment)
if isinstance(contentURL, Request): contentURL.full_url = urlunsplit(parsed)
else: contentURL = urlunsplit(parsed)
self._url = contentURL
self._response = None
def read(self, nbytes=None):
'''
@see: Content.read
'''
if not self._response:
try:
                req = Request(self._url, headers={'User-Agent' : 'Magic Browser'})
self._response = urlopen(req)
except (HTTPError, socket.error) as e:
                log.error('Can not read icon image data %s' % e)
raise InputError(Ref(_('Can not open icon URL'),))
if not self._response:
            log.error('Can not read icon image data')
raise InputError(Ref(_('Can not open icon URL'),))
if str(self._response.status) != '200':
raise InputError(Ref(_('Can not open icon URL'),))
self.type = self._response.getheader('Content-Type')
if not self.type:
self.type = 'image'
self.length = self._response.getheader('Content-Length')
if not self.length:
self.length = 0
if (not self._response) or self._response.closed:
return ''
try:
if nbytes:
return self._response.read(nbytes)
return self._response.read()
except (HTTPError, socket.error) as e:
log.error('Can not read icon image data %s' % e)
raise InputError(Ref(_('Can not read from icon URL'),))
def next(self):
'''
@see: Content.next
'''
return None
|
yupswing/yaps
|
lib/sound_engine.py
|
Python
|
mit
| 1,288
| 0.000776
|
#
# YetAnotherPythonSnake 0.94
# Author: Simone Cingano (simonecingano@gmail.com)
# Web: http://simonecingano.it
# Licence: MIT
#
import pygame
import os
# YASP common imports
import data
if pygame.mixer:
pygame.mixer.init()
class dummysound:
def play(self): pass
class SoundPlayer:
def __init__(self, sounds):
self.sounds = {}
for s in sounds:
self.load(*s)
def play(self, sound):
self.sounds[sound].play()
def load(self, key, filename):
self.sounds[key] = self.load_sound(filename)
    def load_sound(self, filename):
if not pygame.mixer:
return dummysound()
filepath = data.filepath("sfx", filename)
if filepath:
sound = pygame.mixer.Sound(filepath)
return sound
else:
return dummysound()
EXTENSION = os.name == 'nt' and '.mp3' or '.ogg'
class MusicPlayer:
def __init__(self, track=None):
if track is not None:
self.load(track)
def load(self, track):
pygame.mixer.music.load(data.filepath("music", track + EXTENSION))
def play(self):
pygame.mixer.music.play(-1)
def once(self):
pygame.mixer.music.play()
def stop(self):
pygame.mixer.music.stop()
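# Hedged usage sketch (added for illustration; filenames and track name are hypothetical):
#   sfx = SoundPlayer([("eat", "eat.wav"), ("crash", "crash.wav")])
#   sfx.play("eat")
#   music = MusicPlayer("theme")   # resolves data/music/theme.ogg (.mp3 on Windows)
#   music.play()                   # loops until music.stop() is called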
|
Azure/azure-sdk-for-python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_03_01/aio/operations/_recommendations_operations.py
|
Python
|
mit
| 48,433
| 0.004852
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._recommendations_operations import build_disable_all_for_hosting_environment_request, build_disable_all_for_web_app_request, build_disable_recommendation_for_hosting_environment_request, build_disable_recommendation_for_site_request, build_disable_recommendation_for_subscription_request, build_get_rule_details_by_hosting_environment_request, build_get_rule_details_by_web_app_request, build_list_history_for_hosting_environment_request, build_list_history_for_web_app_request, build_list_recommended_rules_for_hosting_environment_request, build_list_recommended_rules_for_web_app_request, build_list_request, build_reset_all_filters_for_hosting_environment_request, build_reset_all_filters_for_web_app_request, build_reset_all_filters_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RecommendationsOperations:
"""RecommendationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
featured: Optional[bool] = None,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.RecommendationCollection"]:
"""List all recommendations for a subscription.
Description for List all recommendations for a subscription.
:param featured: Specify :code:`<code>true</code>` to return only the most critical
recommendations. The default is :code:`<code>false</code>`, which returns all recommendations.
:type featured: bool
:param filter: Filter is specified by using OData syntax. Example: $filter=channel eq 'Api' or
channel eq 'Notification' and startTime eq 2014-01-01T00:00:00Z and endTime eq
2014-12-31T23:59:59Z and timeGrain eq duration'[PT1H|PT1M|P1D].
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RecommendationCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_03_01.models.RecommendationCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecommendationCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
featured=featured,
filter=filter,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
featured=featured,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("RecommendationCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/recommendations'} # type: ignore
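    # Hedged usage sketch (illustrative; assumes an async WebSiteManagementClient
    # built elsewhere with suitable credentials exposes this group as
    # client.recommendations):
    #   async for recommendation in client.recommendations.list(featured=True):
    #       print(recommendation.name)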
@distributed_trace_async
async def reset_all_filters(
self,
**kwargs: Any
) -> None:
"""Reset all recommendation opt-out settings
|
for a
|
subscription.
Description for Reset all recommendation opt-out settings for a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reset_all_filters_request(
subscription_id=self._config.subscription_id,
template_url=self.reset_all_filters.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
impulse-cloud/web-ping | run-web-ping.py | Python | mit | 364 | 0.005495
import requests
import time
import traceback
import sys
print "Web Ping running..."
print "All connection exceptions will be output to stdout."
while True:
time.sleep(5)
try:
r = requests.get('http://web/')
except:
print "Exception in request:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
nocarryr/django-ingress-agent-info | ingress_agent_info/locations/models.py | Python | gpl-2.0 | 5,462 | 0.006225
import urllib, urllib2
import json
from django.contrib.gis.db import models
from django.contrib.gis import geos
class Country(models.Model):
name = models.CharField(max_length=30, unique=True)
short_name = models.CharField(max_length=10, unique=True)
objects = models.GeoManager()
def __unicode__(self):
return self.short_name
class State(models.Model):
name = models.CharField(max_length=30)
short_name = models.CharField(max_length=2)
country = models.ForeignKey(Country, related_name='states')
objects = models.GeoManager()
def __unicode__(self):
return self.short_name
class City(models.Model):
name = models.CharField(max_length=100)
state = models.ForeignKey(State,
blank=True,
null=True,
related_name='cities')
country = models.ForeignKey(Country, related_name='cities')
center = models.PointField()
viewport = models.MultiPointField()
objects = models.GeoManager()
def __unicode__(self):
names = [self.name]
if self.state:
names.append(unicode(self.state))
else:
names.append(unicode(self.country))
return u', '.join(names)
GEOCODING_CONF_DEFAULTS = {'google_api_key':None, 'max_daily_requests':'2500'}
class GeoCodingConf(models.Model):
name = models.CharField(max_length=30,
choices=((key, key) for key in GEOCODING_CONF_DEFAULTS.keys()),
unique=True)
value = models.CharField(max_length=100, null=True)
def save(self, *args, **kwargs):
if self.value is None:
default_value = GEOCODING_CONF_DEFAULTS.get(self.name)
if default_value is not None:
self.value = default_value
super(GeoCodingConf, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
def get_geocode(address, **kwargs):
address = urllib.quote_plus(address)
show_debug = kwargs.get('show_debug', False)
LOG = kwargs.get('log_fn')
if LOG is None:
def LOG(*args):
if not show_debug:
return
print '\t'.join([str(arg) for arg in args])
conf = {}
for key in GEOCODING_CONF_DEFAULTS.keys():
try:
conf_item = GeoCodingConf.objects.get(name=key)
except GeoCodingConf.DoesNotExist:
conf_item = None
if conf_item is None or conf_item.value is None:
raise Exception('GeoCodingConf items not configured')
conf[key] = conf_item.value
conf['max_daily_requests'] = int(conf['max_daily_requests'])
LOG('conf: %s' % (conf))
base_url = 'https://maps.googleapis.com/maps/api/geocode/json'
qs_fmt = 'address=%(address)s&key=%(api_key)s'
qdict = {'address':address, 'api_key':conf['google_api_key']}
url = '?'.join([base_url, qs_fmt % qdict])
LOG(url)
u = urllib2.urlopen(url)
s = u.read()
u.close()
d = json.loads(s)
if d['status'] != 'OK':
LOG('request status: %s' % d['status'])
if d['status'] == 'ZERO_RESULTS':
return False
def latlong_to_point(latlong):
return geos.Point([latlong[key] for key in ['lat', 'lng']])
def parse_address_type(addr_component):
addr_types = addr_component['types']
if 'locality' in addr_types:
return 'city'
if 'administrative_area_level_1' in addr_types:
return 'state'
if 'country' in addr_types:
return 'country'
addr_types_needed = ['city', 'state', 'country']
return_data = {}
geometry = {}
for result in d['results']:
for addr_comp in result['address_components']:
addr_type = parse_address_type(addr_comp)
if not addr_type:
continue
if addr_type not in return_data:
return_data[addr_type] = {}
if 'name' not in return_data[addr_type]:
return_data[addr_type]['name'] = addr_comp['long_name']
if addr_type != 'city' and 'short_name' not in return_data[addr_type]:
return_data[addr_type]['short_name'] = addr_comp['short_name']
if 'center' not in geometry and 'location' in result['geometry']:
loc = result['geometry']['location']
geometry['center'] = latlong_to_point(loc)
if 'viewport' not in geometry and 'viewport' in result['geometry']:
vp = result['geometry']['viewport']
geometry['viewport'] = geos.MultiPoint(*[latlong_to_point(vp[vpkey]) for vpkey in ['northeast', 'southwest']])
if len(return_data) < len(addr_types_needed):
return False
return_data['city'].update(geometry)
return return_data
def build_from_geocode(data):
country, created = Country.objects.get_or_create(**data['country'])
if data.get('state'):
skwargs = data['state'].copy()
skwargs['country'] = country
state, created = State.objects.get_or_create(**skwargs)
else:
state = None
ckwargs = data['city'].copy()
ckwargs['country'] = country
if state is not None:
ckwargs['state'] = state
city, created = City.objects.get_or_create(**ckwargs)
def get_and_build_from_address(address):
geodata = get_geocode(address)
if not geodata:
return
build_from_geocode(geodata)
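# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes GeoCodingConf rows for 'google_api_key' and
# 'max_daily_requests' have already been created, and the looked-up city name
# is hypothetical:
#
#   get_and_build_from_address('Warsaw, Poland')
#   city = City.objects.get(name='Warsaw')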
p1c2u/openapi-core | tests/integration/contrib/django/test_django_project.py | Python | bsd-3-clause | 10,712 | 0
import os
import sys
from base64 import b64encode
from json import dumps
from unittest import mock
import pytest
class BaseTestDjangoProject:
api_key = "12345"
@property
def api_key_encoded(self):
api_key_bytes = self.api_key.encode("utf8")
api_key_bytes_enc = b64encode(api_key_bytes)
return str(api_key_bytes_enc, "utf8")
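# Worked example (comment added for clarity): with api_key = "12345",
# b64encode(b"12345") == b"MTIzNDU=", so api_key_encoded == "MTIzNDU=".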
@pytest.fixture(autouse=True, scope="module")
def django_setup(self):
directory = os.path.abspath(os.path.dirname(__file__))
django_project_dir = os.path.join(directory, "data/v3.0")
sys.path.insert(0, django_project_dir)
with mock.patch.dict(
os.environ,
{
"DJANGO_SETTINGS_MODULE": "djangoproject.settings",
},
):
import django
django.setup()
yield
sys.path.remove(django_project_dir)
@pytest.fixture
def client(self):
from django.test import Client
return Client()
class TestPetListView(BaseTestDjangoProject):
def test_get_no_required_param(self, client):
headers = {
"HTTP_AUTHORIZATION": "Basic testuser",
"HTTP_HOST": "petstore.swagger.io",
}
with pytest.warns(DeprecationWarning):
response = client.get("/v1/pets", **headers)
expected_data = {
"errors": [
{
"class": (
"<class 'openapi_core.exceptions."
"MissingRequiredParameter'>"
),
"status": 400,
"title": "Missing required parameter: limit",
}
]
}
assert response.status_code == 400
assert response.json() == expected_data
def test_get_valid(self, client):
data_json = {
"limit": 12,
}
headers = {
"HTTP_AUTHORIZATION": "Basic testuser",
"HTTP_HOST": "petstore.swagger.io",
}
with pytest.warns(DeprecationWarning):
response = client.get("/v1/pets", data_json, **headers)
expected_data = {
"data": [
{
"id": 12,
"name": "Cat",
"ears": {
"healthy": True,
},
},
],
}
assert response.status_code == 200
assert response.json() == expected_data
def test_post_server_invalid(self, client):
headers = {
"HTTP_HOST": "petstore.swagger.io",
}
response = client.post("/v1/pets", **headers)
expected_data = {
"errors": [
{
"class": (
"<class 'openapi_core.templating.paths.exceptions."
"ServerNotFound'>"
),
"status": 400,
"title": (
"Server not found for "
"http://petstore.swagger.io/v1/pets"
),
}
]
}
assert response.status_code == 400
assert response.json() == expected_data
def test_post_required_header_param_missing(self, client):
client.cookies.load({"user": 1})
pet_name = "Cat"
pet_tag = "cats"
pet_street = "Piekna"
pet_city = "Warsaw"
pet_healthy = False
data_json = {
"name": pet_name,
"tag": pet_tag,
"position": 2,
"address": {
"street": pet_street,
"city": pet_city,
},
"healthy": pet_healthy,
"wings": {
"healthy": pet_healthy,
},
}
content_type = "application/json"
headers = {
"HTTP_AUTHORIZATION": "Basic testuser",
"HTTP_HOST": "staging.gigantic-server.com",
}
response = client.post(
"/v1/pets", data_json, content_type, secure=True, **headers
)
expected_data = {
"errors": [
{
"class": (
"<class 'openapi_core.exceptions."
"MissingRequiredParameter'>"
),
"status": 400,
"title": "Missing required parameter: api-key",
}
]
}
assert response.status_code == 400
assert response.json() == expected_data
def test_post_media_type_invalid(self, client):
client.cookies.load({"user": 1})
data = "data"
content_type = "text/html"
headers = {
"HTTP_AUTHORIZATION": "Basic testuser",
"HTTP_HOST": "staging.gigantic-server.com",
"HTTP_API_KEY": self.api_key_encoded,
}
response = client.post(
"/v1/pets", data, content_type, secure=True, **headers
)
expected_data = {
"errors": [
{
"class": (
"<class 'openapi_core.templating.media_types."
"exceptions.MediaTypeNotFound'>"
),
"status": 415,
"title": (
"Content for the following mimetype not found: "
"text/html. "
"Valid mimetypes: ['application/json', 'text/plain']"
),
}
]
}
assert response.status_code == 415
assert response.json() == expected_data
def test_post_required_cookie_param_missing(self, client):
data_json = {
"id": 12,
"name": "Cat",
"ears": {
"healthy": True,
},
}
content_type = "application/json"
headers = {
"HTTP_AUTHORIZATION": "Basic testuser",
"HTTP_HOST": "staging.gigantic-server.com",
"HTTP_API_KEY": self.api_key_encoded,
}
response = client.post(
"/v1/pets", data_json, content_type, secure=True, **headers
)
expected_data = {
"errors": [
{
"class": (
"<class 'openapi_core.exceptions."
"MissingRequiredParameter'>"
),
"status": 400,
"title": "Missing required parameter: user",
}
]
}
assert response.status_code == 400
assert response.json() == expected_data
def test_post_valid(self, client):
client.cookies.load({"user": 1})
content_type = "application/json"
data_json = {
"id": 12,
"name": "Cat",
"ears": {
"healthy": True,
},
}
headers = {
"HTTP_AUTHORIZATION": "Basic testuser",
"HTTP_HOST": "staging.gigantic-server.com",
"HTTP_API_KEY": self.api_key_encoded,
}
response = client.post(
"/v1/pets", data_json, content_type, secure=True, **headers
)
assert response.status_code == 201
assert not response.content
class TestPetDetailView(BaseTestDjangoProject):
def test_get_server_invalid(self, client):
response = client.get("/v1/pets/12")
expected_data = (
b"You may need to add 'testserver' to ALLOWED_HOSTS."
)
assert response.status_code == 400
assert expected_data in response.content
def test_get_unauthorized(self, client):
headers = {
"HTTP_HOST": "petstore.swagger.io",
}
response = client.get("/v1/pets/12", **headers)
expected_data = {
"errors": [
{
"class": (
"<class 'openapi_core.validation.exceptions."
"InvalidSecurity'>"
),
khughitt/ete | ete_dev/orthoxml/__init__.py | Python | gpl-3.0 | 25 | 0.04
from _orthoxml import *
vincentltz/MobileR | Scraper/myUtility.py | Python | apache-2.0 | 2,217 | 0.032927
'''
Copyright 2015 Kendall Bailey
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import json
def ReadFileLines(filename):
try:
f = open(filename)
except IOError as e:
print "Problem opening file {2}. Error ({0}) : {1}".format(e.errno, e.strerror, filename)
sys.exit(2)
lines = f.readlines()
f.close()
return lines
def ReadFile(filename):
try:
f = open(filename)
except IOError as e:
print "Problem opening file {2}. Error ({0}) : {1}".format(e.errno, e.strerror, filename)
sys.exit(2)
text = f.read()
f.close()
return text
def CheckArgs(numArgs, pattern):
if len(sys.argv) != (numArgs + 1):
print "Error: " + pattern
print "length = " + str(len(sys.argv))
sys.exit(2)
def OverwriteFile(filename, data):
try:
with open(filename, 'w') as outfile:
outfile.write(data)
outfile.close()
except UnicodeEncodeError as e:
print "Unicode Encode Error: Couldn't write --" + data + "-- to file"
sys.exit(2)
def AppendToFile(filename, data):
try:
with open(filename, 'a') as outfile:
outfile.write(data)
outfile.close()
except UnicodeEncodeError as e:
print "Unicode Encode Error: Couldn't write --" + data + "-- to file"
sys.exit(2)
#@deprecated
def OverwriteJsonListToFile(filename, ls):
OverwriteFile(filename, "")
for e in ls:
AppendToFile(filename, e.encode("ascii", "ignore") + "\n")
#@deprecated
def AppendJsonListToFile(filename, ls):
for e in ls:
AppendToFile(filename, e.encode("ascii", "ignore") + "\n")
def AppendStrListToFile(filename, ls):
AppendJsonListToFile(filename, ls)
def OverwriteStrListToFile(filename, ls):
OverwriteJsonListToFile(filename, ls)
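# Illustrative usage sketch (added for clarity; the script name and argument
# pattern are hypothetical, not part of this module):
#
#   CheckArgs(2, "usage: scraper.py <input-file> <output-file>")
#   lines = ReadFileLines(sys.argv[1])
#   OverwriteStrListToFile(sys.argv[2], [l.strip() for l in lines])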
bgris/ODL_bgris | odl/test/largescale/tomo/analytic_slow_test.py | Python | gpl-3.0 | 7,939 | 0.000756
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Test analytical reconstruction methods."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
# External
import pytest
import numpy as np
# Internal
import odl
import odl.tomo as tomo
from odl.util.testutils import skip_if_no_largescale, simple_fixture
from odl.tomo.util.testutils import (skip_if_no_astra, skip_if_no_astra_cuda,
skip_if_no_scikit)
filter_type = simple_fixture(
'filter_type', ['Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann'])
frequency_scaling = simple_fixture(
'frequency_scaling', [0.5, 0.9, 1.0])
# Find the valid projectors
# TODO: Add nonuniform once #671 is solved
projectors = [skip_if_no_astra('par2d astra_cpu uniform'),
skip_if_no_astra('cone2d astra_cpu uniform'),
skip_if_no_astra_cuda('par2d astra_cuda uniform'),
skip_if_no_astra_cuda('cone2d astra_cuda uniform'),
skip_if_no_astra_cuda('par3d astra_cuda uniform'),
skip_if_no_astra_cuda('cone3d astra_cuda uniform'),
skip_if_no_astra_cuda('helical astra_cuda uniform'),
skip_if_no_scikit('par2d scikit uniform')]
projector_ids = ['geom={}, impl={}, angles={}'
''.format(*p.args[1].split()) for p in projectors]
# bug in pytest (ignores pytestmark) forces us to do it this way
largescale = " or not pytest.config.getoption('--largescale')"
projectors = [pytest.mark.skipif(p.args[0] + largescale, p.args[1])
for p in projectors]
@pytest.fixture(scope="module", params=projectors, ids=projector_ids)
def projector(request):
n_angles = 500
dtype = 'float32'
geom, impl, angle = request.param.split()
if angle == 'uniform':
apart = odl.uniform_partition(0, 2 * np.pi, n_angles)
elif angle == 'random':
# Linearly spaced with random noise
min_pt = 2 * (2.0 * np.pi) / n_angles
max_pt = (2.0 * np.pi) - 2 * (2.0 * np.pi) / n_angles
points = np.linspace(min_pt, max_pt, n_angles)
points += np.random.rand(n_angles) * (max_pt - min_pt) / (5 * n_angles)
apart = odl.nonuniform_partition(points)
elif angle == 'nonuniform':
# Angles spaced quadratically
min_pt = 2 * (2.0 * np.pi) / n_angles
max_pt = (2.0 * np.pi) - 2 * (2.0 * np.pi) / n_angles
points = np.linspace(min_pt ** 0.5, max_pt ** 0.5, n_angles) ** 2
apart = odl.nonuniform_partition(points)
else:
raise ValueError('angle not valid')
if geom == 'par2d':
# Discrete reconstruction space
discr_reco_space = odl.uniform_discr([-20, -20], [20, 20],
[100, 100], dtype=dtype)
# Geometry
dpart = odl.uniform_partition(-30, 30, 500)
geom = tomo.Parallel2dGeometry(apart, dpart)
# Ray transform
return tomo.RayTransform(discr_reco_space, geom, impl=impl)
elif geom == 'par3d':
# Discrete reconstruction space
discr_reco_space = odl.uniform_discr([-20, -20, -20], [20, 20, 20],
[100, 100, 100], dtype=dtype)
# Geometry
dpart = odl.uniform_partition([-30, -30], [30, 30], [200, 200])
geom = tomo.Parallel3dAxisGeometry(apart, dpart, axis=[1, 1, 0])
# Ray transform
return tomo.RayTransform(discr_reco_space, geom, impl=impl)
elif geom == 'cone2d':
# Discrete reconstruction space
discr_reco_space = odl.uniform_discr([-20, -20], [20, 20],
[100, 100], dtype=dtype)
# Geometry
dpart = odl.uniform_partition(-40, 40, 200)
geom = tomo.FanFlatGeometry(apart, dpart,
src_radius=100, det_radius=100)
# Ray transform
return tomo.RayTransform(discr_reco_space, geom, impl=impl)
elif geom == 'cone3d':
# Discrete reconstruction space
discr_reco_space = odl.uniform_discr([-20, -20, -20], [20, 20, 20],
[100, 100, 100], dtype=dtype)
# Geometry
dpart = odl.uniform_partition([-50, -50], [50, 50], [200, 200])
geom = tomo.CircularConeFlatGeometry(
apart, dpart, src_radius=100, det_radius=100, axis=[1, 0, 0])
# Ray transform
return tomo.RayTransform(discr_reco_space, geom, impl=impl)
elif geom == 'helical':
# Discrete reconstruction space
discr_reco_space = odl.uniform_discr([-20, -20, 0], [20, 20, 40],
[100, 100, 100], dtype=dtype)
# Geometry
# TODO: angles
n_angle = 2000
apart = odl.uniform_partition(0, 8 * 2 * np.pi, n_angle)
dpart = odl.uniform_partition([-50, -4], [50, 4], [200, 20])
geom = tomo.HelicalConeFlatGeometry(apart, dpart, pitch=5.0,
src_radius=100, det_radius=100)
# Windowed ray transform
return tomo.RayTransform(discr_reco_space, geom, impl=impl)
else:
raise ValueError('param not valid')
@skip_if_no_largescale
def test_fbp_reconstruction(projector):
"""Test filtered back-projection with various projectors."""
# Create Shepp-Logan phantom
vol = odl.phantom.shepp_logan(projector.domain, modified=False)
# Project data
projections = projector(vol)
# Create default FBP operator and apply to projections
fbp_operator = odl.tomo.fbp_op(projector)
# Add window if problem is in 3d.
if (isinstance(projector.geometry, odl.tomo.HelicalConeFlatGeometry) and
projector.geometry.pitch != 0):
fbp_operator = fbp_operator * odl.tomo.tam_danielson_window(projector)
# Compute the FBP result
fbp_result = fbp_operator(projections)
maxerr = vol.norm() / 5.0
error = vol.dist(fbp_result)
assert error < maxerr
@skip_if_no_astra_cuda
@skip_if_no_largescale
def test_fbp_reconstruction_filters(filter_type, frequency_scaling):
"""Validate that the various filters work as expected."""
apart = odl.uniform_partition(0, np.pi, 500)
discr_reco_space = odl.uniform_discr([-20, -20], [20, 20],
[100, 100], dtype='float32')
# Geometry
dpart = odl.uniform_partition(-30, 30, 500)
geom = tomo.Parallel2dGeometry(apart, dpart)
# Ray transform
projector = tomo.RayTransform(discr_reco_space, geom, impl='astra_cuda')
# Create Shepp-Logan phantom
vol = odl.phantom.shepp_logan(projector.domain, modified=False)
# Project data
projections = projector(vol)
# Create FBP operator with filters and apply to projections
fbp_operator = odl.tomo.fbp_op(projector,
filter_type=filter_type,
frequency_scaling=frequency_scaling)
fbp_result = fbp_operator(projections)
maxerr = vol.norm() / 5.0
error = vol.dist(fbp_result)
assert error < maxerr
if __name__ == '__main__':
pytest.main([str(__file__.replace('\\', '/')), '-v', '--largescale'])
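# Illustrative stand-alone FBP sketch (added for clarity; it simply mirrors the
# par2d/astra_cuda path exercised above and is not part of the test module):
#
#   space = odl.uniform_discr([-20, -20], [20, 20], [100, 100], dtype='float32')
#   apart = odl.uniform_partition(0, np.pi, 500)
#   dpart = odl.uniform_partition(-30, 30, 500)
#   geom = tomo.Parallel2dGeometry(apart, dpart)
#   ray_trafo = tomo.RayTransform(space, geom, impl='astra_cuda')
#   fbp = odl.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.9)
#   reco = fbp(ray_trafo(odl.phantom.shepp_logan(space, modified=False)))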
alex/readthedocs.org | readthedocs/core/admin.py | Python | mit | 356 | 0.005618
"""Djang
|
o admin interface for core models.
"""
from django.contrib import admin
from core.models import UserProfile
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'whitelisted', 'homepage')
search_fields = ('user__username', 'homepage')
list_editable = ('whitelisted',)
admin.site.register(UserProfile, UserProfileAdmin)
ryanbressler/numpy2go | numpy2go.py | Python | bsd-2-clause | 496 | 0.004032
import numpy as np
import ctypes
import numpy.ctypeslib as npct
# For more information see:
# https://scipy-lectures.github.io/advanced/interfacing_with_c/interfacing_with_c.html#id5
numpy2go = npct.load_library("numpy2go", ".")
array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
numpy2go.Test.restype = None
numpy2go.Test.argtypes = [array_1d_double, ctypes.c_int]
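# Note (added for clarity): given the restype/argtypes above, the shared
# library is expected to export a C-callable symbol equivalent to
# `void Test(double *data, int n)`; how that symbol is produced on the Go side
# (e.g. a cgo //export) is outside this file.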
data = np.array([0.0, 1.0, 2.0])
print("Python says", data)
numpy2go.Test(data, len(data))
smartbgp/yarib | yarib/db/mongodb.py | Python | apache-2.0 | 4,903 | 0.001224
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" mongodb """
import logging
import pymongo
LOG = logging.getLogger(__name__)
class MongoApi(object):
"""Class handling MongoDB specific functionality.
This class uses PyMongo APIs internally to create database connection,
also serves as handle to collection for get/set operations, etc.
"""
# class level attributes for re-use of db client connection and collection
_DB = {} # dict of db_name: db connection reference
_MONGO_COLLS = {} # dict of cache_collection : db collection reference
def __init__(self, connection_url, db_name, use_replica=False, replica_name='rs1',
read_preference=3, write_concern=-1, w_timeout=5000):
"""for parameters details, please see
http://api.mongodb.org/python/current/api/pymongo/mongo_client.html
:param connection_url: string like mongodb://localhost:27017
:param db_name: database name, string
:param use_replica: if use replica, true of false
:param replica_name: if use replica, then the replica set name
:param read_preference: read preference, interger
:param write_concern: write concern, interger
:param w_timeout: write concern timeout, interger
"""
LOG.debug('Creating MongoDB API class')
self.conection_url = connection_url
self.db_name = db_name
self.collection_name = None
# config about replica set
self.use_replica = use_replica
self.replica_name = replica_name
self.read_preference = read_preference
# Write Concern options:
self.w = write_concern
self.w_timeout = w_timeout
def _get_db(self):
"""
get database
:return: database
"""
try:
if self.use_replica: # if use replica set configuration
connection = pymongo.MongoClient(
self.conection_url, replicaSet=self.replica_name)
else: # use for standalone node or mongos in sharded setup
connection = pymongo.MongoClient(self.conection_url)
except pymongo.errors.ConnectionFailure as e:
LOG.warn('Unable to connect to the database server, %s', e)
raise
database = getattr(connection, self.db_name)
return database
def _close_db(self):
if self.db_name in self._DB:
self._DB[self.db_name].client.close()
self._DB.pop(self.db_name)
def get_collection(self):
"""
get collection
:return: database collection
"""
if self.collection_name not in self._MONGO_COLLS:
if self.db_name not in self._DB:
self._DB[self.db_name] = self._get_db()
coll = getattr(self._DB[self.db_name], self.collection_name)
if self.use_replica:
# for read preference
# TODO(xiaoquwl) fix this as more elegant way in future
if self.read_preference is not None:
if self.read_preference == 0:
coll.with_options(read_preference=pymongo.ReadPreference.PRIMARY)
elif self.read_preference == 1:
coll.with_options(read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
elif self.read_preference == 2:
coll.with_options(read_preference=pymongo.ReadPreference.SECONDARY)
elif self.read_preference == 3:
coll.with_options(read_preference=pymongo.ReadPreference.SECONDARY_PREFERRED)
elif self.read_preference == 4:
coll.with_options(read_preference=pymongo.ReadPreference.NEAREST)
else:
LOG.error('unknow read preference setting')
pass
# for write concern
if self.w > -1:
coll.write_concern['w'] = self.w
coll.write_concern['wtimeout'] = 5000
self._MONGO_COLLS[self.collection_name] = coll
return self._MONGO_COLLS[self.collection_name]
def remove_collection(self):
self._DB[self.db_name].drop_collection(self.collection_name)
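# Illustrative usage sketch (added for clarity; the database and collection
# names are hypothetical, not part of this module):
#
#   api = MongoApi('mongodb://localhost:27017', 'yarib_db')
#   api.collection_name = 'routes'
#   coll = api.get_collection()
#   coll.insert_one({'prefix': '10.0.0.0/8'})  # standard pymongo >= 3 call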
ketan-analytics/learnpython | Safaribookonline-Python/courseware-btb/solutions/py3/patterns/properties_extra.py | Python | gpl-2.0 | 2,255 | 0.000887
'''
Ohm's law is a simple equation describing electrical circuits. It
states that the voltage V through a resistor is equal to the current
(I) times the resistance:
V = I * R
The units of these are volts, ampheres (or "amps"), and ohms,
respectively. In real circuits, often R is actually measured in
kiloohms (10**3 ohms) and I in milliamps (10**-3 amps).
Let's create a Resistor class that models this behavior. The
constructor takes two arguments - the resistance in ohms, and the
voltage in volts:
>>> resistor = Resistor(800, 5.5)
>>> resistor.resistance
800
>>> resistor.voltage
5.5
The current is derived from these two using Ohm's law:
(Hint: use @property)
>>> resistor.current
0.006875
Since we may want the value in milliamps, let's make another property
to provide that:
>>> resistor.current_in_milliamps
6.875
Let's set it up so that we can change the current, and doing so will
correspondingly modify the voltage (but keep the resistance constant).
>>> resistor.current_in_milliamps = 3.5
>>> resistor.resistance
800
>>> round(resistor.voltage, 2)
2.8
>>> resistor.current = .006875
>>> round(resistor.voltage, 2)
5.5
>>> resistor.resistance
800
Also, we've made a design decision that a Resistor cannot change its
resistance value once created:
>>> resistor.resistance = 8200
Traceback (most recent call last):
AttributeError: can't set attribute
'''
# Write your code here:
class Resistor:
def __init__(self, resistance, voltage):
self._resistance = resistance
self.voltage = voltage
@property
def resistance(self):
return self._resistance
@property
def current(self):
return self.voltage / self.resistance
@current.setter
def current(self, value):
self.voltage = self.resistance * value
@property
def current_in_milliamps(self):
return self.current * 1000
@current_in_milliamps.setter
def current_in_milliamps(self, value):
self.current = value / 1000
# Do not edit any code below this line!
if __name__ == '__main__':
import doctest
count, _ = doctest.testmod()
if count == 0:
print('*** ALL TESTS PASS ***\nGive someone a HIGH FIVE!')
# Copyright 2015-2018 Aaron Maxwell. All rights reserved.
UnderXirox/Python-3_Des-fichiers-complementaires | test_asyncio_35.py | Python | gpl-3.0 | 1,476 | 0.009498
#!/usr/bin/python3.4
from itertools import permutations
import asyncio
from time import time
class BoardTester:
def __init__(self, n):
self.data = permutations(range(n))
async def __aiter__(self):
return self
async def __anext__(self):
    try:
        return next(self.data)
    except StopIteration:
        raise StopAsyncIteration
async def nqueens_async_coroutine(n):
    columns = range(n)
    async for board in BoardTester(n):
if n == len(set(board[i]+i for i in columns)) \
== len(set(board[i]-i for i in columns)):
pass # print(board)
# await (board for board in permutations(columns)
# if n == len(set(board[i]+i for i in columns))
# == len(set(board[i]-i for i in columns)))
async def print_nqueen_solutions(n):
result = await(nqueens_async_coroutine(n))
print("Résultat trouvé: {}".format(result))
def nqueens_async(n):
loop = asyncio.get_event_loop()
loop.run_until_complete(print_nqueen_solutions(n))
loop.close()
def nqueens_sync(n):
columns=range(n)
for board in permutations(columns):
if n == len(set(board[i]+i for i in columns)) \
== len(set(board[i]-i for i in columns)):
pass # print(board)
if __name__ == '__main__':
t0=time()
res=nqueens_sync(9)
t1=time()
print('4-Dames en synchrone : %12.9f secondes' % (t1-t0))
t0=time()
res=nqueens_async(9)
t1=time()
print('4-Dames en asynchrone : %12.9f secondes' % (t1-t0))
nttks/edx-platform | cms/djangoapps/course_creators/tests/test_admin.py | Python | agpl-3.0 | 7,332 | 0.004501
"""
Tests course_creators.admin.py.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.http import HttpRequest
import mock
from course_creators.admin import CourseCreatorAdmin
from course_creators.models import CourseCreator
from django.core import mail
from student.roles import CourseCreatorRole
from student import auth
def mock_render_to_string(template_name, context):
"""Return a string that encodes template_name and context"""
return str((template_name, context))
class CourseCreatorAdminTest(TestCase):
"""
Tests for course creator admin.
"""
def setUp(self):
""" Test case setup """
super(CourseCreatorAdminTest, self).setUp()
self.user = User.objects.create_user('test_user', 'test_user+courses@edx.org', 'foo')
self.table_entry = CourseCreator(user=self.user)
self.table_entry.save()
self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
self.admin.is_staff = True
self.request = HttpRequest()
self.request.user = self.admin
self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())
self.studio_request_email = 'mark@marky.mark'
self.enable_creator_group_patch = {
"ENABLE_CREATOR_GROUP": True,
"STUDIO_REQUEST_EMAIL": self.studio_request_email
}
@mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
@mock.patch('django.contrib.auth.models.User.email_user')
def test_change_status(self, email_user):
"""
Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent.
"""
def change_state_and_verify_email(state, is_creator):
""" Changes user state, verifies creator status, and verifies e-mail is sent based on transition """
self._change_state(state)
self.assertEqual(is_creator, auth.user_has_role(self.user, CourseCreatorRole()))
context = {'studio_request_email': self.studio_request_email}
if state == CourseCreator.GRANTED:
template = 'emails/course_creator_granted.txt'
elif state == CourseCreator.DENIED:
template = 'emails/course_creator_denied.txt'
else:
template = 'emails/course_creator_revoked.txt'
email_user.assert_called_with(
mock_render_to_string('emails/course_creator_subject.txt', context),
mock_render_to_string(template, context),
self.studio_request_email
)
with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
# User is initially unrequested.
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
change_state_and_verify_email(CourseCreator.GRANTED, True)
change_state_and_verify_email(CourseCreator.DENIED, False)
change_state_and_verify_email(CourseCreator.GRANTED, True)
change_state_and_verify_email(CourseCreator.PENDING, False)
change_state_and_verify_email(CourseCreator.GRANTED, True)
change_state_and_verify_email(CourseCreator.UNREQUESTED, False)
change_state_and_verify_email(CourseCreator.DENIED, False)
@mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
def test_mail_admin_on_pending(self):
"""
Tests that the admin account is notified when a user is in the 'pending' state.
"""
def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):
""" Changes user state and verifies e-mail sent to admin address only when pending. """
mail.outbox = []
self._change_state(state)
# If a message is sent to the user about course creator status change, it will be the first
# message sent. Admin message will follow.
base_num_emails = 1 if expect_sent_to_user else 0
if expect_sent_to_admin:
context = {'user_name': "test_user", 'user_email': u'test_user+courses@edx.org'}
self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')
sent_mail = mail.outbox[base_num_emails]
self.assertEquals(
mock_render_to_string('emails/course_creator_admin_subject.txt', context),
sent_mail.subject
)
self.assertEquals(
mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),
sent_mail.body
)
self.assertEquals(self.studio_request_email, sent_mail.from_email)
self.assertEqual([self.studio_request_email], sent_mail.to)
else:
self.assertEquals(base_num_emails, len(mail.outbox))
with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
# E-mail message should be sent to admin only when new state is PENDING, regardless of what
# previous state was (unless previous state was already PENDING).
# E-mail message sent to user only on transition into and out of GRANTED state.
check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)
check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)
check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)
check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
def _change_state(self, state):
""" Helper method for changing state """
self.table_entry.state = state
self.creator_admin.save_model(self.request, self.table_entry, None, True)
def test_add_permission(self):
"""
Tests that staff cannot add entries
"""
self.assertFalse(self.creator_admin.has_add_permission(self.request))
def test_delete_permission(self):
"""
Tests that staff cannot delete entries
"""
self.assertFalse(self.creator_admin.has_delete_permission(self.request))
def test_change_permission(self):
"""
Tests that only staff can change entries
"""
self.assertTrue(self.creator_admin.has_change_permission(self.request))
self.request.user = self.user
self.assertFalse(self.creator_admin.has_change_permission(self.request))
RandolphVI/CNN-Text-Classification | SANN/train_sann.py | Python | apache-2.0 | 11,145 | 0.004217
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
from tensorboard.plugins import projector
from text_sann import TextSANN
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
OPTION = dh._option(pattern=0)
logger = dh.logger_fn("tflog", "logs/{0}-{1}.log".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))
def create_input_data(data: dict):
return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])
def train_sann():
"""Training RNN model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load sentences, labels, and training parameters
logger.info("Loa
|
ding data...")
logger.info("Data processing...")
train_data = dh.load_data_and_labels(args, args.train_file, word2idx)
val_data = dh.load_data_and_labels(args, args.validation_file, word2idx)
# Build a graph and sann object
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
sann = TextSANN(
sequence_length=args.pad_seq_len,
vocab_size=len(word2idx),
embedding_type=args.embedding_type,
embedding_size=args.embedding_dim,
lstm_hidden_size=args.lstm_dim,
attention_unit_size=args.attention_dim,
attention_hops_size=args.attention_hops_dim,
fc_hidden_size=args.fc_dim,
num_classes=args.num_classes,
l2_reg_lambda=args.l2_lambda,
pretrained_embedding=embedding_matrix)
# Define training procedure
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate,
global_step=sann.global_step,
decay_steps=args.decay_steps,
decay_rate=args.decay_rate,
staircase=True)
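# (comment added for clarity) with staircase=True this evaluates to
#   lr = args.learning_rate * args.decay_rate ** (global_step // args.decay_steps)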
optimizer = tf.train.AdamOptimizer(learning_rate)
grads, vars = zip(*optimizer.compute_gradients(sann.loss))
grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio)
train_op = optimizer.apply_gradients(zip(grads, vars), global_step=sann.global_step, name="train_op")
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in zip(grads, vars):
if g is not None:
grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
out_dir = dh.get_out_dir(OPTION, logger)
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, "bestcheckpoints"))
# Summaries for loss
loss_summary = tf.summary.scalar("loss", sann.loss)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Validation summaries
validation_summary_op = tf.summary.merge([loss_summary])
validation_summary_dir = os.path.join(out_dir, "summaries", "validation")
validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)
best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=True)
if OPTION == 'R':
# Load sann model
logger.info("Loading model...")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
logger.info(checkpoint_file)
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
if OPTION == 'T':
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Embedding visualization config
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = "embedding"
embedding_conf.metadata_path = args.metadata_file
projector.visualize_embeddings(train_summary_writer, config)
projector.visualize_embeddings(validation_summary_writer, config)
# Save the embedding visualization
saver.save(sess, os.path.join(out_dir, "embedding", "embedding.ckpt"))
current_step = sess.run(sann.global_step)
def train_step(batch_data):
"""A single training step."""
x_f, x_b, y_onehot = zip(*batch_data)
feed_dict = {
sann.input_x_front: x_f,
sann.input_x_behind: x_b,
sann.input_y: y_onehot,
sann.dropout_keep_prob: args.dropout_rate,
sann.is_training: True
}
_, step, summaries, loss = sess.run(
[train_op, sann.global_step, train_summary_op, sann.loss], feed_dict)
logger.info("step {0}: loss {1:g}".format(step, loss))
train_summary_writer.add_summary(summaries, step)
def validation_step(val_loader, writer=None):
"""Evaluates model on a validation set."""
batches_validation = dh.batch_iter(list(create_input_data(val_loader)), args.batch_size, 1)
eval_counter, eval_loss = 0, 0.0
true_labels = []
predicted_scores = []
predicted_labels = []
for batch_validation in batches_validation:
x_f, x_b, y_onehot = zip(*batch_validation)
feed_dict = {
sann.input_x_front: x_f,
sann.input_x_behind: x_b,
sann.input_y: y_onehot,
sann.dropout_keep_prob: 1.0,
sann.is_training: False
}
step, summaries, predictions, cur_loss = sess.run(
[sann.global_step, validation_summary_op, sann.topKPreds, sann.loss], feed_dict)
# Prepare for calculating metrics
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in predictions[0]:
predicted_scores.append(
crwilcox/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/networkresourceprovider.py | Python | apache-2.0 | 866,693 | 0.007992
#
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Warning: This code was generated by a tool.
#
# Changes to this file may cause incorrect behavior and will be lost if the
# code is regenerated.
import json
import re
from requests import Session, Request
import time
try:
from urllib import quote, unquote
except:
from urllib.parse import quote, unquote
from azure.common import AzureHttpError
from azure.mgmt.common import AzureOperationResponse, OperationStatusResponse, OperationStatus, Service
from azure.mgmt.common.arm import ResourceBase, ResourceBaseExtended
class DnsNameAvailabilityResponse(AzureOperationResponse):
"""
Response for CheckDnsNameAvailability Api service call
"""
def __init__(self, **kwargs):
super(DnsNameAvailabilityResponse, self).__init__(**kwargs)
self._dns_name_availability = kwargs.get('dns_name_availability')
@property
def dns_name_availability(self):
"""
Domain availability (True/False)
"""
return self._dns_name_availability
@dns_name_availability.setter
def dns_name_availability(self, value):
self._dns_name_availability = value
class ResourceProviderErrorResponse(AzureOperationResponse):
"""
If the resource provider needs to return an error to any operation, it
should return the appropriate HTTP error code and a message body as can
be seen below. The message should be localized per the Accept-Language
header specified in the original request such that it could be directly
exposed to users
"""
def __init__(self, **kwargs):
super(ResourceProviderErrorResponse, self).__init__(**kwargs)
self._error = kwargs.get('error')
@property
def error(self):
return self._error
@error.setter
def error(self, value):
self._error = value
class RetriableOperationResponse(ResourceProviderErrorResponse):
"""
If the resource provider needs to return an error to any operation, it
should return the appropriate HTTP error code and a message body as can
be seen below. The message should be localized per the Accept-Language
header specified in the original request such that it could be directly
exposed to users
"""
def __init__(self, **kwargs):
super(RetriableOperationResponse, self).__init__(**kwargs)
self._retry_after = kwargs.get('retry_after')
@property
def retry_after(self):
"""
The recommended retry interval for the Get Azure-AsyncOperation call
"""
return self._retry_after
@retry_after.setter
def retry_after(self, value):
self._retry_after = value
class AzureAsyncOperationResponse(RetriableOperationResponse):
"""
The response body contains the status of the specified asynchronous
operation, indicating whether it has succeeded, is in progress, or has
failed. Note that this status is distinct from the HTTP status code
returned for the Get Operation Status operation itself. If the
asynchronous operation succeeded, the response body includes the HTTP
status code for the successful request. If the asynchronous operation
failed, the response body includes the HTTP status code for the failed
request and error information regarding the failure.
"""
def __init__(self, **kwargs):
super(AzureAsyncOperationResponse, self).__init__(**kwargs)
self._status = kwargs.get('status')
@property
def status(self):
"""
Status of the AzureAsyncOperation
"""
return self._status
@status.setter
def status(self, value):
self._status = value
class UpdateOperationResponse(RetriableOperationResponse):
"""
If the resource provider needs to return an error to any operation, it
should return the appropriate HTTP error code and a message body as can
be seen below. The message should be localized per the Accept-Language
header specified in the original request such that it could be directly
exposed to users
"""
def __init__(self, **kwargs):
super(UpdateOperationResponse, self).__init__(**kwargs)
self._azure_async_operation = kwargs.get('azure_async_operation')
@property
def azure_async_operation(self):
"""
Users can perform a Get on Azure-AsyncOperation to get the status of
their update(PUT/PATCH/DELETE) operations
"""
return self._azure_async_operation
@azure_async_operation.setter
def azure_async_operation(self, value):
self._azure_async_operation = value
class LoadBalancerGetResponse(AzureOperationResponse):
"""
Response of a GET Load Balancer operation
"""
def __init__(self, **kwargs):
super(LoadBalancerGetResponse, self).__init__(**kwargs)
self._load_balancer = kwargs.get('load_balancer')
@property
def load_balancer(self):
"""
Gets a Load Balancer in a resource group
"""
return self._load_balancer
@load_balancer.setter
def load_balancer(self, value):
self._load_balancer = value
class LoadBalancerListResponse(AzureOperationResponse):
"""
Response for ListLoadBalancers Api service call
"""
def __init__(self, **kwargs):
super(LoadBalancerListResponse, self).__init__(**kwargs)
self._load_balancers = kwargs.get('load_balancers')
self._next_link = kwargs.get('next_link')
@property
def load_balancers(self):
"""
Gets a list of LoadBalancers in a resource group
"""
return self._load_balancers
@load_balancers.setter
def load_balancers(self, value):
self._load_balancers = value
@property
def next_link(self):
"""
Gets the URL to get the next set of results.
"""
return self._next_link
@next_link.setter
def next_link(self, value):
self._next_link = value
class LoadBalancerPutResponse(UpdateOperationResponse):
"""
Response of a PUT Load Balancer operation
"""
def __init__(self, **kwargs):
super(LoadBalancerPutResponse, self).__init__(**kwargs)
self._load_balancer = kwargs.get('load_balancer')
@property
def load_balancer(self):
"""
Gets Load Balancer in a resource group
"""
return self._load_balancer
@load_balancer.setter
def load_balancer(self, value):
self._load_balancer = value
class TopLevelResource(ResourceBaseExtended):
"""
A common class for general resource information
"""
def __init__(self, **kwargs):
super(TopLevelResource, self).__init__(**kwargs)
self._etag = kwargs.get('etag')
@property
def etag(self):
"""
Gets a unique read-only string that changes whenever the resource is
updated
"""
return self._etag
@etag.setter
def etag(self, value):
self._etag = value
class LoadBalancer(TopLevelResource):
"""
LoadBalancer resource
"""
def __init__(self, **kwargs):
super(LoadBalancer, self).__init__(**kwargs)
self._frontend_ip_configurations = kwargs.get('frontend_ip_configurations')
self._backend_address_pools = kwargs.get('backend_address_pools')
self._load_balancing_rules = kwargs.get('load_balancing_rules')
self._probes = kwargs.get('probes')
rafaelthca/OmniDB | OmniDB/omnidb-server.py | Python | mit | 12,308 | 0.011618
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import platform
import random
import string
#Parameters
import optparse
import configparser
import OmniDB.custom_settings
OmniDB.custom_settings.DEV_MODE = False
OmniDB.custom_settings.DESKTOP_MODE = False
parser = optparse.OptionParser(version=OmniDB.custom_settings.OMNIDB_VERSION)
parser.add_option("-H", "--host", dest="host",
default=None, type=str,
help="listening address")
parser.add_option("-p", "--port", dest="port",
default=None, type=int,
help="listening port")
parser.add_option("-w", "--wsport", dest="wsport",
default=None, type=int,
help="websocket port")
parser.add_option("-e", "--ewsport", dest="ewsport",
default=None, type=int,
help="external websocket port")
parser.add_option("-d", "--homedir", dest="homedir",
default='', type=str,
help="home directory containing local databases config and log files")
parser.add_option("-c", "--configfile", dest="conf",
default='', type=str,
help="configuration file")
parser.add_option("-A", "--app", dest="app",
action="store_true",
default=False,
help=optparse.SUPPRESS_HELP)
parser.add_option("-P", "--path", dest="path",
default='', type=str,
help="path to access the application, other than /")
(options, args) = parser.parse_args()
#Generate random token if in app mode
if options.app:
OmniDB.custom_settings.DESKTOP_MODE = True
OmniDB.custom_settings.APP_TOKEN = ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(50))
app_version = True
else:
app_version = False
if options.homedir!='':
if not os.path.exists(options.homedir):
print("Home directory does not exist. Please specify a directory that exists.",flush=True)
sys.exit()
else:
OmniDB.custom_settings.HOME_DIR = options.homedir
#importing runtime settings after setting HOME_DIR and other required parameters
import OmniDB.runtime_settings
if options.conf!='':
if not os.path.exists(options.conf):
print("Config file not found, using default settings.",flush=True)
config_file = OmniDB.runtime_settings.CONFFILE
else:
config_file = options.conf
else:
config_file = OmniDB.runtime_settings.CONFFILE
#Parsing config file
Config = configparser.ConfigParser()
Config.read(config_file)
if options.host!=None:
listening_address = options.host
else:
try:
listening_address = Config.get('webserver', 'listening_address')
except:
listening_address = OmniDB.custom_settings.OMNIDB_ADDRESS
if options.port!=None:
listening_port = options.port
else:
try:
listening_port = Config.getint('webserver', 'listening_port')
except:
listening_port = 8000
if options.wsport!=None:
ws_port = options.wsport
else:
try:
ws_port = Config.getint('webserver', 'websocket_port')
except:
ws_port = OmniDB.custom_settings.OMNIDB_WEBSOCKET_PORT
if options.ewsport!=None:
ews_port = options.ewsport
else:
try:
ews_port = Config.getint('webserver', 'external_websocket_port')
except:
ews_port = None
if options.path!='':
OmniDB.custom_settings.PATH = options.path
else:
try:
OmniDB.custom_settings.PATH = Config.get('webserver', 'path')
except:
OmniDB.custom_settings.PATH = ''
try:
is_ssl = Config.getboolean('webserver', 'is_ssl')
except:
is_ssl = False
try:
ssl_certificate_file = Config.get('webserver', 'ssl_certificate_file')
except:
ssl_certificate_file = ''
try:
ssl_key_file = Config.get('webserver', 'ssl_key_file')
except:
ssl_key_file = ''
try:
csrf_trusted_origins = Config.get('webserver', 'csrf_trusted_origins')
except:
csrf_trusted_origins = ''
try:
OmniDB.custom_settings.THREAD_POOL_MAX_WORKERS = Config.getint('queryserver', 'thread_pool_max_workers')
except:
pass
try:
OmniDB.custom_settings.PWD_TIMEOUT_TOTAL = Config.getint('queryserver', 'pwd_timeout_total')
except:
pass
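# Illustrative config-file sketch (added for clarity; the section and option
# names come from the Config.get/getint calls above, the values themselves are
# hypothetical):
#
#   [webserver]
#   listening_address = 0.0.0.0
#   listening_port = 8000
#   websocket_port = 25482
#   is_ssl = False
#   path =
#
#   [queryserver]
#   thread_pool_max_workers = 2
#   pwd_timeout_total = 1800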
#importing settings after setting HOME_DIR and other required parameters
import OmniDB.settings
import logging
import logging.config
logger = logging.getLogger('OmniDB_app.Init')
#Configuring Django settings before loading them
OmniDB.settings.DEBUG = False
if is_ssl:
OmniDB.settings.SESSION_COOKIE_SECURE = True
OmniDB.settings.CSRF_COOKIE_SECURE = True
csrf_trusted_origins_list = csrf_trusted_origins.split(',')
if len(csrf_trusted_origins_list)>0:
OmniDB.settings.CSRF_TRUSTED_ORIGINS = csrf_trusted_origins_list
if not os.path.exists(ssl_certificate_file):
print("Certificate file not found. Please specify a file that exists.",flush=True)
logger.info("Certificate file not found. Please specify a file that exists.")
sys.exit()
if not os.path.exists(ssl_key_file):
print("Key file not found. Please specify a file that exists.",flush=True)
logger.info("Key file not found. Please specify a file that exists.")
sys.exit()
import OmniDB
import OmniDB_app
import OmniDB_app.apps
os.environ['DJANGO_SETTINGS_MODULE'] = 'OmniDB.settings'
import django
django.setup()
import html.parser
import http.cookies
import django.template.defaulttags
import django.template.loader_tags
import django.contrib.staticfiles
import django.contrib.staticfiles.apps
import django.contrib.admin.apps
import django.contrib.auth.apps
import django.contrib.contenttypes.apps
import django.contrib.sessions.apps
import django.contrib.messages.apps
import OmniDB_app.urls
import django.contrib.messages.middleware
import django.contrib.auth.middleware
import django.contrib.sessions.middleware
import django.contrib.sessions.serializers
import django.template.loaders
import django.contrib.auth.context_processors
import django.contrib.messages.context_processors
import django.views.defaults
import django.contrib.auth.password_validation
from django.core.handlers.wsgi import WSGIHandler
from OmniDB import startup, ws_core
import time
import cherrypy
from django.contrib.sessions.backends.db import SessionStore
import socket
import random
import urllib.request
def check_port(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
except socket.error as e:
return False
s.close()
return True
class DjangoApplication(object):
def mount_static(self, url, root):
config = {
'tools.staticdir.on': True,
'tools.staticdir.dir': root,
'tools.expires.on': True,
'tools.expires.secs': 86400
}
cherrypy.tree.mount(None, url, {'/': config})
def run(self,parameters):
#cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
logging.config.dictConfig(OmniDB.settings.LOGGING)
#cherrypy.log.error_log.propagate = False
cherrypy.log.access_log.propagate = False
self.mount_static(OmniDB.settings.STATIC_URL, OmniDB.settings.STATIC_ROOT)
cherrypy.tree.graft(WSGIHandler())
port = parameters['listening_port']
num_attempts = 0
print('''Starting OmniDB server...''',flush=True)
logger.info('''Starting OmniDB server...''')
print('''Checking port availability...''',flush=True)
logger.info('''Checking port availability...''')
while not check_port(port) and num_attempts < 20:
print("Port {0} is busy, trying another port...".format(port),flush=True)
logger.info("Port {0} is busy, trying another port...".format(port))
port = random.randint(1025,32676)
num_attempts = num_attempts + 1
if num_attempts < 20:
v_cherrypy_config = {
'server.socket_host': parameters['listening_address'],
'server.socket_port': port,
'engine.autoreload_on': False,
FreshXOpenSource/wallaby-base | wallaby/pf/peer/pref.py | Python | bsd-2-clause | 2,613 | 0.003444
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from peer import *
from documentCache import DocumentCache
from viewer import Viewer
class Pref(Peer):
Load = Pillow.In
Add = Pillow.In
NoDocument = Pillow.Out
SheetNotFound = Pillow.Out
Ready = Pillow.OutState
Receiving = [
Viewer.In.Document,
DocumentCache.Out.RequestedDocument,
DocumentCache.Out.DocumentChanged
]
Sending = [
DocumentCache.In.RequestDocument,
Viewer.In.Refresh
]
def isReady(self, ignoreVersion=False):
if self._configDoc is not None and self._document is not None:
configRev = self._configDoc.rev()
docRev = self._document.rev()
if (configRev == None or configRev != self._configRev) or (docRev == None or docRev != self._docRev) or ignoreVersion:
self._configRev, self._docRev = configRev, docRev
from twisted.internet import reactor
reactor.callLater(0, self._controller.load)
self._throw(Pref.Out.Ready, True)
else:
self._throw(Pref.Out.Ready, False)
def __init__(self, room, controller, configDocId, path):
Peer.__init__(self, room)
self._configDoc = None
self._path = path
self._configRev = None
self._docRev = None
self._document = None
self._controller = controller
self._configDocId = configDocId
self._catch(Pref.In.Add, self._add)
self._catch(Viewer.In.Document, self._doc)
self._catch(DocumentCache.Out.RequestedDocument, self._setConfigDoc)
self._catch(DocumentCache.Out.DocumentChanged, self._setConfigDoc)
def updateAll(self):
self._throw(Viewer.In.Refresh, self._path + ".")
def _setConfigDoc(self, pillow, doc):
if not doc or doc.documentID != self._configDocId: return
self._configDoc = doc
self.isReady()
def initialize(self):
self._throw(DocumentCache.In.RequestDocument, self._configDocId)
self.isReady()
def noDocument(self):
self._throw(Pref.Out.NoDocument, None)
def notFound(self, name):
self._throw(Pref.Out.SheetNotFound, name)
def _doc(self, pillow, doc):
self._document = doc
self.isReady()
def document(self):
return self._document
def _add(self, pillow, sheet):
from twisted.internet import reactor
reactor.callLater(0, self._controller.createSheet, sheet)
def configDoc(self):
return self._configDoc
|
petterreinholdtsen/creepy
|
creepy/models/ProjectWizardPluginListModel.py
|
Python
|
gpl-3.0
| 2,179
| 0.005507
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt4.QtCore import QVariant, QAbstractListModel, Qt
from PyQt4.Qt import QPixmap, QFileSystemModel, QIcon
from utilities import GeneralUtilities
import os
class ProjectWizardPluginListModel(QAbstractListModel):
def __init__(self, plugins, parent=None):
super(ProjectWizardPluginListModel, self).__init__(parent)
self.plugins = plugins
self.checkedPlugins = set()
def rowCount(self, index):
return len(self.plugins)
def data(self, index, role):
plugin = self.plugins[index.row()][0]
if index.isValid():
if role == Qt.DisplayRole:
return QVariant(plugin.name)
if role == Qt.DecorationRole:
for dir in GeneralUtilities.getPluginDirs():
picturePath = os.path.join(dir, plugin.plugin_object.name, 'logo.png')
if picturePath and os.path.exists(picturePath):
pixmap = QPixmap(picturePath)
return QIcon(pixmap.scaled(30, 30, Qt.IgnoreAspectRatio, Qt.FastTransformation))
pixmap = QPixmap(os.path.join(GeneralUtilities.getIncludeDir(), 'generic_plugin.png'))
pixmap.scaled(30, 30, Qt.IgnoreAspectRatio)
return QIcon(pixmap)
if role == Qt.CheckStateRole:
if plugin:
return (Qt.Checked if plugin.name in self.checkedPlugins else Qt.Unchecked)
else:
return QVariant()
def setData(self, index, value, role=Qt.EditRole):
if role == Qt.CheckStateRole:
plugin = self.plugins[index.row()][0]
if value == Qt.Checked:
self.checkedPlugins.add(plugin.name)
else:
self.checkedPlugins.discard(plugin.name)
return True
return QFileSystemModel.setData(self, index, value, role)
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractListModel.flags(self, index)|Qt.ItemIsUserCheckable)
|
skorokithakis/nxt-python
|
nxt/server.py
|
Python
|
gpl-3.0
| 9,781
| 0.007054
|
# nxt.server module -- LEGO Mindstorms NXT socket interface module
# Copyright (C) 2009 Marcus Wanner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
'''Use for a socket-interface NXT driver. Command and protocol docs at:
http://code.google.com/p/nxt-python/wiki/ServerUsage'''
import nxt.locator
from nxt.motor import *
from nxt.sensor import *
from nxt.compass import *
import socket, string, sys
global brick
host = ''
port = 54174
outport = 54374
def _process_port(nxtport):
if nxtport == 'A' or nxtport == 'a':
nxtport = PORT_A
elif nxtport == 'B' or nxtport == 'b':
nxtport = PORT_B
elif nxtport == 'C' or nxtport == 'c':
nxtport = PORT_C
elif nxtport == 'ALL' or nxtport == 'All' or nxtport == 'all':
nxtport = PORT_ALL
elif nxtport == '1':
nxtport = PORT_1
elif nxtport == '2':
nxtport = PORT_2
elif nxtport == '3':
nxtport = PORT_3
elif nxtport == '4':
nxtport = PORT_4
else:
raise ValueError, 'Invalid port: '+nxtport
return nxtport
def _process_command(cmd):
global brick
retcode = 0
retmsg = ''
#act on messages, these conditions can be in no particular order
#it should send a return code on port 54374. 0 for success, 1 for failure
#then an error message
if cmd.startswith('find_brick'):
try:
brick = nxt.locator.find_one_brick()
brick = brick.connect()
retmsg = 'Connected to brick.'
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('get_touch_sample'):
try:
port = string.split(cmd, ':')[1]
port = _process_port(port)
retmsg = str(TouchSensor(brick, port).get_sample())
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('get_sound_sample'):
try:
port = string.split(cmd, ':')[1]
port = _process_port(port)
retmsg = str(SoundSensor(brick, port).get_sample())
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('get_light_sample'):
try:
data = string.split(cmd, ':')[1]
data = string.split(data, ',')
if len(data) > 1:
#there is emit light data
port, emit = data
else:
port, emit = data[0], False
port = _process_port(port)
light = LightSensor(brick, port)
light.set_illuminated(emit)
retmsg = str(light.get_sample())
light.set_illuminated(False)
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('get_ultrasonic_sample'):
try:
port = string.split(cmd, ':')[1]
port = _process_port(port)
retmsg = str(UltrasonicSensor(brick, port).get_sample())
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('get_accelerometer_sample'):
try:
port = string.split(cmd, ':')[1]
port = _process_port(port)
retmsg = str(AccelerometerSensor(brick, port).get_sample())
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('get_compass_sample'):
try:
port = string.split(cmd, ':')[1]
port = _process_port(port)
retmsg = str(CompassSensor(brick, port).get_sample())
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('update_motor:'):
try:
#separate the information from the command keyword
info = string.split(cmd, ':')[1]
[port, power, tacholim] = string.split(info, ',')
portarray = []
if port.count('(') > 0 and port.count(')') > 0:
#there are more than 1 ports, separate them
port = port.strip('()')
#port.strip(')')
port.replace(' ', '')
for separateport in string.split(port, ';'):
portarray.append(separateport)
else:
#one port, just use that
portarray.append(port)
#process the port
for currentport in portarray:
processedport = _process_port(currentport)
Motor(brick, processedport).update(int(power), int(tacholim))
retmsg = 'Motor command succeded.'
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('run_motor:'):
try:
#separate the information from the command keyword
info = string.split(cmd, ':')[1]
[port, power, regulated] = string.split(info, ',')
port = _process_port(port)
Motor(brick, port).run(int(power), int(regulated))
retmsg = 'Motor run command succeded.'
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('stop_motor:'):
try:
#separate the information from the command keyword
info = string.split(cmd, ':')[1]
[port, braking] = string.split(info, ',')
port = _process_port(port)
Motor(brick, port).stop(int(braking))
retmsg = 'Motor stop command succeded.'
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
elif cmd.startswith('play_tone:'):
try:
#separate the information from the command keyword
info = string.split(cmd, ':')[1]
[freq, dur] = string.split(info, ',')
#call the function
brick.play_tone_and_wait(int(freq), int(dur))
retmsg = 'Tone command succeded.'
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
#close_brick
elif cmd == 'close_brick':
try:
brick.close()
retcode = 0
except:
retcode = 1
retmsg = str(sys.exc_info()[1])
#command not recognised
else:
retmsg = 'Command not found.'
retcode = 1
#then return 1 or 0 and a message
return retcode, retmsg
def serve_forever(password=None, authorizedips = []):
'''Serve clients until the window is closed or there is an unhandled error.
If you supply a password, then any ip that wants to control the NXT will have
to send the password once to be authorized before any of the commands it sends
will be carried out.
authorizedips is a list of the ips that can have access to the NXT without
supplying a password. Normally, this is left blank.'''
#make sockets
outsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
insock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
insock.bind((host, port))
while 1:
#get a message from port on any host
inmsg, (clientip,assignedport) = insock.recvfrom(100) #no commands can be longer than 100 chars
#print a helpful message to the console.
print 'Got command '+inmsg+' f
|
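The nxt.server module above accepts plain-text commands over UDP on port 54174 and, per its docstring, reports a return code (0 for success, 1 for failure) plus a message back on port 54374. A minimal client sketch, assuming the server runs on the same host and replies to the sender's machine; the command strings come from the handlers above, everything else (including the exact reply framing) is illustrative:

import socket

SERVER = ('127.0.0.1', 54174)   # where nxt.server listens for commands
REPLY_PORT = 54374              # where it reports retcode/message back

reply_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
reply_sock.bind(('', REPLY_PORT))

send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
send_sock.sendto(b'find_brick', SERVER)           # connect to the first brick found
send_sock.sendto(b'play_tone:440,1000', SERVER)   # 440 Hz tone for 1000 ms

for _ in range(2):
    reply, _addr = reply_sock.recvfrom(1024)      # exact reply framing is assumed
    print(reply.decode())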
banderlog/greed
|
greed/colors.py
|
Python
|
mit
| 370
| 0.018919
|
class Color:
''' print() wrappers for console colors
'''
def red(*args, **kwargs): print("\033[91m{}\033[0m".format(" ".join(map(str,args))), **kwargs)
def green(*args, **kwargs): print("\033[92m{}\033[0m".format(" ".join(map(str,args))), **kwargs)
def yellow(*args, **kwargs): print("\033[93m{}\033[0m".format(" ".join(map(str,args))), **kwargs)
|
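A quick usage sketch for the Color helpers above: each wrapper behaves like print() but surrounds the joined arguments with an ANSI colour escape:

Color.red("error:", "something failed")
Color.green("ok")
Color.yellow("warning", end="")   # extra keyword arguments pass straight through to print()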
billryan/github-rss
|
github/gh.py
|
Python
|
mit
| 5,484
| 0.000547
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import getpass
import requests
import ConfigParser
from feedgen.feed import FeedGenerator  # used by GitHubRSS.init_fg below
BASE_DIR = os.path.dirname(__file__)
class Auth:
"""GitHub API Auth"""
def __init__(self):
self.auth_url = 'https://api.github.com'
auth_conf = os.path.join(BASE_DIR, 'auth.conf')
if os.path.exists(auth_conf):
cf = ConfigParser.ConfigParser()
cf.read(auth_conf)
self.user = cf.get("auth", "user")
self.passwd = cf.get("auth", "passwd")
else:
self.user = ''
self.passwd = ''
def get_session(self):
s = requests.Session()
s.auth = (self.user, self.passwd)
r = s.get(self.auth_url)
if r.status_code != 200:
print("authentication failed. status_code: " + r.status_code)
return requests.Session()
else:
print("authentication succeed")
return s
class Repo:
"""GitHub Repository"""
def __init__(self):
self.auth_url = 'https://api.github.com'
def get_commits(self, s, owner, repo, nums=30):
url = '/'.join([self.auth_url, 'repos', owner, repo, 'commits'])
commits = s.get(url)
if commits.status_code == 200:
return commits.json()
def get_commit_info(self, s, commit_json):
commit_info = {}
# url for get_commit_diff
commit_info['diff_url'] = commit_json['url']
commit_info['diff'] = self.get_commit_diff(s, commit_info['diff_url'])
commit_info['html_url'] = commit_json['html_url']
commit_info['sha'] = commit_json['sha']
commit = commit_json['commit']
commit_info['url'] = commit['url']
author = {}
author['name'] = commit['author']['name']
author['email'] = commit['author']['email']
commit_info['author'] = author
commit_info['updated'] = commit['author']['date']
commit_info['message'] = commit['message']
return commit_info
def get_commit_diff(self, s, commit_url):
diff_headers = {'Accept': 'application/vnd.github.diff'}
commit_diff = s.get(commit_url, headers=diff_headers)
if commit_diff.status_code == 200:
commit_diff_txt = commit_diff.text
return commit_diff_txt
else:
return ''
def get_repo_info(self, s, owner, repo):
url = '/'.join([self.auth_url, 'repos', owner, repo])
repo_json = s.get(url).json()
repo_info = {}
repo_info['description'] = repo_json['description']
repo_info['full_name'] = repo_json['full_name']
repo_info['html_url'] = repo_json['html_url']
repo_info['updated_at'] = repo_json['updated_at']
repo_info['author'] = self.get_author(s, owner)
return repo_info
def get_author(self, s, owner):
url = '/'.join([self.auth_url, 'users', owner])
author_raw = s.get(url)
if author_raw.status_code != 200:
return None
author_json = author_raw.json()
author_info = {}
author_info['name'] = owner
author_info['email'] = author_json['email']
return author_info
def get_commits_info(self, s, owner, repo):
commits_json = self.get_commits(s, owner, repo)
commits_info = []
for commit_json in commits_json:
commit_info = self.get_commit_info(s, commit_json)
commits_info.append(commit_info)
return commits_info
class GitHubRSS:
"""GitHub RSS"""
def __init__(self):
self.atom = True
def init_fg(self, repo_info):
fg = FeedGenerator()
title = 'Recent commits to ' + repo_info['full_name']
fg.title(title)
fg.link(href=repo_info['html_url'])
fg.updated(repo_info['updated_at'])
fg.id(repo_info['html_url'])
fg.author(repo_info['author'])
return fg
def add_entry(self, fg, commit_info):
fe = fg.add_entry()
fe.title(commit_info['message'])
fe.link(href=commit_info['html_url'])
id_prefix = 'tag:github.com,2008:Grit::Commit/'
entry_id = id_prefix + commit_info['sha']
fe.id(entry_id)
fe.author(commit_info['author'])
fe.published(commit_info['updated'])
fe.updated(commit_info['updated'])
fe.content(commit_info['diff'])
return fg
def gen_atom(self, fg, atom_fn='atom.xml'):
fg.atom_file(atom_fn)
if __name__ == "__main__":
# auth with GitHub username and password
user = raw_input('Enter your GitHub username: ')
passwd = getpass.getpass()
g_commit = GitHubCommit(user, passwd)
s = requests.Session()
s.auth = (g_commit.user, g_commit.passwd)
r = s.get(g_commit.auth_url)
if r.status_code == 401:
print("Unauthorized. Wrong username or password!")
sys.exit("Exit for Unauthorized status")
owner = 'billryan'
repo = 'algorithm-exercise'
repo_info = g_commit.get_repo_info(s, owner, repo)
commits_json = g_commit.get_commits(s, owner, repo)
commits_info = []
for commit_json in commits_json:
commit_info = g_commit.get_commit_info(s, commit_json)
commits_info.append(commit_info)
# generate rss
rss = GitHubRSS()
fg_repo = rss.init_fg(repo_info)
for commit_info in commits_info:
rss.add_entry(fg_repo, commit_info)
rss.gen_atom(fg_repo, '/tmp/test/atom_test.xml')
|
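The __main__ block above refers to a GitHubCommit class that is never defined in the file. A rough sketch of how the classes that are defined (Auth, Repo and GitHubRSS) appear intended to fit together, assuming python-feedgen is installed and an auth.conf with credentials exists; the owner and repo names are placeholders:

auth = Auth()
session = auth.get_session()

repo_api = Repo()
repo_info = repo_api.get_repo_info(session, 'octocat', 'hello-world')
commits_info = repo_api.get_commits_info(session, 'octocat', 'hello-world')

rss = GitHubRSS()
feed = rss.init_fg(repo_info)
for commit_info in commits_info:
    rss.add_entry(feed, commit_info)
rss.gen_atom(feed, 'atom.xml')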
ekopylova/burrito-fillings
|
bfillings/usearch.py
|
Python
|
bsd-3-clause
| 101,582
| 0.000975
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""Application controller for usearch v5.2.32
Includes application controllers for usearch and
convenience wrappers for different functions of uclust including
sorting fasta files, finding clusters, converting to cd-hit format and
searching and aligning against a database. Also contains
a parser for the resulting .clstr file.
Modified from pycogent_backports/uclust.py, written by
Greg Caporaso/William Walters
"""
from os.path import splitext, abspath, join
from tempfile import mkstemp
from skbio.parse.sequences import parse_fasta
from burrito.parameters import ValuedParameter, FlagParameter
from burrito.util import (CommandLineApplication, ResultPath,
ApplicationError, ApplicationNotFoundError)
from skbio.util import remove_files
class UsearchParseError(Exception):
pass
class Usearch(CommandLineApplication):
""" Usearch ApplicationController
"""
_command = 'usearch'
_input_handler = '_input_as_parameters'
_parameters = {
# Fasta input file for merge-sort function
'--mergesort': ValuedParameter('--', Name='mergesort', Delimiter=' ',
IsPath=True),
# Fasta input file for merge-sort function
'--evalue': ValuedParameter('--', Name='evalue', Delimiter=' ',
IsPath=False),
# Output file, used by several difference functions
'--output': ValuedParameter('--', Name='output', Delimiter=' ',
IsPath=True),
# Output filename will be in uclust (.uc) format
# Output cluster file, required parameter
'--uc': ValuedParameter('--', Name='uc', Delimiter=' ',
IsPath=True),
'--blast6out': ValuedParameter('--', Name='blast6out', Delimiter=' ',
IsPath=True),
# ID percent for OTU, by default is 97%
'--id': ValuedParameter('--', Name='id', Delimiter=' ', IsPath=False),
'--evalue':
ValuedParameter('--', Name='evalue', Delimiter=' ', IsPath=False),
'--queryalnfract':
ValuedParameter(
'--',
Name='queryalnfract',
Delimiter=' ',
IsPath=False),
'--targetalnfract':
ValuedParameter(
'--',
Name='targetalnfract',
Delimiter=' ',
IsPath=False),
# Enable reverse strand matching. Will double memory.
'--rev': FlagParameter('--', Name='rev'),
# Maximum hits before quitting search (default 1, 0=infinity).
'--maxaccepts':
ValuedParameter('--', Name='maxaccepts', Delimiter=' '),
# Maximum rejects before quitting search (default 8, 0=infinity).
'--maxrejects':
ValuedParameter('--', Name='maxrejects', Delimiter=' '),
# Target nr. of common words (default 8, 0=don't step)
'--stepwords': ValuedParameter('--', Name='stepwords', Delimiter=' '),
# Word length for windex (default 5 aa.s, 8 nuc.s).
'--w': ValuedParameter('--', Name='w', Delimiter=' '),
# Don't assume input is sorted by length (default assume sorted).
'--usersort': FlagParameter('--', Name='usersort'),
# log filepath
'--log': ValuedParameter('--', Name='log', Delimiter=' ', IsPath=True),
# cluster command
'--cluster': ValuedParameter('--', Name='cluster', Delimiter=' ',
IsPath=True),
# Size of compressed index table. Should be prime, e.g. 40000003.
'--slots': ValuedParameter('--', Name='slots', Delimiter=' ',
IsPath=False),
# Not specified in usearch helpstring...
'--sizein': FlagParameter('--', Name='sizein'),
# Not specified in usearch helpstring...
'--sizeout': FlagParameter('--', Name='sizeout'),
# Not specified in usearch helpstring...
'--minlen': ValuedParameter('--', Name='minlen', Delimiter=' ',
IsPath=False),
# output filepath for dereplicated fasta file
'--seedsout': ValuedParameter('--', Name='seedsout', Delimiter=' ',
IsPath=True),
# Dereplicate exact subsequences
'--derep_subseq': FlagParameter('--', Name='derep_subseq'),
# Dereplicate exact sequences
'--derep_fullseq': FlagParameter('--', Name='derep_fullseq'),
# Sort by abundance
'--sortsize': ValuedParameter('--', Name='sortsize', Delimiter=' ',
IsPath=True),
# usearch search plus clustering
'--consout': ValuedParameter('--', Name='consout', Delimiter=' ',
IsPath=True),
# Abundance skew setting for uchime de novo chimera detection
'--abskew': ValuedParameter('--', Name='abskew', Delimiter=' ',
IsPath=False),
# input fasta filepath for uchime chimera
'--uchime': ValuedParameter('--', Name='uchime', Delimiter=' ',
IsPath=True),
# output chimera filepath
'--chimeras': ValuedParameter('--', Name='chimeras', Delimiter=' ',
IsPath=True),
# output non-chimera filepath
'--nonchimeras': ValuedParameter('--', Name='nonchimeras',
Delimiter=' ', IsPath=True),
# reference sequence database for ref based chimera detection
'--db': ValuedParameter('--', Name='db', Delimiter=' ', IsPath=True),
# output clusters filepath for chimera detection
'--uchimeout': ValuedParameter('--', Name='uchimeout', Delimiter=' ',
IsPath=True),
# minimum cluster size for quality filtering
'--minsize': ValuedParameter('--', Name='minsize', Delimiter=' ',
IsPath=False),
# input fasta for blast alignments
'--query': ValuedParameter('--', Name='query', Delimiter=' ',
IsPath=True),
# global alignment flag
'--global': FlagParameter('--', Name='global')
}
_suppress_stdout = False
_suppress_stderr = False
def _input_as_parameters(self, data):
""" Set the input path (a fasta filepath)
"""
# The list of values which can be passed on a per-run basis
allowed_values = ['--uc', '--output', '--mergesort', '--log',
'--cluster', '--seedsout', '--sortsize',
'--consout', '--uchime', '--chimeras',
'--nonchimeras', '--db', '--uchimeout',
'--query', '--blast6out']
unsupported_parameters = set(data.keys()) - set(allowed_values)
if unsupported_parameters:
raise ApplicationError(
"Unsupported parameter(s) passed when calling usearch: %s" %
' '.join(unsupported_parameters))
for v in allowed_values:
# turn the parameter off so subsequent runs are not
# affected by parameter settings from previous runs
self.Parameters[v].off()
if v in data:
# turn the parameter on if specified by the user
self.Parameters[v].on(data[v])
return ''
def _get_result_paths(self, data):
""" Set the result paths """
result = {}
result['Output'] = ResultPath(
Path=self.Parameters['--output'].Value,
IsWritten=self.Parameters['--output'].isOn())
result['ClusterFile'] = ResultPath(
P
|
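A rough sketch of driving the controller above, assuming the usual burrito/PyCogent convention in which the application instance is constructed with a params dict and then called with the per-run data dict that _input_as_parameters accepts; the call convention and file names are assumptions, the flag names come from the parameter table above:

app = Usearch(params={'--id': 0.97, '--maxaccepts': 1, '--maxrejects': 8})
results = app({'--cluster': 'seqs_sorted.fasta',   # per-run values from allowed_values
               '--uc': 'clusters.uc',
               '--log': 'usearch.log'})
# results maps names from _get_result_paths (e.g. 'ClusterFile') to the written files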
chemelnucfin/tensorflow
|
tensorflow/python/summary/writer/writer.py
|
Python
|
apache-2.0
| 17,167
| 0.004078
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import warnings
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
if isinstance(graph, ops.Graph) else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(graph_def=graph_def or
maybe_graph_as_def))
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set()
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
`tf.Session.run` or
`tf.Tensor.eval`, to this
function. Alternatively, you can pass a `tf.compat.v1.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with a
# specific tag.
for value in summary.value:
if not value.metadata:
continue
if value.tag in self._seen_summary_tags:
# This tag has been encountered before. Strip the metadata.
value.ClearField("metadata")
continue
# We encounter a value with a tag we have not encountered previously. And
# it has metadata. Remember to strip metadata from future values with this
# tag string.
self._seen_summary_tags.add(value.tag)
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
This method wraps the provided session in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
self._write_plugin_assets(graph)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.grap
|
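A small sketch of the add_summary path described above: a Summary protocol buffer (or its serialized bytes) is wrapped in an Event and handed to the contained event writer. The log directory and tag name are illustrative:

import tensorflow as tf

writer = tf.compat.v1.summary.FileWriter('/tmp/logdir')
summary = tf.compat.v1.Summary(
    value=[tf.compat.v1.Summary.Value(tag='loss', simple_value=0.25)])
writer.add_summary(summary, global_step=10)                        # proto form
writer.add_summary(summary.SerializeToString(), global_step=11)    # serialized form
writer.flush()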
antechrestos/cf-python-client
|
integration/v2/test_service_instances.py
|
Python
|
apache-2.0
| 1,263
| 0.003959
|
import logging
import unittest
from config_test import build_client_from_configuration
_logger = logging.getLogger(__name__)
class TestServiceInstances(unittest.TestCase):
def test_create_update_delete(self):
client = build_client_from_configuration()
result = client.v2.service_instances.create(client.space_guid, "test_name", client.plan_guid, client.creation_parameters)
if len(client.update_parameters) > 0:
client.v2.service_instances.update(result["metadata"]["guid"], client.update_parameters)
else:
_logger.warning("update test skipped")
client.v2.service_instances.remove(result["metadata"]["guid"])
def test_get(self):
client = build_client_from_configuration()
cpt = 0
for instance in client.v2.service_instances.list():
if cpt == 0:
self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance["entity"]["space_guid"]))
self.assertIsNotNone(client.v2.service_instances.get(instance["metadata"]["guid"]))
self.assertIsNotNone(client.v2.service_instances.list_permissions(instance["metadata"]["guid"]))
cpt += 1
_logger.debug("test_get - %d found", cpt)
|
ovaskevich/PyLaTeX
|
pylatex/base_classes/latex_object.py
|
Python
|
mit
| 5,960
| 0
|
# -*- coding: utf-8 -*-
"""
This module implements the base LaTeX object.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
from ordered_set import OrderedSet
from ..utils import dumps_list
from abc import abstractmethod, ABCMeta
from reprlib import recursive_repr
from inspect import getfullargspec
class _CreatePackages(ABCMeta):
def __init__(cls, name, bases, d): # noqa
packages = OrderedSet()
for b in bases:
if hasattr(b, 'packages'):
packages |= b.packages
if 'packages' in d:
packages |= d['packages']
cls.packages = packages
super().__init__(name, bases, d)
class LatexObject(metaclass=_CreatePackages):
"""The class that every other LaTeX class is a subclass of.
This class implements the main methods that every LaTeX object needs. For
conversion to LaTeX formatted strings it implements the dumps, dump and
generate_tex methods. It also provides the methods that can be used to
represent the packages required by the LatexObject.
"""
_latex_name = None
#: Set this to an iterable to override the list of default repr
#: attributes.
_repr_attributes_override = None
#: Set this to a dict to change some of the default repr attributes to
#: other attributes. The key is the old one, the value the new one.
_repr_attributes_mapping = None
#: Set on a class to make instances default to a certain kind of escaping
_default_escape = True
#: Only set this directly by changing the cls.escape
_escape = None
@property
def escape(self):
"""Determine whether or not to escape content of this class.
This defaults to `True` for most classes.
"""
if self._escape is not None:
return self._escape
if self._default_escape is not None:
return self._default_escape
return True
#: Start a new paragraph before this environment.
begin_paragraph = False
#: Start a new paragraph after this environment.
end_paragraph = False
#: Same as enabling `begin_paragraph` and `end_paragraph`, so
#: effectively placing this element in its own paragraph.
separate_paragraph = False
def __init__(self):
# TODO: only create a copy of packages when it will
# Create a copy of the packages attribute, so changing it in an
# instance will not change the class default.
self.packages = self.packages.copy()
@recursive_repr()
def __repr__(self):
"""Create a printable representation of the object."""
return self.__class__.__name__ + '(' + \
', '.join(map(repr, self._repr_values)) + ')'
@property
def _repr_values(self):
"""Return values that are to be shown in repr string."""
def getattr_better(obj, field):
try:
return getattr(obj, field)
except AttributeError as e:
try:
getattr(obj, '_' + field)
except AttributeError:
raise e
return (getattr_better(self, attr) for attr in self._repr_attributes)
@property
def _repr_attributes(self):
"""Return attributes that should be part of the repr string."""
if self._repr_attributes_override is None:
# Default to init arguments
attrs = getfullargspec(self.__init__).args[1:]
mapping = self._repr_attributes_mapping
if mapping:
attrs = [mapping[a] if a in mapping else a for a in attrs]
return attrs
return self._repr_attributes_override
@property
def latex_name(self):
"""The name of the class used in LaTeX.
It can be `None` when the class doesn't have a name.
"""
if self._latex_name is not None:
return self._latex_name
return self.__class__.__name__.lower()
@latex_name.setter
def latex_name(self, value):
self._latex_name = value
@abstractmethod
def dumps(self):
"""Represent the class as a string in LaTeX syntax.
This method should be implemented by any class that subclasses this
class.
"""
def dump(self, file_w):
"""Write the LaTeX representation of the class to a file.
Args
----
file_w: io.TextIOBase
The file object in which to save the data
"""
file_w.write(self.dumps())
def generate_tex(self, filepath):
"""Generate a .tex file.
Args
----
filepath: str
The name of the file (without .tex)
"""
with open(filepath + '.tex', 'w', encoding='utf-8') as newf:
self.dump(newf)
def dumps_packages(self):
"""Represent the packages needed as a string in LaTeX syntax.
Returns
-------
list
"""
return dumps_list(self.packages)
def dump_packages(self, file_w):
"""Write the LaTeX representation of the packages to a file.
Args
----
file_w: io.TextIOBase
The file object in which to save the data
"""
file_w.write(self.dumps_packages())
def dumps_as_content(self):
"""Create a string representation of the object as content.
This is currently only used to add new lines before and after the
output of the dumps function. These can be added or removed by changing
the `begin_paragraph`, `end_paragraph` and `separate_paragraph`
attributes of the class.
"""
string = self.dumps()
if self.separate_paragraph or self.begin_paragraph:
string = '\n\n' + string.lstrip('\n')
if self.separate_paragraph or self.end_paragraph:
string = string.rstrip('\n') + '\n\n'
return string
|
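A minimal sketch of the subclassing contract described above: a concrete class only needs to implement dumps(), and it inherits dump(), generate_tex() and the package bookkeeping. The class below is illustrative:

class HorizontalRule(LatexObject):
    """Bare horizontal rule, only here to illustrate the dumps() contract."""

    def dumps(self):
        return r'\hrulefill'

rule = HorizontalRule()
print(rule.dumps())         # \hrulefill
rule.generate_tex('rule')   # writes rule.tex through dump()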
terbolous/cloudstack-python-client
|
setup.py
|
Python
|
mit
| 1,975
| 0.015696
|
#!/usr/bin/python
# Copyright (c) 2011 Jason Hancock <jsnbyh@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name = 'CloudStackClient',
version = '0.1.4',
description = "CloudStack API Client",
long_description = (read('README.md') + '\r\n' +
read('HISTORY.rst') + '\r\n' +
read('AUTHORS')
),
author = "Erik Weber",
author_email = "terbolous@gmail.com",
url = "https://github.com/terbolous/cloudstack-python-client",
packages = ['CloudStackClient'],
license = 'MIT',
platforms = 'Posix; MacOS X; Windows',
)
|
eduNEXT/edunext-platform
|
common/lib/capa/capa/tests/test_responsetypes.py
|
Python
|
agpl-3.0
| 119,649
| 0.001847
|
# -*- coding: utf-8 -*-
"""
Tests of responsetypes
"""
import io
import json
import os
import textwrap
import unittest
import zipfile
from datetime import datetime
import pytest
import calc
import mock
import pyparsing
import random2 as random
import requests
import six
from pytz import UTC
from six import text_type
from capa.correctmap import CorrectMap
from capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError
from capa.tests.helpers import load_fixture, new_loncapa_problem, test_capa_system
from capa.tests.response_xml_factory import (
AnnotationResponseXMLFactory,
ChoiceResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CodeResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
SchematicResponseXMLFactory,
StringResponseXMLFactory,
SymbolicResponseXMLFactory,
TrueFalseResponseXMLFactory
)
from capa.util import convert_files_to_filenames
from capa.xqueue_interface import dateformat
class ResponseTest(unittest.TestCase):
"""Base class for tests of capa responses."""
xml_factory_class = None
# If something is wrong, show it to us.
maxDiff = None
def setUp(self):
super(ResponseTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
if self.xml_factory_class:
self.xml_factory = self.xml_factory_class() # lint-amnesty, pylint: disable=not-callable
def build_problem(self, capa_system=None, **kwargs):
xml = self.xml_factory.build_xml(**kwargs)
return new_loncapa_problem(xml, capa_system=capa_system)
# pylint: disable=missing-function-docstring
def assert_grade(self, problem, submission, expected_correctness, msg=None):
input_dict = {'1_2_1': submission}
correct_map = problem.grade_answers(input_dict)
if msg is None:
assert correct_map.get_correctness('1_2_1') == expected_correctness
else:
assert correct_map.get_correctness('1_2_1') == expected_correctness, msg
def assert_answer_format(self, problem):
answers = problem.get_question_answers()
assert answers['1_2_1'] is not None
# pylint: disable=missing-function-docstring
def assert_multiple_grade(self, problem, correct_answers, incorrect_answers):
for input_str in correct_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
assert result == 'correct'
for input_str in incorrect_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
assert result == 'incorrect'
def assert_multiple_partial(self, problem, correct_answers, incorrect_answers, partial_answers):
"""
Runs multiple asserts for varying correct, incorrect,
and partially correct answers, all passed as lists.
"""
for input_str in correct_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
assert result == 'correct'
for input_str in incorrect_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
assert result == 'incorrect'
for input_str in partial_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
assert result == 'partially-correct'
def _get_random_number_code(self):
"""Returns code to be used to generate a random result."""
return "str(random.randint(0, 1e9))"
def _get_random_number_result(self, seed_value):
"""Returns a result that should be generated using the random_number_code."""
rand = random.Random(seed_value)
return str(rand.randint(0, 1e9))
class MultiChoiceResponseTest(ResponseTest): # pylint: disable=missing-class-docstring
xml_factory_class = MultipleChoiceResponseXMLFactory
def test_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False])
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_0', 'incorrect')
self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'incorrect')
def test_partial_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points')
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_0', 'incorrect')
self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
def test_named_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False],
choice_names=["foil_1", "foil_2", "foil_3"])
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_foil_1', 'incorrect')
self.assert_grade(problem, 'choice_foil_2', 'correct')
self.assert_grade(problem, 'choice_foil_3', 'incorrect')
def test_multiple_choice_valid_grading_schemes(self):
# Multiple Choice problems only allow one partial credit scheme.
# Change this test if that changes.
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points,points')
with pytest.raises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='bongo')
with pytest.raises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
def test_partial_points_multiple_choice_grade(self):
problem = self.build_problem(
choices=['partial', 'partial', 'partial'],
credit_type='points',
points=['1', '0.6', '0']
)
# Ensure that we get the expected number of points
# Using assertAlmostEqual to avoid floating point issues
correct_map = problem.grade_answers({'1_2_1': 'choice_0'})
assert round(correct_map.get_npoints('1_2_1') - 1, 7) >= 0
correct_map = problem.grade_answers({'1_2_1': 'choice_1'})
assert round(correct_map.get_npoints('1_2_1') - 0.6, 7) >= 0
correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
assert round(correct_map.get_npoints('1_2_1') - 0, 7) >= 0
def test_contextualized_choices(self):
script = textwrap.dedent("""
a = 2
b = 9
c = a + b
ok0 = c % 2 == 0 # check remainder modulo 2
text0 = "$a + $b is even"
ok1 = c % 2 == 1 # check remainder modulo 2
text1 = "$a + $b is odd"
ok2 = "partial"
text2 = "infinity may be both"
""")
choices = ["$ok0", "$ok1", "$ok2"]
choice_names = ["$text0 ... (should be $ok0)",
"$text1 ... (should be $ok1)",
"$text2 ... (should be $ok2)"]
problem = self.build_problem(script=script,
choices=choices,
choice_names=choice_names,
credit_type='points')
# Ensure the expected correctness and choice names
self.assert_grade(problem, 'choice_2 + 9 is even ... (should be False)', 'incorrect')
self.assert_grade(problem, 'choice_2 + 9 is odd ... (should be True)', 'correct')
self.assert_grade(problem, 'choice_infinity may be both ... (should be partial)', 'partially-correct')
class TrueFalseResponseTest(ResponseTest): # pylint: disable=missing-class-docstring
xml_factory_class = TrueFalseResponseXMLFactory
def test_true_false_grade(self):
problem = self.build_problem(choices=[False, True, True])
# Check the results
|
hzlf/openbroadcast
|
website/shop/shop_simplevariations/urls.py
|
Python
|
gpl-3.0
| 697
| 0.005739
|
#-*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from shop_simplevariations.views import SimplevariationCartDetails
urlpatterns = patterns('',
url(r'^delete/$',
SimplevariationCartDetails.as_view(action='delete'),
name='cart_delete'),
url('^item/$',
SimplevariationCartDetails.as_view(action='post'),
name='cart_item_add' ),
url(r'^$',
SimplevariationCartDetails.as_view(), name='cart'),
url(r'^update/$',
SimplevariationCartDetails.as_view(action='put'),
name='cart_update'),
url('^item/(?P<id>[0-9A-Za-z-_.//]+)$',
SimplevariationCartDetails.as_view(),
name='cart_item' ),
)
|
kmadathil/sanskrit_parser
|
sanskrit_parser/rest_api/api_v1.py
|
Python
|
mit
| 4,459
| 0.002018
|
from flask import Blueprint
import flask_restx
from flask_restx import Resource
from flask import request
# import subprocess
# from os import path
# from flask import redirect
from sanskrit_parser.base.sanskrit_base import SanskritObject, SLP1
from sanskrit_parser.parser.sandhi_analyzer import LexicalSandhiAnalyzer
from sanskrit_parser import __version__
from sanskrit_parser import Parser
URL_PREFIX = '/v1'
api_blueprint = Blueprint(
'sanskrit_parser', __name__,
template_folder='templates'
)
api = flask_restx.Api(app=api_blueprint, version='1.0', title='sanskrit_parser API',
description='For detailed intro and to report issues: see <a href="https://github.com/kmadathil/sanskrit_parser">here</a>. '
'A list of REST and non-REST API routes available on this server: <a href="../sitemap">sitemap</a>.',
default_label=api_blueprint.name,
prefix=URL_PREFIX, doc='/docs')
analyzer = LexicalSandhiAnalyzer()
def jedge(pred, node, label):
return (node.pada.devanagari(strict_io=False),
jtag(node.getMorphologicalTags()),
SanskritObject(label, encoding=SLP1).devanagari(strict_io=False),
pred.pada.devanagari(strict_io=False))
def jnode(node):
""" Helper to translate parse node into serializable format"""
return (node.pada.devanagari(strict_io=False),
jtag(node.getMorphologicalTags()), "", "")
def jtag(tag):
""" Helper to translate tag to serializable format"""
return (tag[0].devanagari(strict_io=False), [t.devanagari(strict_io=False) for t in list(tag[1])])
def jtags(tags):
""" Helper to translate tags to serializable format"""
return [jtag(x) for x in tags]
@api.route('/version/')
class Version(Resource):
def get(self):
"""Library Version"""
r = {"version": str(__version__)}
return r
@api.route('/tags/<string:p>')
class Tags(Resource):
def get(self, p):
""" Get lexical tags for p """
pobj = SanskritObject(p, strict_io=False)
tags = analyzer.getMorphologicalTags(pobj)
if tags is not None:
ptags = jtags(tags)
else:
ptags = []
r = {"input": p, "devanagari": pobj.devanagari(), "tags": ptags}
return r
@api.route('/splits/<string:v>')
class Splits(Resource):
def get(self, v):
""" Get lexical tags for v """
strict_p = True
if request.args.get("strict") == "false":
strict_p = False
vobj = SanskritObject(v, strict_io=strict_p, replace_ending_visarga=None)
g = analyzer.getSandhiSplits(vobj)
if g:
splits = g.find_all_paths(10)
jsplits = [[ss.devanagari(strict_io=False) for ss in s] for s in splits]
else:
jsplits = []
r = {"input": v, "devanagari": vobj.devanagari(), "splits": jsplits}
return r
@api.route('/parse-presegmented/<string:v>')
class Parse_Presegmented(Resource):
def get(self, v):
""" Parse a presegmented
|
sentence """
strict
|
_p = True
if request.args.get("strict") == "false":
strict_p = False
vobj = SanskritObject(v, strict_io=strict_p, replace_ending_visarga=None)
parser = Parser(input_encoding="SLP1",
output_encoding="Devanagari",
replace_ending_visarga='s')
mres = []
print(v)
for split in parser.split(vobj.canonical(), limit=10, pre_segmented=True):
parses = list(split.parse(limit=10))
sdot = split.to_dot()
mres = [x.serializable() for x in parses]
pdots = [x.to_dot() for x in parses]
r = {"input": v, "devanagari": vobj.devanagari(), "analysis": mres,
"split_dot": sdot,
"parse_dots": pdots}
return r
@api.route('/presegmented/<string:v>')
class Presegmented(Resource):
def get(self, v):
""" Presegmented Split """
vobj = SanskritObject(v, strict_io=True, replace_ending_visarga=None)
parser = Parser(input_encoding="SLP1",
output_encoding="Devanagari",
replace_ending_visarga='s')
splits = parser.split(vobj.canonical(), limit=10, pre_segmented=True)
r = {"input": v, "devanagari": vobj.devanagari(), "splits": [x.serializable()['split'] for x in splits]}
return r
|
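A short sketch of calling the routes defined above over HTTP, assuming the blueprint is served at localhost:5000 (host and port are placeholders; the /v1 prefix, route names and the strict query parameter come from the code):

import requests

BASE = 'http://localhost:5000/v1'

print(requests.get(BASE + '/version/').json())       # {"version": "..."}
print(requests.get(BASE + '/tags/gacCati').json())   # morphological tags for a word
print(requests.get(BASE + '/splits/astyuttarasyAm',
                   params={'strict': 'false'}).json())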
google-research/kubric
|
test/test_cameras.py
|
Python
|
apache-2.0
| 1,140
| 0.007024
|
# Copyright 2022 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from kubric.core import cameras
def test_orthographic_camera_constructor():
cam = cameras.OrthographicCamera(orthographic_scale=7)
assert cam.orthographic_scale == 7
def test_perspective_camera_constructor():
cam = cameras.PerspectiveCamera(focal_length=22, sensor_width=33)
assert cam.focal_length == 22
assert cam.sensor_width == 33
def test_perspective_camera_field_of_view():
cam = cameras.PerspectiveCamera(focal_length=28, sensor_width=36)
assert cam.field_of_view == pytest.approx(1.1427, abs=1e-4) # ca 65.5°
|
cloudbase/cloudbase-init-ci
|
argus/recipes/base.py
|
Python
|
apache-2.0
| 2,745
| 0
|
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contains base recipes functionality.
A recipe is a class which knows how to provision an instance,
by installing and configuring it with what's necessary.
"""
import abc
from argus import config as argus_config
from argus import log as argus_log
LOG = argus_log.LOG
CONFIG = argus_config.CONFIG
RETRY_COUNT = 15
RETRY_DELAY = 10
__all__ = (
'BaseRecipe',
)
class BaseRecipe(object):
"""Base class for a recipe.
A recipe is a way in which an instance can be provisioned with
some easy steps.
"""
def __init__(self, backend):
self._backend = backend
def _execute(self, cmd, count=RETRY_COUNT, delay=RETRY_DELAY,
command_type=None,
upper_timeout=CONFIG.argus.upper_timeout):
"""Execute until success and return only the standard output."""
# A positive exit code will trigger the failure
# in the underlying methods as an `ArgusError`.
# Also, if the retrying limit is reached, `ArgusTimeoutError`
# will be raised.
return self._backend.remote_client.run_command_with_retry(
cmd, count=count, delay=delay, command_type=command_type,
upper_timeout=upper_timeout)[0]
def _execute_until_condition(self, cmd, cond, count=RETRY_COUNT,
delay=RETRY_DELAY, command_type=None):
"""Execute a command until the condition is met without returning."""
self._backend.remote_client.run_command_until_condition(
cmd, cond, retry_count=count,
delay=delay, command_type=command_type)
@abc.abstractmethod
def prepare(self, **kwargs):
"""Call this method to provision an instance.
:param kwargs:
If the recipe requires, pass additional information in the
**kwargs** parameter.
"""
@abc.abstractmethod
def cleanup(self, **kwargs):
"""Call this method to cleanup an instance.
:param kwargs:
If the recipe requires, pass additional information in the
**kwargs** parameter.
"""
|
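A minimal sketch of a concrete recipe under the contract above: subclass BaseRecipe and fill in prepare/cleanup, using _execute for remote commands. The commands themselves are placeholders:

class EchoRecipe(BaseRecipe):
    """Illustrative recipe that only runs placeholder commands."""

    def prepare(self, **kwargs):
        LOG.info("Provisioning instance...")
        self._execute("echo provisioning")     # placeholder remote command

    def cleanup(self, **kwargs):
        self._execute("echo cleaning up")      # placeholder remote command

# recipe = EchoRecipe(backend)   # 'backend' must supply remote_client, per __init__
# recipe.prepare()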
jdemon519/cfme_tests
|
cfme/storage/object_store.py
|
Python
|
gpl-2.0
| 1,961
| 0.00153
|
# -*- coding: utf-8 -*-
from functools import partial
from navmazing import NavigateToSibling, NavigateToAttribute
from cfme.common import SummaryMixin, Taggable
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import toolbar as tb
from cfme.web_ui import Quadicon, match_location, mixins
from utils.appliance.implementations.ui import navigate_to, navigator, CFMENavigateStep
from utils.appliance import Navigatable
match_page = partial(match_location, controller='cloud_object_store_container',
title='Object Stores')
class ObjectStore(Taggable, SummaryMixin, Navigatable):
""" Automate Model page of Cloud Object Stores
Args:
name: Name of Object Store
"""
def __init__(self, name=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.quad_name = 'object_store'
def add_tag(self, tag, **kwargs):
"""Tags the system by given tag"""
|
navigate_to(self, 'Details')
mixins.add_tag(tag, **kwargs)
def untag(self, tag):
"""Removes the selected tag off the system"""
navigate_to(self, 'Details')
mixins.remove_tag(tag)
@navigator.register(ObjectStore, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
if self.obj.appliance.version >= "5.7":
self.prerequisite_view.navigation.select('Storage', 'Object Stores')
else:
self.prerequisite_view.navigation.select('Compute', 'Clouds', 'Object Stores')
def resetter(self):
tb.select("Grid View")
@navigator.register(ObjectStore, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def am_i_here(self):
return match_page(summary="{} (Summary)".format(self.obj.name))
def step(self):
sel.click(Quadicon(self.obj.name, self.obj.quad_name))
|
datamade/large-lots
|
tests/lots_client/test_ppf.py
|
Python
|
mit
| 2,817
| 0.00142
|
import datetime
import uuid
import pytest
from lots_admin.models import Application
from lots_client.views import advance_if_ppf_and_eds_submitted
@pytest.mark.django_db
@pytest.mark.parametrize('eds_received,ppf_received', [
(False, False),
(True, False),
(False, True),
(True, True)
])
def test_advance_to_step_8(django_db_setup,
eds_received,
ppf_received):
application = Application.objects.get(applicationstatus__current_step__step=7)
application.eds_sent = True
application.save()
setattr(application, 'eds_received', eds_received)
setattr(application, 'ppf_received', ppf_received)
if all([eds_received, ppf_received]):
step = 8
else:
step = 7
advance_if_ppf_and_eds_submitted(application)
assert application.applicationstatus_set.first().current_step.step == step
@pytest.mark.django_db
def test_view_requires_tracking_id(django_db_setup,
client,
application):
rv = client.get('/principal-profile-form/')
assert 'Oops!' in str(rv.content)
app = application.build()
rv = client.get('/principal-profile-form/{}/'.format(app.tracking_id))
assert 'Instructions' in str(rv.content)
@pytest.mark.django_db
@pytest.mark.parametrize('ppf_type', ['individual', 'organization'])
def test_individual_ppf_submission(django_db_setup,
client,
application,
ppf_blob,
ppf_type):
if ppf_type == 'individual':
app = application.build()
data = ppf_blob.build(app)
elif ppf_type == 'organization':
app = application.build(organization_confirmed=True,
organization='The Peacock Company')
data = ppf_blob.build(app, home_address_street='456 Feather Lane')
rv = client.post(
'/principal-profile-form/{}/'.format(app.tracking_id),
data=data,
)
assert rv.status_code == 200
assert 'Success!' in str(rv.content)
app.refresh_from_db()
assert app.ppf_received == True
principal_profiles = app.principalprofile_set.all()
assert len(principal_profiles) == 2
primary_ppf = principal_profiles.first()
if ppf_type == 'organization':
assert primary_ppf.address == '456 Feather Lane, Chicago, IL 60609'
elif ppf_type == 'individual':
assert primary_ppf.address == '5000 S ELIZABETH ST, Chicago, IL 60609'
related_person = app.relatedperson_set.get()
related_ppf = principal_profiles.last()
assert 'Petmore Dogs' in str(related_person)
assert related_ppf.address == '4539 N Paulina, Chicago, IL 60624'
|
lilleswing/deepchem
|
deepchem/dock/pose_generation.py
|
Python
|
mit
| 13,071
| 0.005967
|
"""
Generates protein-ligand docked poses.
"""
import platform
import logging
import os
import tempfile
import tarfile
import numpy as np
from subprocess import call
from subprocess import check_output
from typing import List, Optional, Tuple, Union
from deepchem.dock.binding_pocket import BindingPocketFinder
from deepchem.utils.data_utils import download_url, get_data_dir
from deepchem.utils.typing import RDKitMol
from deepchem.utils.geometry_utils import compute_centroid, compute_protein_range
from deepchem.utils.rdkit_utils import load_molecule, write_molecule
from deepchem.utils.vina_utils import load_docked_ligands, write_vina_conf
logger = logging.getLogger(__name__)
DOCKED_POSES = List[Tuple[RDKitMol, RDKitMol]]
class PoseGenerator(object):
"""A Pose Generator computes low energy conformations for molecular complexes.
Many questions in structural biophysics reduce to that of computing
the binding free energy of molecular complexes. A key step towards
computing the binding free energy of two complexes is to find low
energy "poses", that is energetically favorable conformations of
molecules with respect to each other. One application of this
technique is to find low energy poses for protein-ligand
interactions.
"""
def generate_poses(self,
molecular_complex: Tuple[str, str],
centroid: Optional[np.ndarray] = None,
box_dims: Optional[np.ndarray] = None,
exhaustiveness: int = 10,
num_modes: int = 9,
num_pockets: Optional[int] = None,
out_dir: Optional[str] = None,
generate_scores: bool = False):
"""Generates a list of low energy poses for molecular complex
Parameters
----------
    molecular_complex: Tuple[str, str]
A representation of a molecular complex. This tuple is
(protein_file, ligand_file).
centroid: np.ndarray, optional (default None)
The centroid to dock against. Is computed if not specified.
box_dims: np.ndarray, optional (default None)
A numpy array of shape `(3,)` holding the size of the box to dock. If not
specified is set to size of molecular complex plus 5 angstroms.
exhaustiveness: int, optional (default 10)
Tells pose generator how exhaustive it should be with pose
generation.
num_modes: int, optional (default 9)
Tells pose generator how many binding modes it should generate at
each invocation.
num_pockets: int, optional (default None)
If specified, `self.pocket_finder` must be set. Will only
generate poses for the first `num_pockets` returned by
`self.pocket_finder`.
out_dir: str, optional (default None)
If specified, write generated poses to this directory.
    generate_scores: bool, optional (default False)
If `True`, the pose generator will return scores for complexes.
This is used typically when invoking external docking programs
that compute scores.
Returns
-------
A list of molecular complexes in energetically favorable poses.
"""
raise NotImplementedError
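# Illustrative usage sketch (not part of the original module; the file paths
# below are hypothetical placeholders). A concrete subclass such as the
# VinaPoseGenerator defined below is expected to be driven roughly like this:
#
#   generator = VinaPoseGenerator()
#   poses, scores = generator.generate_poses(
#       ("protein.pdb", "ligand.sdf"),
#       exhaustiveness=10,
#       num_modes=9,
#       out_dir="/tmp/docked",
#       generate_scores=True)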
class VinaPoseGenerator(PoseGenerator):
"""Uses Autodock Vina to generate binding poses.
  This class uses Autodock Vina to make predictions of
binding poses. It downloads the Autodock Vina executable for
your system to your specified DEEPCHEM_DATA_DIR (remember this
is an environment variable you set) and invokes the executable
to perform pose generation for you.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
sixty_four_bits: bool = True,
pocket_finder: Optional[BindingPocketFinder] = None):
"""Initializes Vina Pose Generator
Parameters
----------
sixty_four_bits: bool, optional (default True)
Specifies whether this is a 64-bit machine. Needed to download
the correct executable.
pocket_finder: BindingPocketFinder, optional (default None)
If specified should be an instance of
`dc.dock.BindingPocketFinder`.
"""
data_dir = get_data_dir()
if platform.system() == 'Linux':
url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_linux_x86.tgz"
filename = "autodock_vina_1_1_2_linux_x86.tgz"
dirname = "autodock_vina_1_1_2_linux_x86"
self.vina_dir = os.path.join(data_dir, dirname)
self.vina_cmd = os.path.join(self.vina_dir, "bin/vina")
elif platform.system() == 'Darwin':
if sixty_four_bits:
url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_mac_64bit.tar.gz"
filename = "autodock_vina_1_1_2_mac_64bit.tar.gz"
dirname = "autodock_vina_1_1_2_mac_catalina_64bit"
else:
url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_mac.tgz"
filename = "autodock_vina_1_1_2_mac.tgz"
dirname = "autodock_vina_1_1_2_mac"
self.vina_dir = os.path.join(data_dir, dirname)
self.vina_cmd = os.path.join(self.vina_dir, "bin/vina")
elif platform.system() == 'Windows':
url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_win32.msi"
filename = "autodock_vina_1_1_2_win32.msi"
self.vina_dir = "\\Program Files (x86)\\The Scripps Research Institute\\Vina"
self.vina_cmd = os.path.join(self.vina_dir, "vina.exe")
else:
raise ValueError(
"Unknown operating system. Try using a cloud platform to run this code instead."
)
self.pocket_finder = pocket_finder
if not os.path.exists(self.vina_dir):
logger.info("Vina not available. Downloading")
download_url(url, data_dir)
downloaded_file = os.path.join(data_dir, filename)
logger.info("Downloaded Vina. Extracting")
if platform.system() == 'Windows':
msi_cmd = "msiexec /i %s" % downloaded_file
check_output(msi_cmd.split())
else:
with tarfile.open(downloaded_file) as tar:
tar.extractall(data_dir)
logger.info("Cleanup: removing downloaded vina tar.gz")
os.remove(downloaded_file)
def generate_poses(self,
molecular_complex: Tuple[str, str],
centroid: Optional[np.ndarray] = None,
box_dims: Optional[np.ndarray] = None,
exhaustiveness: int = 10,
num_modes: int = 9,
num_pockets: Optional[int] = None,
out_dir: Optional[str] = None,
generate_scores: bool = False
) -> Union[Tuple[DOCKED_POSES, List[float]], DOCKED_POSES]:
"""Generates the docked complex and outputs files for docked complex.
TODO: How can this work on Windows? We need to install a .msi file and
invoke it correctly from Python for this to work.
Parameters
----------
    molecular_complex: Tuple[str, str]
A representation of a molecular complex. This tuple is
(protein_file, ligand_file).
centroid: np.ndarray, optional
The centroid to dock against. Is computed if not specified.
box_dims: np.ndarray, optional
A numpy array of shape `(3,)` holding the size of the box to dock. If not
specified is set to size of molecular complex plus 5 angstroms.
exhaustiveness: int, optional (default 10)
Tells Autodock Vina how exhaustive it should be with pose
generation.
num_modes: int, optional (default 9)
Tells Autodock Vina how many binding modes it should generate at
each invocation.
    num_pockets: int, optional (default None)
If specified, `self.pocket_finder` must be set. Will only
      generate poses for the first `num_pockets` returned by
`self.pocket_finder`.
out_dir: str, optional
If specified, write generated poses to this directory.
    generate_scores: bool, optional (default False)
If `True`, the pose generator will return scores for complexes.
This is used typically when invoking external docking programs
that compute scores.
Returns
-------
Tuple[`docked_poses`, `scores`] or `docked_poses`
Tuple of `(docked_
|
jcfr/mystic
|
examples_UQ/TEST_surrogate_cut.py
|
Python
|
bsd-3-clause
| 9,913
| 0.023303
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2009-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
DEBUG = False
#######################################################################
# scaling and mpi info; also optimizer configuration parameters
# hard-wired: use DE solver, don't use mpi, F-F' calculation
# (similar to concentration.in)
#######################################################################
from TEST_surrogate_diam import * # model, limit
from mystic.math.stats import volume, prob_mass, mean, mcdiarmid_bound
from mystic.math.integrate import integrate as expectation_value
from mystic.math.samples import sample
#######################################################################
# the differential evolution optimizer
# (replaces the call to dakota)
#######################################################################
def optimize(cost,lb,ub):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
random_seed(123)
#stepmon = VerboseMonitor(100)
stepmon = Monitor()
evalmon = Monitor()
ndim = len(lb) # [(1 + RVend) - RVstart] + 1
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
tol = convergence_tol
solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
CrossProbability=crossover,ScalingFactor=percent_change)
solved = solver.bestSolution
#if DEBUG: print "solved: %s" % solved
diameter_squared = -solver.bestEnergy / scale #XXX: scale != 0
func_evals = solver.evaluations
return solved, diameter_squared, func_evals
#######################################################################
# loop over model parameters to calculate concentration of measure
# (similar to main.cc)
#######################################################################
def UQ(start,end,lower,upper):
params = []
diameters = []
function_evaluations = []
total_func_evals = 0
total_diameter = 0.0
for i in range(start,end+1):
lb = lower + [lower[i]]
ub = upper + [upper[i]]
# construct cost function and run optimizer
cost = costFactory(i)
# optimize, using no initial conditions
solved, subdiameter, func_evals = optimize(cost,lb,ub)
function_evaluations.append(func_evals)
diameters.append(subdiameter)
params.append(solved)
total_func_evals += function_evaluations[-1]
total_diameter += diameters[-1]
if DEBUG:
for solved in params:
print "solved: %s" % solved
print "subdiameters (squared): %s" % diameters
print "diameter (squared): %s" % total_diameter
print "func_evals: %s => %s" % (function_evaluations, total_func_evals)
return params, total_diameter, diameters
#######################################################################
# get solved_params, subdiameters, and prob_mass for a sliced cuboid
#######################################################################
PROBABILITY_MASS = []
SUB_DIAMETERS = []
TOTAL_DIAMETERS = []
SOLVED_PARAMETERS = []
NEW_SLICES = []
def test_cuboids(lb,ub,RVstart,RVend,cuboid_volume):
probmass = []
subdiams = []
tot_diam = []
solved_p = []
# subdivisions
for i in range(len(lb)):
if DEBUG:
print "\n"
print " lower bounds: %s" % lb[i]
print " upper bounds: %s" % ub[i]
if i in NEW_SLICES or not NEW_SLICES:
subcuboid_volume = volume(lb[i],ub[i])
sub_prob_mass = prob_mass(subcuboid_volume,cuboid_volume)
probmass.append(sub_prob_mass)
if DEBUG: print " probability mass: %s" % sub_prob_mass
solved, diameter, subdiameters = UQ(RVstart,RVend,lb[i],ub[i])
solved_p.append(solved)
subdiams.append(subdiameters)
tot_diam.append(diameter)
else:
probmass.append(PROBABILITY_MASS[i])
if DEBUG: print " probability mass: %s" % PROBABILITY_MASS[i]
solved_p.append(SOLVED_PARAMETERS[i])
subdiams.append(SUB_DIAMETERS[i])
tot_diam.append(TOTAL_DIAMETERS[i])
return solved_p, subdiams, tot_diam, probmass
#######################################################################
# slice the cuboid
#######################################################################
def make_cut(lb,ub,RVstart,RVend,vol):
params, subdiams, diam, probmass = test_cuboids(lb,ub,RVstart,RVend,vol)
SOLVED_PARAMETERS, SUB_DIAMETERS = params, subdiams
TOTAL_DIAMETERS, PROBABILITY_MASS = diam, probmass
#####XXX: probably needs to be some sort of loop ######
# get region with largest probability mass
region = probmass.index(max(probmass))
NEW_SLICES = [region,region+1]
# get direction with largest subdiameter
direction = subdiams[region].index(max(subdiams[region]))
  #XXX: should check 'params' for 'no more improvement possible'
#XXX: ... if so, check all the directions in that region
#XXX: ... if still so, then select region with next largest probmass
#####XXX: probably needs to be some sort of loop ######
# get the midpoint
cutvalue = 0.5 * ( ub[region][direction] + lb[region][direction] )
# modify bounds to include cut plane
l = lb[:region+1]
l += [lb[region][:direction] + [cutvalue] + lb[region][direction+1:]]
l += lb[region+1:]
u = ub[:region]
u += [ub[region][:direction] + [cutvalue] + ub[region][direction+1:]]
u += ub[region:]
return l,u
#######################################################################
# rank, bounds, and restart information
# (similar to concentration.variables)
#######################################################################
if __name__ == '__main__':
from math import sqrt
function_name = "marc_surr"
lower_bounds = [60.0, 0.0, 2.1]
upper_bounds = [105.0, 30.0, 2.8]
RVstart = 0; RVend = 2
max_number_of_cuts = 255 #NOTE: number of resulting subcuboids = cuts + 1
print "...SETTINGS..."
print "npop = %s" % npop
print "maxiter = %s" % maxiter
print "maxfun = %s" % maxfun
print "convergence_tol = %s" % convergence_tol
print "crossover = %s" % crossover
print "percent_change = %s" % percent_change
print "..............\n\n"
print " model: f(x) = %s(x)" % function_name
RVmax = len(lower_bounds)
param_string = "["
for i in range(RVmax):
param_string += "'x%s'" % str(i+1)
if i == (RVmax - 1):
param_string += "]"
else:
param_string += ", "
print " parameters: %s" % param_string
# get diameter for entire cuboid
lb,ub = [lower_bounds],[upper_bounds]
cuboid_volume = volume(lb[0],ub[0])
params0, subdiams0, diam0, probmass0 = test_cuboids(lb,ub,RVstart,RVend,\
cuboid_volume)
SOLVED_PARAMETERS, SUB_DIAMETERS = params0, subdiams0
TOTAL_DIAMETERS, PROBABILITY_MASS = diam0, probmass0
    if not DEBUG:
failure,success = sample(model,lb[0],ub[0])
pof = float(failure) / float(failure + success)
print "Exact PoF: %s" % pof
for i in range(len(lb)):
      print "\n"
print " lower bounds: %s" % lb[i]
print " upper bounds: %s" % ub[i]
for solved in params0[0]:
print "solved: %s" % solved
print "subdiameters (squared): %s" % subdiams0[0]
print "diameter (squared): %s" % diam0[0]
print " probability mass: %s" % probmass0[0]
expectation = expectation_value(model,lower_bounds,upper_bounds)
print " expectation: %s" % expectation
mean_value = mean(expectation,cuboid_volume)
print " mean value: %s" % mean_value
mcdiarmid = mcdiarmid_bound(mean_value,sqrt(diam0[0]))
print "McDiarmid bound: %s" % mcdiarmid
# determine 'best' cuts to cuboid
for cut in range(max_number_of_cuts):
print "\n..... cut #%s ....." % (cut+1)
|
2ndy/RaspIM
|
usr/lib/python2.6/distutils/util.py
|
Python
|
gpl-2.0
| 21,928
| 0.001779
|
"""distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id$"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
def get_platform ():
"""Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = string.find(sys.version, prefix)
if i == -1:
return sys.platform
j = string.find(sys.version, ")", i)
look = sys.version[i+len(prefix):j].lower()
if look=='amd64':
return 'win-amd64'
if look=='itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
from distutils.sysconfig import get_config_vars
cfgvars = get_config_vars()
macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if not macver:
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if 1:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
from distutils.sysconfig import get_config_vars
release = macver
osname = "macosx"
if (macrelease + '.') >= '10.4.' and \
'-arch' in get_config_vars().get('CFLAGS', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r"%(archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxint >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
machine = 'ppc'
# See 'i386' case
if sys.maxint >= 2**32:
machine = 'ppc64'
return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError, "path '%s' cannot be absolute" % pathname
if pathname[-1] == '/':
raise ValueError, "path '%s' cannot end with '/'" % pathname
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return os.path.join(*paths)
# convert_path ()
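# Example (derived from the function above): on a Windows build, where
# os.sep == '\\', convert_path('foo/bar/baz.txt') returns 'foo\\bar\\baz.txt';
# on POSIX systems the pathname is returned unchanged.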
def change_root (new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac O
|
ares/robottelo
|
tests/foreman/cli/test_domain.py
|
Python
|
gpl-3.0
| 11,616
| 0
|
# -*- encoding: utf-8 -*-
"""Test class for Domain CLI
:Requirement: Domain
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: CLI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_string
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.domain import Domain
from robottelo.cli.factory import CLIFactoryError
from robottelo.cli.factory import make_domain, make_location, make_org
from robottelo.datafactory import (
filtered_datapoint, invalid_id_list, valid_data_list
)
from robottelo.decorators import (
run_only_on,
tier1,
tier2,
bz_bug_is_open,
)
from robottelo.test import CLITestCase
@filtered_datapoint
def valid_create_params():
"""Returns a list of valid domain create parameters"""
return [
{u'name': u'white spaces {0}'.format(gen_string(str_type='utf8')),
u'description': gen_string(str_type='alpha')},
{u'name': gen_string(str_type='utf8'),
u'description': gen_string(str_type='utf8')},
{u'name': gen_string(str_type='numeric'),
u'description': gen_string(str_type='numeric')},
{u'name': gen_string(str_type='utf8', length=255),
u'description': gen_string(str_type='utf8', length=255)},
]
@filtered_datapoint
def invalid_create_params():
"""Returns a list of invalid domain create parameters"""
params = [
{u'name': gen_string(str_type='utf8', length=256)},
]
if not bz_bug_is_open(1398392):
params.append({u'dns-id': '-1'})
return params
@filtered_datapoint
def valid_update_params():
"""Returns a list of valid domain update parameters"""
return [
{u'name': u'white spaces {0}'.format(gen_string(str_type='utf8')),
u'description': gen_string(str_type='alpha')},
{u'name': gen_string(str_type='utf8'),
u'description': gen_string(str_type='utf8')},
{u'name': gen_string(str_type='numeric'),
u'description': gen_string(str_type='numeric')},
{u'name': gen_string(str_type='utf8', length=255),
u'description': gen_string(str_type='utf8', length=255)},
]
@filtered_datapoint
def invalid_update_params():
"""Returns a list of invalid domain update parameters"""
params = [
{u'name': ''},
{u'name': gen_string(str_type='utf8', length=256)},
]
if not bz_bug_is_open(1398392):
params.append({u'dns-id': '-1'})
return params
@filtered_datapoint
def valid_set_params():
"""Returns a list of valid domain set parameters"""
return [
{'name': gen_string(str_type='utf8'),
'value': gen_string(str_type='utf8')},
{'name': gen_string(str_type='utf8', length=255),
'value': gen_string(str_type='utf8')},
{'name': gen_string(str_type='utf8'),
'value': gen_string(str_type='utf8', length=255)},
{'name': gen_string(str_type='utf8'),
'value': ''},
]
@filtered_datapoint
def invalid_set_params():
"""Returns a list of invalid domain set parameters"""
return [
{'name': u'white spaces {0}'.format(gen_string(str_type='utf8')),
'value': gen_string(str_type='utf8')},
{'name': '',
'value': gen_string(str_type='utf8')},
{'name': gen_string(str_type='utf8', length=256),
'value': gen_string(str_type='utf8')},
]
@filtered_datapoint
def valid_delete_params():
"""Returns a list of valid domain delete parameters"""
return [
{'name': gen_string(str_type='utf8'),
'value': gen_string(str_type='utf8')},
{'name': gen_string(str_type='utf8', length=255),
'value': gen_string(str_type='utf8')},
{'name': gen_string(str_type='utf8'),
'value': ''},
]
class DomainTestCase(CLITestCase):
"""Domain CLI tests"""
@tier1
@run_only_on('sat')
def test_positive_create_with_name_description(self):
"""Create domain with valid name and description
:id: 018740bf-1551-4162-b88e-4d4905af097b
:expectedresults: Domain successfully created
:CaseImportance: Critical
"""
for options in valid_create_params():
with self.subTest(options):
domain = make_domain(options)
self.assertEqual(domain['name'], options['name'])
self.assertEqual(
domain['description'], options['description'])
@tier1
@run_only_on('sat')
def test_positive_create_with_loc(self):
"""Check if domain with location can be created
:id: 033cc37d-0189-4b88-94cf-97a96839197a
:expectedresults: Domain is created and has new location assigned
:CaseImportance: Critical
"""
location = make_location()
domain = make_domain({'location-ids': location['id']})
self.assertIn(location['name'], domain['locations'])
@tier1
@run_only_on('sat')
def test_positive_create_with_org(self):
"""Check if domain with organization can be created
:id: f4dfef1b-9b2a-49b8-ade5-031da29e7f6a
:expectedresults: Domain is created and has new organization assigned
:CaseImportance: Critical
"""
org = make_org()
domain = make_domain({'organization-ids': org['id']})
self.assertIn(org['name'], domain['organizations'])
@tier1
@run_only_on('sat')
def test_negative_create(self):
"""Create domain with invalid values
:id: 6d3aec19-75dc-41ca-89af-fef0ca37082d
:expectedresults: Domain is not created
:CaseImportance: Critical
"""
for options in invalid_create_params():
with self.subTest(options):
with self.assertRaises(CLIFactoryError):
make_domain(options)
@tier2
@run_only_on('sat')
def test_negative_create_with_invalid_dns_id(self):
"""Attempt to register a domain with invalid id
:id: 4aa52167-368a-41ad-87b7-41d468ad41a8
:expectedresults: Error is raised and user friendly message returned
:BZ: 1398392
:CaseLevel: Integration
"""
with self.assertRaises(CLIFactoryError) as context:
make_domain({
'name': gen_string('alpha'),
'dns-id': -1,
})
valid_messages = ['Invalid smart-proxy id', 'Invalid capsule id']
exception_string = str(context.exception)
messages = [
message
for message in valid_messages
if message in exception_string
]
self.assertGreater(len(messages), 0)
@tier1
@run_only_on('sat')
def test_positive_update(self):
"""Update domain with valid values
:id: 9da3cc96-c146-4f82-bb25-b237a367ba91
:expectedresults: Domain is updated
:CaseImportance: Critical
"""
domain = make_domain({
u'description': gen_string(str_type='utf8')
})
for options in valid_update_params():
with self.subTest(options):
# update description
Domain.update(dict(options, id=domain['id']))
# check - domain updated
domain = Domain.info({'id': domain['id']})
for key, val in options.iteritems():
self.assertEqual(domain[key], val)
@tier1
@run_only_on('sat')
def test_negative_update(self):
"""Update domain with invalid values
:id: 9fc708dc-20f9-4d7c-af53-863826462981
:expectedresults: Domain is not updated
:CaseImportance: Critical
"""
domain = make_domain()
for options in invalid_update_params():
with self.subTest(options):
with self.assertRaises(CLIReturnCodeError):
Domain.update(dict(options, id=domain['id']))
# check - domain not updated
result = Domain.info({'id': domain['id']})
for key in options.keys():
self.assertEqual(result[key], domain[key])
@tier1
@run_only_on('sat')
|
codegooglecom/sphivedb
|
client/python/testcli.py
|
Python
|
gpl-2.0
| 1,157
| 0.076923
|
import sphivedbcli
import time
import sys
def printResultSet( rs ):
print "row.count %d" % ( rs.getRowCount() )
columnCount = rs.getColumnCount()
hdrs = ""
for i in range( columnCount ):
hdrs = hdrs + ( "\t%s(%s)" % ( rs.getName( i ), rs.getType( i ) ) )
print hdrs
for i in range( rs.getRowCount() ):
rs.moveTo( i )
row = ""
for j in range( columnCount ):
row = row + ( "\t[%s]" % ( rs.getString( j ) ) )
print row
if __name__ == "__main__":
if len( sys.argv ) != 2:
print "Usage: %s <config file>" % ( sys.argv[0] )
print "\tpython %s ../../sphivedb/sphivedbcli.ini" % ( sys.argv[0] )
sys.exit( -1 )
configFile = sys.argv[1]
cli = sphivedbcli.SPHiveDBClient()
cli.init( configFile )
try:
resp = cli.execute( 0, "foobar", "addrbook", \
[ "insert into addrbook ( addr ) values ( \"
|
%d\" )" % ( time.time() ), \
"select * from addrbook" ] )
if 0 == resp.getErrorCode():
rsCount = resp.getResultCount()
for i in range( rsCount ):
rs = resp.getResultSet( i )
printResultSet( rs )
else:
print "%d: %s" % ( resp.getErrdataCode(), resp.getErrdataMsg() )
except Exception, e:
print e
|
js0701/chromium-crosswalk
|
tools/telemetry/telemetry/internal/actions/load_media.py
|
Python
|
bsd-3-clause
| 1,508
| 0.005305
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import exceptions
from telemetry.internal.actions import media_action
from telemetry.internal.actions import page_action
class LoadMediaAction(media_action.MediaAction):
""" For calling load() on media elements and waiting for an event to fire.
"""
def __init__(self, selector=None, timeout_in_seconds=0,
event_to_await='canplaythrough'):
super(LoadMediaAction, self).__init__()
self._selector = selector or ''
self._timeout_in_seconds = timeout_in_seconds
self._event_to_await = event_to_await
|
def WillRunAction(self, tab):
"""Load the JS code prior to running the action."""
super(LoadMediaAction, self).WillRunAction(tab)
self.LoadJS(tab, 'load_media.js')
  def RunAction(self, tab):
try:
tab.ExecuteJavaScript('window.__loadMediaAndAwait("%s", "%s");'
% (self._selector, self._event_to_await))
if self._timeout_in_seconds > 0:
self.WaitForEvent(tab, self._selector, self._event_to_await,
self._timeout_in_seconds)
except exceptions.EvaluateException:
raise page_action.PageActionFailed('Failed waiting for event "%s" on '
'elements with selector = %s.' %
(self._event_to_await, self._selector))
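# Illustrative usage (a sketch; `tab` would be supplied by Telemetry's page
# test machinery, and the CSS selector is hypothetical):
#
#   action = LoadMediaAction(selector='#video_1', timeout_in_seconds=10,
#                            event_to_await='canplaythrough')
#   action.WillRunAction(tab)
#   action.RunAction(tab)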
|
myangeline/pygame
|
flightgame/game.py
|
Python
|
apache-2.0
| 5,800
| 0.00141
|
# _*_ coding:utf-8 _*_
import random
from parser_xml import doxml
__author__ = 'Administrator'
import pygame
def item_to_int(array=[]):
arr = []
for a in array:
arr.append(int(a))
return arr
pygame.init()
keys = [False, False, False, False]
screen = pygame.display.set_mode((450, 650), 0, 32)
pygame.display.set_caption(u'飞机大战'.encode('utf-8'))
plane = pygame.image.load('resources/plane.png').convert_alpha()
pos = doxml('resources/plane.xml')
# hero_1
hero_1_p = pos['hero_1']
hero_1_p = item_to_int(hero_1_p)
hero_1 = plane.subsurface(pygame.Rect((hero_1_p[2], hero_1_p[3]), (hero_1_p[0], hero_1_p[1])))
hero_1_pos = [200, 580]
# bullet_1 (blue)
bullet_1_p = item_to_int(pos['bullet_1'])
bullet_1 = plane.subsurface(pygame.Rect((bullet_1_p[2], bullet_1_p[3]), (bullet_1_p[0], bullet_1_p[1])))
bullet_1_pos = [hero_1_pos[0] + hero_1_p[0] / 2 - bullet_1_p[0] / 2 + 1, hero_1_pos[1] - bullet_1_p[1]]
bullet_1_rect = pygame.Rect(bullet_1.get_rect())
# bullet_0 (orange)
bullet_0_p = item_to_int(pos['bullet_0'])
bullet_0 = plane.subsurface(pygame.Rect((bullet_0_p[2], bullet_0_p[3]), (bullet_0_p[0], bullet_0_p[1])))
# background image
bg1 = pygame.image.load('resources/bg_01.png')
# enemy_s
enemy_s_p = item_to_int(pos['enemy_s'])
enemy_s = plane.subsurface(pygame.Rect((enemy_s_p[2], enemy_s_p[3]), (enemy_s_p[0], enemy_s_p[1])))
enemy_s_rect = pygame.Rect(enemy_s.get_rect())
# enemy_m
enemy_m_p = item_to_int(pos['enemy_m'])
enemy_m = plane.subsurface(pygame.Rect((enemy_m_p[2], enemy_m_p[3]), (enemy_m_p[0], enemy_m_p[1])))
enemy_m_rect = pygame.Rect(enemy_m.get_rect())
# enemy_b
enemy_b_p = item_to_int(pos['enemy_b'])
enemy_b = plane.subsurface(pygame.Rect((enemy_b_p[2], enemy_b_p[3]), (enemy_b_p[0], enemy_b_p[1])))
enemy_b_rect = pygame.Rect(enemy_b.get_rect())
bullet_1_time = 15
bullet_1_array = [bullet_1_pos]
enemytimer = [100, 200, 300]
enemytimers = [0, 0, 0]
# probability of enemy planes firing bullets
enemy_s_g = [1, 4, 7, 9]
enemy_m_g = [1, 4]
enemy_b_g = [1]
# lists holding enemy bullets
enemy_s_array = []
enemy_m_array = []
enemy_b_array = []
# lists holding enemy planes
smallenemy = [[100, 0]]
midenemy = []
bigenemy = []
while True:
bullet_1_time -= 1
for i in range(3):
enemytimer[i] -= 1
screen.fill(0)
screen.blit(bg1, (0, 0))
screen.blit(hero_1, hero_1_pos)
    # draw hero_1 bullets
if not bullet_1_time:
bullet_1_array.append([hero_1_pos[0] + hero_1_p[0] / 2 - bullet_1_p[0] / 2 + 1, hero_1_pos[1] - bullet_1_p[1]])
bullet_1_time = 15
index = 0
for bullet_pos in bullet_1_array:
if bullet_pos[1] < 0:
bullet_1_array.pop(index)
bullet_pos[1] -= 5
index += 1
for bullet_pos in bullet_1_array:
screen.blit(bullet_1, bullet_pos)
    # draw small enemy planes
if not enemytimer[0]:
smallenemy.append([random.randint(0, 410), -20])
enemytimer[0] = 100 - (enemytimers[0] * 2)
enemytimers[0] = 35 if enemytimers[0] > 35 else enemytimers[0] + 5
index = 0
for se in smallenemy:
if se[1] > 650:
smallenemy.pop(index)
se[1] += 3
enemy_s_rect.left = se[0]
enemy_s_rect.top = se[1]
index_bullet = 0
for bullet in bullet_1_array:
bullet_1_rect.left = bullet[0]
bullet_1_rect.top = bullet[1]
if enemy_s_rect.colliderect(bullet_1_rect):
bullet_1_array.pop(index_bullet)
smallenemy.pop(index)
index += 1
        # randomly decide whether to fire a bullet
# r = random.randint(1, 500)
# if r in enemy_s_g:
# enemy_s_array.append([se[0] + 15, se[1] + 27])
index = 0
# for bullet in enemy_s_array:
# if bullet[1] > 650:
# enemy_s_array.pop(index)
# bullet[1] += 5
# index += 1
for se in smallenemy:
screen.blit(enemy_s, se)
for bullet in enemy_s_array:
screen.blit(bullet_0, bullet)
    # draw medium-sized enemy planes
if not enemytimer[1]:
midenemy.append([random.randint(0, 380), -40])
enemytimer[1] = 200 - (enemytimers[1] * 2)
enemytimers[1] = 55 if enemytimers[1] > 55 else enemytimers[1] + 5
index = 0
for me in midenemy:
if me[1] > 650:
midenemy.pop(index)
me[1] += 2
index += 1
for me in midenemy:
screen.blit(enemy_m, me)
    # draw large enemy planes
if not enemytimer[2]:
bigenemy.append([random.randint(0, 340), -100])
enemytimer[2] = 300 - (enemytimers[2] * 2)
enemytimers[2] = 65 if enemytimers[2] > 65 else enemytimers[2] + 5
index = 0
for be in bigenemy:
if be[1] > 650:
bigenemy.pop(index)
be[1] += 1
index += 1
for be in bigenemy:
screen.blit(enemy_b, be)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
keys[0] = True
elif event.key == pygame.K_a:
keys[1] = True
elif event.key == pygame.K_s:
keys[2] = True
elif event.key == pygame.K_d:
keys[3] = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_w:
keys[0] = False
elif event.key == pygame.K_a:
keys[1] = False
elif event.key == pygame.K_s:
keys[2] = False
elif event.key == pygame.K_d:
keys[3] = False
if keys[0]:
hero_1_pos[1] -= 5
elif keys[2]:
hero_1_pos[1] += 5
if keys[1]:
hero_1_pos[0] -= 5
elif keys[3]:
hero_1_pos[0] += 5
|
tlake/http-server
|
test_functests_gevent_server.py
|
Python
|
mit
| 5,433
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gevent_server
import socket
import pytest
import time
from multiprocessing import Process
addr = ("127.0.0.1", 8000)
_CRLF = b'\r\n'
# yield fixtures are demons
# We used to have a yield fixture here and in the server.py tests
# which would start up the server and keep it going, I thought, until
# the end of its scope. Since these server fixtures were scoped to
# module, we believed they would terminate at the end of the module.
# The theory seemed to hold true through the end of Step 3, since we
# only ever started one server throughout the entire testing process.
# Once we created the gevent server, there were within the test suite
# two different server creation fixtures, both scoped to module. We
# falsely believed that each of these fixtures would terminate at the
# end of the module. In practice, it seems that a yield fixture doesn't
# terminate until the end of the entire testing session, regardless
# of defined scope.
# The solution, seen below, is to use just a regular fixture with
# a process-terminating finalizer. The scope behaves properly,
# and autouse also still works.
@pytest.fixture(scope='module', autouse=True)
def gevent_server_setup(request):
process = Process(target=gevent_server.run_gevent_server)
process.daemon = True
process.start()
time.sleep(0.1)
def cleanup():
process.terminate()
request.addfinalizer(cleanup)
return process
@pytest.fixture(scope='function')
def client_setup():
client = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP
)
client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client.connect(addr)
return client
def verify_response(response):
assert 2 * _CRLF in response
head_and_body = response.split((2 * _CRLF), 1)
head_chunk = head_and_body[0].split(_CRLF)
first_line = head_chunk[0].split()
assert first_line[0] == b"HTTP/1.1"
assert first_line[1].isdigit()
assert first_line[2] is not None
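# For reference, a minimal response that satisfies verify_response() would look
# like (hypothetical body):
#
#   b"HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\nhello"
#
# i.e. a status line, optional headers, a blank line, then the body.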
################
# FUNCTIONAL TESTS
################
def test_client_receives_ok_on_image_request(client_setup):
client = client_setup
request = _CRLF.join([
b"GET /images/sample_1.png HTTP/1.1",
b"Host: www.host.com:80",
b""
])
ok_header = b"HTTP/1.1 200 OK"
content_type = b'image'
client.sendall(request)
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert ok_header in server_response
assert content_type in server_response
def test_client_receives_ok_on_textfile_request(client_setup):
client = client_setup
request = _CRLF.join([
b"GET /sample.txt HTTP/1.1",
b"Host: www.host.com:80",
b""
])
ok_header = b"HTTP/1.1 200 OK"
content_type = b'text/plain'
client.sendall(request)
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert ok_header in server_response
assert content_type in server_response
def test_client_receives_sample_txt_on_request(client_setup):
client = client_setup
client.sendall(
b"GET /sample.txt HTTP/1.1\r\n"
b"Host: www.host.com:80\r\n"
b"\r\n"
)
text = (
"This is a very simple text file.\n"
"Just to show that we can server it up.\n"
"It is three lines long."
)
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert text in server_response
def test_client_receives_root_filesystem(client_setup):
client = client_setup
client.sendall(b"GET / HTTP/1.1\r\nHost: www.host.com:80\r\n\r\n")
expected_response = [
'<li>a_web_page.html</li>',
'<li>sample.txt</li>',
'<li>make_time.py</li>',
'<li>images</li>'
]
server_response = client.recv(4096)
client.close()
verify_response(server_response)
for line in expected_response:
assert line in server_response
def test_client_receives_error_on_not_get(client_setup):
client = client_setup
client.sendall(b"POST / HTTP/1.1\r\nHost: www.host.com:80\r\n\r\n")
expected_response = (b"405 Method Not Allowed")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
def test_client_receives_error_on_bad_request(client_setup):
client = client_setup
client.sendall(b"GET / HTTP/1.9\r\nHost: www.host.com:80\r\n\r\n")
expected_response = (b"400 Bad Request")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
def test_client_receives_error_on_no_host(client_setup):
client = client_setup
client.sendall(b"GET / HTTP/1.1\r\n\r\n")
expected_response = (b"406 Not Acceptable")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
def test_client_receives_error_on_bad_uri(client_setup):
client = client_setup
client.sendall(b"GET /sadsge HTTP/1.1\r\nHost: www.host.com:80\r\n\r\n")
expected_response = (b"404 Not Found")
server_response = client.recv(4096)
client.close()
verify_response(server_response)
assert expected_response in server_response
|
nschloe/maelstrom
|
test/test_poisson_order.py
|
Python
|
mit
| 4,993
| 0.000401
|
# -*- coding: utf-8 -*-
#
from __future__ import print_function
import warnings
import numpy
import pytest
import sympy
from dolfin import (
MPI,
Constant,
    DirichletBC,
Expression,
FunctionSpace,
UnitSquareMesh,
errornorm,
pi,
triangle,
)
import helpers
import matplotlib.pyplot as plt
from maelstrom import heat
MAX_DEGREE = 5
def problem_sinsin():
    """sine-sine example.
"""
def mesh_generator(n):
return UnitSquareMesh(n, n, "left/right")
x = sympy.DeferredVector("x")
# Choose the solution such that the boundary conditions are fulfilled
# exactly. Also, multiply with x**2 to make sure that the right-hand side
# doesn't contain the term 1/x. Although it looks like a singularity at
    # x=0, this term is essentially harmless since the volume element 2*pi*x is
# used throughout the code, canceling out with the 1/x. However, Dolfin has
# problems with this, cf.
# <https://bitbucket.org/fenics-project/dolfin/issues/831/some-problems-with-quadrature-expressions>.
solution = {
"value": x[0] ** 2 * sympy.sin(pi * x[0]) * sympy.sin(pi * x[1]),
"degree": MAX_DEGREE,
}
# Produce a matching right-hand side.
phi = solution["value"]
kappa = 2.0
rho = 3.0
cp = 5.0
conv = [1.0, 2.0]
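    # Added note (restating the sympy expression below): the manufactured
    # right-hand side is the axisymmetric convection-diffusion operator
    # applied to the chosen solution phi,
    #
    #   rhs = -1/x * d/dx(kappa * x * dphi/dx)
    #         - 1/x * d/dy(kappa * x * dphi/dy)
    #         + rho * cp * (conv[0] * dphi/dx + conv[1] * dphi/dy)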
rhs_sympy = sympy.simplify(
-1.0 / x[0] * sympy.diff(kappa * x[0] * sympy.diff(phi, x[0]), x[0])
- 1.0 / x[0] * sympy.diff(kappa * x[0] * sympy.diff(phi, x[1]), x[1])
+ rho * cp * conv[0] * sympy.diff(phi, x[0])
+ rho * cp * conv[1] * sympy.diff(phi, x[1])
)
rhs = {
"value": Expression(helpers.ccode(rhs_sympy), degree=MAX_DEGREE),
"degree": MAX_DEGREE,
}
return mesh_generator, solution, rhs, triangle, kappa, rho, cp, Constant(conv)
@pytest.mark.parametrize("problem", [problem_sinsin])
@pytest.mark.parametrize("stabilization", [None, "supg"])
def test_order(problem, stabilization):
"""Assert the correct discretization order.
"""
mesh_sizes = [16, 32, 64]
errors, hmax = _compute_errors(problem, mesh_sizes, stabilization)
# Compute the numerical order of convergence.
order = helpers.compute_numerical_order_of_convergence(hmax, errors)
# The test is considered passed if the numerical order of convergence
# matches the expected order in at least the first step in the coarsest
# spatial discretization, and is not getting worse as the spatial
# discretizations are refining.
tol = 0.1
expected_order = 2.0
assert (order > expected_order - tol).all()
return
def _compute_errors(problem, mesh_sizes, stabilization):
mesh_generator, solution, f, cell_type, kappa, rho, cp, conv = problem()
if solution["degree"] > MAX_DEGREE:
warnings.warn(
"Expression degree ({}) > maximum degree ({}). Truncating.".format(
solution["degree"], MAX_DEGREE
)
)
degree = MAX_DEGREE
else:
degree = solution["degree"]
sol = Expression(
helpers.ccode(solution["value"]), t=0.0, degree=degree, cell=cell_type
)
errors = numpy.empty(len(mesh_sizes))
hmax = numpy.empty(len(mesh_sizes))
for k, mesh_size in enumerate(mesh_sizes):
mesh = mesh_generator(mesh_size)
hmax[k] = MPI.max(MPI.comm_world, mesh.hmax())
Q = FunctionSpace(mesh, "CG", 1)
prob = heat.Heat(
Q,
kappa=kappa,
rho=rho,
cp=cp,
convection=conv,
source=f["value"],
dirichlet_bcs=[DirichletBC(Q, 0.0, "on_boundary")],
stabilization=stabilization,
)
phi_approx = prob.solve_stationary()
errors[k] = errornorm(sol, phi_approx)
return errors, hmax
def _show_order_info(problem, mesh_sizes, stabilization):
"""Performs consistency check for the given problem/method combination and
show some information about it. Useful for debugging.
"""
errors, hmax = _compute_errors(problem, mesh_sizes, stabilization)
order = helpers.compute_numerical_order_of_convergence(hmax, errors)
# Print the data
print()
print("hmax ||u - u_h|| conv. order")
print("{:e} {:e}".format(hmax[0], errors[0]))
for j in range(len(errors) - 1):
print(32 * " " + "{:2.5f}".format(order[j]))
print("{:e} {:e}".format(hmax[j + 1], errors[j + 1]))
# Plot the actual data.
plt.loglog(hmax, errors, "-o")
# Compare with order curves.
plt.autoscale(False)
e0 = errors[0]
for order in range(4):
plt.loglog(
[hmax[0], hmax[-1]], [e0, e0 * (hmax[-1] / hmax[0]) ** order], color="0.7"
)
plt.xlabel("hmax")
plt.ylabel("||u-u_h||")
plt.show()
return
if __name__ == "__main__":
# mesh_sizes_ = [16, 32, 64, 128]
# _show_order_info(problem_sinsin, mesh_sizes_, None)
test_order(problem_sinsin, "supg")
|
alexbredo/site-packages3
|
handler/elasticsearch.py
|
Python
|
bsd-2-clause
| 6,002
| 0.033156
|
# Copyright (c) 2014 Alexander Bredo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
from base.applog import *
import urllib3
import json
from base.text import clean
'''
TODO:
- BUG: Illegal unquoted character ((CTRL-CHAR, code 8)): has to be escaped using backslash to be included in string value
'''
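# One possible fix for the CTRL-CHAR issue noted above (a sketch, not part of
# the original module): drop ASCII control characters from string values
# before they are serialized to JSON, e.g.
#
#   import re
#   _CTRL_CHARS = re.compile(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]')
#   def _strip_control_chars(value):
#       return _CTRL_CHARS.sub('', value) if isinstance(value, str) else value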
class ElasticsearchClient():
def __init__(self, host='127.0.0.1', port=9200, index='default', doctype='doc', ttl='1w'):
self.http = urllib3.PoolManager()
self.index = index
self.doctype = doctype
self.host = host
self.port = port
self.ttl = ttl
self.setup()
def setup(self):
if not self.exists_index():
log.info("Elasticsearch-Index '%s' does not exist. Trying to create now." % self.index)
self.create_index_mapping()
else:
log.info("Elasticsearch-Index '%s' present." % self.index)
def saveOne(self, data, doctype):
nice_data = json.dumps(clean(data), indent=4, separators=(',', ': '))
r = self.http.urlopen('POST',
'http://%s:%i/%s/%s' % (self.host, self.port, self.index, doctype),
headers = {'Content-Type':'application/json'},
body = nice_data
)
print (r.status, r.data)
if int(r.status/100) == 2:
log.debug("Element %s has been saved." % nice_data)
else:
log.error("Element could not be saved: %s. Error: %s" % (nice_data, r.data))
def saveMany(self, data, doctype):
log.debug("Trying to save %d items to Elasticsearch." % len(data))
serialized_data = [self.__makeStringsFromDict(x) for x in data]
head = ({ "index" : { "_index" : self.index, "_type" : doctype } }).__str__() + '\n'
dataAsStr = ('\n'.join([head + line.__str__() for line in serialized_data])).replace('\'', '\"') + '\n'
r = self.http.urlopen('POST',
'http://%s:%i/%s/%s/_bulk' % (self.host, self.port, self.index, doctype),
headers = {'Content-Type':'application/json'},
body = dataAsStr
)
if int(r.status/100) == 2:
log.debug("%s Elements has been saved." % len(data))
else:
log.error("Elements could not be saved: %s. Error: %s" % (dataAsStr, r.data))
def __makeStringsFromDict(self, dictionary):
try:
for key in dictionary.keys(): # Native Datatypes: No Objects!
if isinstance(dictionary[key], dict): # nested...
dictionary[key] = self.__makeStringsFromDict(dictionary[key])
elif isinstance(dictionary[key], str):
dictionary[key] = dictionary[key].__str__()
|
#elif isinstance(dictionary[key], int) and isinstance(dictionary[key], float):
# dictionary[key] = dictionary[key]
return dictionary
except Exception as e:
log.error(e)
    def deleteIndex(self):
r = self.http.request('DELETE', 'http://%s:%i/%s/' % (self.host, self.port, self.index))
if int(r.status/100) == 2:
log.info("Elasticsearch-Index '%s' was removed." % self.index)
return True
else:
log.warning("Elasticsearch-Index '%s' does not exist." % self.index)
return False # print r.data
def exists_index(self):
r = self.http.request('GET', 'http://%s:%i/%s/_mapping' % (self.host, self.port, self.index))
if int(r.status/100) == 2:
return True
else:
return False # print r.data
def create_index_mapping(self):
# POST /index/
data = """{
"mappings" : {
"_default_" : {
"_ttl": {
"enabled": "true",
"default": "%s"
},
"properties" : {
"sourceIPv6Address": { "type": "string", "index": "not_analyzed" },
"destinationIPv6Address": { "type": "string", "index": "not_analyzed" },
"sourceHostname" : {"type" : "string", "index" : "not_analyzed"},
"destinationHostname" : {"type" : "string", "index" : "not_analyzed"},
"destinationTransportPortName" : {"type" : "string", "index" : "not_analyzed"},
"sourceTransportPortName" : {"type" : "string", "index" : "not_analyzed"},
"protocolIdentifierName" : {"type" : "string", "index" : "not_analyzed"},
"networkLocation" : {"type" : "string", "index" : "not_analyzed"},
"command" : {"type" : "string", "index" : "not_analyzed"},
"session" : {"type" : "string", "index" : "not_analyzed"}
}
}
}
}""" % self.ttl
r = self.http.urlopen('POST',
'http://%s:%i/%s/' % (self.host, self.port, self.index),
headers = {'Content-Type':'application/json'},
body = data
)
if int(r.status/100) == 2:
log.info("Elasticsearch-Index '%s' has been created." % self.index)
else:
log.error("Elasticsearch-Index '%s' has NOT been created. (%s)" % (self.index, r.data))
if __name__ == '__main__':
ec = ElasticsearchClient('lnx06-elasticsearch1', 9200, 'honeypot')
ec.deleteIndex()
#ec.saveOne({'ab':1, 'cd':'blub'}, 'intrusion')
#ec.saveMany([{'ab':1, 'cd':'blub'}, {'ddd':22, 'dfd':'fdgg'}], 'intrusion')
|
arvinddoraiswamy/blahblah
|
stockfighter/test.py
|
Python
|
mit
| 539
| 0.011132
|
import requests
import os
import sys
proxies = {
"http": "http://127.0.0.1:8080",
"https": "http://127.0.0.1:8080",
}
#Adding directory to the path where Python searches for modules
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/challenges/stockfighter/modules/')
sys.path.insert(0, cmd_folder)
#Importing API module
import api
if __name__ == "__main__":
requests.packages.urllib3.disable_warnings()
r1= api.orderbook().json()[u'asks']
print r1
r1= api.orderbook().json()[u'bids']
print r1
|
sot/mica
|
mica/web/admin.py
|
Python
|
bsd-3-clause
| 127
| 0
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from django.contrib import admin
# Register your models here.
|
nathan-hoad/aesop
|
aesop/utils.py
|
Python
|
bsd-3-clause
| 4,678
| 0.001069
|
import asyncio
import os
from urllib.parse import urlparse
import aiohttp
def damerau_levenshtein(first_string, second_string):
"""Returns the Damerau-Levenshtein edit distance between two strings."""
previous = None
prev_a = None
current = [i for i, x in enumerate(second_string, 1)] + [0]
for a_pos, a in enumerate(first_string):
prev_b = None
previously_previous, previous, current = previous, current, [0] * len(second_string) + [a_pos+1]
for b_pos, b in enumerate(second_string):
cost = int(a != b)
deletion = previous[b_pos] + 1
insertion = current[b_pos-1] + 1
substitution = previous[b_pos-1] + cost
current[b_pos] = min(deletion, insertion, substitution)
if prev_b and prev_a and a == prev_b and b == prev_a and a != b:
current[b_pos] = min(current[b_pos], previously_previous[b_pos-2] + cost)
prev_b = b
prev_a = a
return current[len(second_string) - 1]
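# For reference, the Damerau-Levenshtein distance counts single-character
# insertions, deletions, substitutions and transpositions of adjacent
# characters, so (assuming the implementation above is correct)
# damerau_levenshtein('ab', 'ba') should be 1 and
# damerau_levenshtein('abc', 'abc') should be 0.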
def complete(value):
"""asyncio equivalent to `twisted.internet.defer.succeed`"""
f = asyncio.Future()
f.set_result(value)
return f
roman_numeral_table = [
('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1)
]
def int_to_roman(num):
def parts():
nonlocal num
for letter, value in roman_numeral_table:
while value <= num:
num -= value
yield letter
return ''.join(parts())
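# Example: int_to_roman(1994) consumes the table above as
# 1000 -> 'M', 900 -> 'CM', 90 -> 'XC', 4 -> 'IV' and returns 'MCMXCIV'.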
class RequestManager:
"""Gross class for managing active requests.
The only thing it really does is make sure that anything using `get()`
    won't send out duplicate requests. This is useful when trying to download
metadata for new series.
"""
# FIXME: make this connection map configurable.
    connection_map = {
'www.omdbapi.com': 20,
}
current_requests = {}
limits = {}
CONN_POOL = aiohttp.TCPConnector()
count = 0
@classmethod
def get_pool(cls, key):
if key not in cls.limits:
limit = cls.connection_map.get(key, 50)
cls.limits[key] = asyncio.BoundedSemaphore(limit)
return cls.limits[key]
def __init__(self, url, **kwargs):
self.url = url
self.kwargs = kwargs
self.callbacks = []
RequestManager.count += 1
@asyncio.coroutine
def run(self):
key = urlparse(self.url).netloc
p = self.get_pool(key)
with (yield from p):
response = yield from aiohttp.request('GET', self.url, connector=self.CONN_POOL, **self.kwargs)
try:
json = yield from response.json()
except Exception as e:
for cb in self.callbacks:
cb.set_exception(e)
else:
for cb in self.callbacks:
cb.set_result((response, json))
def wait_for(self):
self.callbacks.append(asyncio.Future())
return self.callbacks[-1]
def get(url, **kwargs):
full_url = url + '&'.join(sorted('='.join(kv) for kv in kwargs.get('params', {}).items()))
if full_url in RequestManager.current_requests:
return RequestManager.current_requests[full_url].wait_for()
r = RequestManager(url, **kwargs)
RequestManager.current_requests[full_url] = r
asyncio.async(r.run())
cb = r.wait_for()
@cb.add_done_callback
def callback(result):
del RequestManager.current_requests[full_url]
return r.wait_for()
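# Illustrative usage inside a coroutine (a sketch; the URL and parameters are
# hypothetical):
#
#   response, data = yield from get('http://www.omdbapi.com/?',
#                                   params={'t': 'Some Title'})
#
# Concurrent callers asking for the same URL and params share one in-flight
# request instead of issuing duplicates.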
def setup_logging(name, level):
from logbook import NullHandler, RotatingFileHandler, lookup_level
path = os.path.expanduser('~/.config/aesop/{}.log'.format(name))
level = lookup_level(level)
# null handler to prevent logs unhandled from RotatingFileHandler going to
# stderr
NullHandler().push_application()
RotatingFileHandler(path, level=level).push_application()
def get_language(path):
from aesop import isocodes
for suffix in path.suffixes:
suffix = suffix[1:]
try:
isoname = isocodes.isoname(suffix.title())
except KeyError:
pass
else:
return isoname
if len(suffix) not in {2, 3}:
continue
suffix = suffix.lower()
if len(suffix) == 2:
try:
suffix = isocodes.iso2to3(suffix)
except KeyError:
continue
try:
isocodes.nicename(suffix)
except KeyError:
pass
else:
return suffix
|
muttiopenbts/nazar
|
create_netblocks.py
|
Python
|
gpl-2.0
| 2,145
| 0.000466
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 12:36:04 2015
@author: Mutti
argument 1 country
argument 2 city
argument 3 location of maxmind csv files
"""
from netaddr import *
import sys
import os
import re
import time
BASE_PATH = '/opt/masscan/targets/'
def grep(search_string, filename, options=''):
'''
Call OS grep
Return matching lines
'''
result = ''
p = os.popen("egrep " + options + ' "' + search_string + '" ' + filename, "r")
while 1:
line = p.readline()
result += line
if not line:
break
return result
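# Illustrative call (hypothetical paths), mirroring how main() uses it below:
#
#   matches = grep('US.*San Diego', '/opt/maxmind_geoip/processed/', options='-h -r')
#
# which returns the matching lines of the egrep invocation as a single
# newline-joined string.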
def extract_cidr(csv_line):
matchObj = re.match(r'(\d+\.\d+\.\d+\.\d+/\d+)', csv_line)
if matchObj:
return matchObj.group(1)
def write_log(output):
datetime = time.strftime("%x") + ' ' + time.strftime("%X")
with open(LOG_FILE, 'a') as f:
f.write("%s: %s\n" % (datetime, output))
def write_city_netblock_to_file(netblock, filename):
with open(BASE_PATH + filename, 'w+') as f:
f.write(netblock)
def main():
country = 'US'
country = sys.argv[1]
city = 'San Diego'
city = sys.argv[2]
maxmind_netblocks = '/opt/maxmind_geoip/processed/'
maxmind_netblocks = sys.argv[3]
grep_options = '-h' # Don't print filenames
grep_options += ' -r' # Recursively search all files in directory
result = grep(
search_string=country+'.*'+city,
filename=maxmind_netblocks,
options=grep_options
)
    # Separate grep result into array
cidr_blocks = [extract_cidr(csv_line) for csv_line in result.splitlines()]
# Extract CIDR netblock
merged_cidr_blocks = [IPNetwork(cidr_block) for cidr_block in cidr_blocks]
print len(cidr_blocks)
    # Attempt to merge overlapping netblocks
    merged_cidr_blocks = cidr_merge(merged_cidr_blocks)
print len(merged_cidr_blocks)
merged_cidr_blocks = [cidr.__str__() for cidr in merged_cidr_blocks]
new_filename = country+"-"+city+".txt"
write_city_netblock_to_file(
'\n'.join(merged_cidr_blocks),
new_filename.replace(" ", "_"),
)
# print cidr_blocks
if __name__ == '__main__':
main()
|
olexiim/edx-platform
|
cms/djangoapps/contentstore/views/videos.py
|
Python
|
agpl-3.0
| 12,554
| 0.001434
|
"""
Views related to the video upload feature
"""
from boto import s3
import csv
from uuid import uuid4
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_http_methods
import rfc6266
from edxval.api import create_video, get_videos_for_ids
from opaque_keys.edx.keys import CourseKey
from contentstore.models import VideoUploadConfig
from contentstore.utils import reverse_course_url
from edxmako.shortcuts import render_to_response
from util.json_request import expect_json, JsonResponse
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import modulestore
from .course import get_course_and_check_access
__all__ = ["videos_handler", "video_encodings_download"]
# String constant used in asset keys to identify video assets.
VIDEO_ASSET_TYPE = "video"
# Default expiration, in seconds, of one-time URLs used for uploading videos.
KEY_EXPIRATION_IN_SECONDS = 86400
class StatusDisplayStrings(object):
"""
Enum of display strings for Video Status presented in Studio (e.g., in UI and in CSV download).
"""
# Translators: This is the status of an active video upload
UPLOADING = _("Uploading")
# Translators: This is the status for a video that the servers are currently processing
IN_PROGRESS = _("In Progress")
# Translators: This is the status for a video that the servers have successfully processed
COMPLETE = _("Complete")
# Translators: This is the status for a video that the servers have failed to process
    FAILED = _("Failed")
# Translators: This is the status for a video for which an invalid
# processing token was provided in the course settings
    INVALID_TOKEN = _("Invalid Token")
# Translators: This is the status for a video that is in an unknown state
UNKNOWN = _("Unknown")
def status_display_string(val_status):
"""
Converts VAL status string to Studio status string.
"""
status_map = {
"upload": StatusDisplayStrings.UPLOADING,
"ingest": StatusDisplayStrings.IN_PROGRESS,
"transcode_queue": StatusDisplayStrings.IN_PROGRESS,
"transcode_active": StatusDisplayStrings.IN_PROGRESS,
"file_delivered": StatusDisplayStrings.COMPLETE,
"file_complete": StatusDisplayStrings.COMPLETE,
"file_corrupt": StatusDisplayStrings.FAILED,
"pipeline_error": StatusDisplayStrings.FAILED,
"invalid_token": StatusDisplayStrings.INVALID_TOKEN
}
return status_map.get(val_status, StatusDisplayStrings.UNKNOWN)
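# Illustrative: status_display_string("transcode_active") returns
# StatusDisplayStrings.IN_PROGRESS, and any VAL status missing from status_map
# falls back to StatusDisplayStrings.UNKNOWN.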
@expect_json
@login_required
@require_http_methods(("GET", "POST"))
def videos_handler(request, course_key_string):
"""
The restful handler for video uploads.
GET
html: return an HTML page to display previous video uploads and allow
new ones
json: return json representing the videos that have been uploaded and
their statuses
POST
json: create a new video upload; the actual files should not be provided
to this endpoint but rather PUT to the respective upload_url values
contained in the response
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
if request.method == "GET":
if "application/json" in request.META.get("HTTP_ACCEPT", ""):
return videos_index_json(course)
else:
return videos_index_html(course)
else:
return videos_post(course, request)
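# Illustrative: a GET sent with "Accept: application/json" receives the JSON
# listing from videos_index_json(), while a plain browser GET falls through to
# the HTML page rendered by videos_index_html().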
@login_required
@require_GET
def video_encodings_download(request, course_key_string):
"""
Returns a CSV report containing the encoded video URLs for video uploads
in the following format:
Video ID,Name,Status,Profile1 URL,Profile2 URL
aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
def get_profile_header(profile):
"""Returns the column header string for the given profile's URLs"""
# Translators: This is the header for a CSV file column
# containing URLs for video encodings for the named profile
# (e.g. desktop, mobile high quality, mobile low quality)
return _("{profile_name} URL").format(profile_name=profile)
profile_whitelist = VideoUploadConfig.get_profile_whitelist()
status_whitelist = VideoUploadConfig.get_status_whitelist()
videos = list(_get_videos(course))
name_col = _("Name")
duration_col = _("Duration")
added_col = _("Date Added")
video_id_col = _("Video ID")
status_col = _("Status")
profile_cols = [get_profile_header(profile) for profile in profile_whitelist]
def make_csv_dict(video):
"""
Makes a dictionary suitable for writing CSV output. This involves
extracting the required items from the original video dict and
converting all keys and values to UTF-8 encoded string objects,
because the CSV module doesn't play well with unicode objects.
"""
# Translators: This is listed as the duration for a video that has not
# yet reached the point in its processing by the servers where its
# duration is determined.
duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
ret = dict(
[
(name_col, video["client_video_id"]),
(duration_col, duration_val),
(added_col, video["created"].isoformat()),
(video_id_col, video["edx_video_id"]),
(status_col, video["status"]),
] +
[
(get_profile_header(encoded_video["profile"]), encoded_video["url"])
for encoded_video in video["encoded_videos"]
if encoded_video["profile"] in profile_whitelist
]
)
return {
key.encode("utf-8"): value.encode("utf-8")
for key, value in ret.items()
}
response = HttpResponse(content_type="text/csv")
# Translators: This is the suggested filename when downloading the URL
# listing for videos uploaded through Studio
filename = _("{course}_video_urls").format(course=course.id.course)
# See https://tools.ietf.org/html/rfc6266#appendix-D
response["Content-Disposition"] = rfc6266.build_header(
filename + ".csv",
filename_compat="video_urls.csv"
)
writer = csv.DictWriter(
response,
[name_col, duration_col, added_col, video_id_col, status_col] + profile_cols,
dialect=csv.excel
)
writer.writeheader()
for video in videos:
if video["status"] in status_whitelist:
writer.writerow(make_csv_dict(video))
return response
def _get_and_validate_course(course_key_string, user):
"""
Given a course key, return the course if it exists, the giv
|
en user has
access to it, and it is properly configured for video uploads
"""
course_key = CourseKey.from_string(course_key_string)
# For now, assume all studio users that have access to the course can upload videos.
# In the future, we plan to add a new org-level role for video uploaders.
course = get_course_and_check_access(course_key, user)
if (
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] and
getattr(settings, "VIDEO_UPLOAD_PIPELINE", None) an
|
d
course and
course.video_pipeline_configured
):
return course
else:
return None
def _get_videos(course):
"""
Retrieves the list of videos from VAL corresponding to the videos listed in
the asset metadata store.
"""
edx_videos_ids = [
v.asset_id.path
for v in modulestore().get_all_asset_metadata(course.id, VIDEO_ASSET_TYPE)
]
videos = list(get_videos_for_ids(edx_videos_
|
MIRAvzw/qt-brisa
|
doc/docSource/conf.py
|
Python
|
lgpl-3.0
| 6,284
| 0.006684
|
# -*- coding: utf-8 -*-
#
# BRisa UPnP documentation build configuration file, created by
# sphinx-quickstart on Mon May 4 11:21:11 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If your extensions (or modules documented by autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Qt BRisa UPnP framework'
copyright = u'2010, BRisa Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name
|
will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# ------
|
-----------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'BRisaUPnPdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'BRisaUPnP.tex', ur'Qt BRisa Documentation',
ur'BRisa Team', 'manual'),
]
# Code used to block generation of blank pages in the final pdf document
latex_elements = {'classoptions' : ',oneside,openany',
'babel' : '\\usepackage[USenglish]{babel}'}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
pbanaszkiewicz/amy
|
amy/reports/tests/test_instructor_issues.py
|
Python
|
mit
| 1,579
| 0
|
from django.urls import reve
|
rse
from workshops.models import Event, Role, Tag, Task
from workshops.tests.base import TestBase
|
class TestInstructorIssues(TestBase):
"""Tests for the `instructor_issues` view."""
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
TTT, _ = Tag.objects.get_or_create(name="TTT")
stalled = Tag.objects.get(name="stalled")
learner, _ = Role.objects.get_or_create(name="learner")
# add two TTT events, one stalled and one normal
e1 = Event.objects.create(slug="ttt-stalled", host=self.org_alpha)
e1.tags.set([TTT, stalled])
e2 = Event.objects.create(slug="ttt-not-stalled", host=self.org_alpha)
e2.tags.add(TTT)
Task.objects.create(event=e1, person=self.spiderman, role=learner)
Task.objects.create(event=e1, person=self.ironman, role=learner)
Task.objects.create(event=e1, person=self.blackwidow, role=learner)
Task.objects.create(event=e2, person=self.spiderman, role=learner)
def test_stalled_trainees_not_in_pending(self):
""""""
rv = self.client.get(reverse("instructor_issues"))
pending = [t.person for t in rv.context["pending"]]
stalled = [t.person for t in rv.context["stalled"]]
self.assertIn(self.spiderman, pending)
self.assertNotIn(self.spiderman, stalled)
self.assertNotIn(self.ironman, pending)
self.assertIn(self.ironman, stalled)
self.assertNotIn(self.blackwidow, pending)
self.assertIn(self.blackwidow, stalled)
|
x0rnn/minqlx-plugins
|
midair_only.py
|
Python
|
gpl-3.0
| 9,927
| 0.004835
|
# midair_only.py, this plugin changes the gameplay into a rockets-only mode where only midair shots kill.
# If you just want a midair ranking system, use midair.py instead.
# This plugin also keeps score of top X midair rocket kills per map in terms of distance.
# On every midair kill that counts (minheight and mindistance variables), a text message will inform everyone
# about the distance involved, and who killed who, together with a "Holy shit" sound announcement.
# If the map record has been broken, then the "New high score" announcement will be used instead.
# Also shows a kill counter of Killer:Victim (taken from pummel.py by mattiZed).
# !top will list top X map records, !mytop will list your top X map records.
# !kills/!killstats will list top X players with most midair kills and their kill count for the current map.
# (admins only) !cleartopshots will clear all map topshots, !clearkillstats will clear all map killstats.
# by x0rnn, mattiZed, kanzo, iou; thanks guys :)
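# Illustrative chat usage (per the description above and the defaults in the
# command handlers below): "!top" lists the map's 5 longest midair kills,
# "!mytop" lists your own 10 best, and "!kills" lists the 5 players with the
# most midair kills on the current map.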
import minqlx
import minqlx.database
import math
import time
MIDAIR_KEY = "minqlx:midair:{}"
PLAYER_KEY = "minqlx:players:{}"
class midair_only(minqlx.Plugin):
def __init__(self):
self.add_hook("frame", self.handle_frame, priority=minqlx.PRI_LOWEST)
self.add_hook("death", self.handle_death)
self.add_hook("map", self.handle_map)
self.add_hook("game_countdown", self.handle_game_countdown)
self.add_hook("game_end", self.handle_game_end)
self.add_command(("topshots", "top"), self.cmd_topshots)
self.add_command(("mytopshots", "mytop"), self.cmd_mytopshots)
self.add_command(("kills", "killstats"), self.cmd_killstats)
self.add_command("cleartopshots", self.cmd_cleartopshots, 5)
self.add_command("clearkillstats", self.cmd_clearkillstats, 5)
self.record = 0.0
self.top_midair = {}
def handle_frame(self):
for pl in self.players():
if pl.team != "spectator":
if pl.health > 0:
if pl.velocity().z == 0.0:
pl.health = 6666
else:
pl.health = 100
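    # Note: the frame hook above is what enforces the rockets-only "midair" rule:
    # a player whose vertical velocity is 0 (standing on the ground) is kept at
    # 6666 health and so cannot realistically be killed, while airborne players
    # are reset to the normal 100 health.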
def handle_game_countdown(self):
self.top_midair.clear()
def handle_game_end(self, data):
if self.top_midair:
self.msg("Top midair: {} killed {} from a distance of: ^1{} ^7units.".format(self.top_midair['k_name'], self.top_midair['v_name'], round(self.top_midair['units'])))
self.top_midair.clear()
def handle_death(self, victim, killer, data):
if data['KILLER'] is not None:
if data['MOD'] == "ROCKET" and data['VICTIM']['AIRBORNE']:
k_X = data['KILLER']['POSITION']['X']
k_Y = data['KILLER']['POSITION']['Y']
k_Z = data['KILLER']['POSITION']['Z']
v_X = data['VICTIM']['POSITION']['X']
v_Y = data['VICTIM']['POSITION']['Y']
v_Z = data['VICTIM']['POSITION']['Z']
k_id = data['
|
KILLER']['STEAM_ID']
v_id = data['VICTIM']['STEAM_ID']
distance = math.sqrt((v_X - k_X) ** 2 + (v_Y - k_Y) ** 2 + (v_Z - k_Z) ** 2)
|
height = abs(data['KILLER']['POSITION']['Z'] - data['VICTIM']['POSITION']['Z'])
killer_name = data['KILLER']['NAME']
victim_name = data['VICTIM']['NAME']
players = self.players()
map_name = self.game.map.lower()
minheight = 100 #min height difference to register midairs
mindistance = 300 #min length distance to register midairs
if height > minheight and distance > mindistance:
self.db.zadd(MIDAIR_KEY.format(map_name), distance, "{},{},{}".format(k_id, v_id, int(time.time())))
self.db.zincrby(MIDAIR_KEY.format(map_name) + ":count", k_id, 1)
self.db.zadd(PLAYER_KEY.format(k_id) + ":midair:" + str(map_name), distance, "{},{}".format(v_id, int(time.time())))
self.db.sadd(PLAYER_KEY.format(k_id) + ":midair", v_id)
self.db.incr(PLAYER_KEY.format(k_id) + ":midair:" + v_id)
killer_score = self.db[PLAYER_KEY.format(k_id) + ":midair:" + v_id]
victim_score = 0
if PLAYER_KEY.format(v_id) + ":midair:" + k_id in self.db:
victim_score = self.db[PLAYER_KEY.format(v_id) + ":midair:" + k_id]
if distance <= self.record:
msg = "{} killed {} from a distance of: ^1{} ^7units. Score: ^2{}^7:^2{}".format(killer_name, victim_name, round(distance), killer_score, victim_score)
for p in players:
if self.db.get_flag(p, "essentials:sounds_enabled", default=True):
self.play_sound("sound/vo_evil/holy_shit", p)
self.msg(msg)
elif distance > self.record:
msg = "^1New map record^7! {} killed {} from a distance of: ^1{} ^7units. Score: ^2{}^7:^2{}".format(killer_name, victim_name, round(distance), killer_score, victim_score)
for p in players:
if self.db.get_flag(p, "essentials:sounds_enabled", default=True):
self.play_sound("sound/vo_evil/new_high_score", p)
self.msg(msg)
self.record = distance
if not self.top_midair:
self.top_midair = {'k_name': killer_name, 'v_name': victim_name, 'units': distance}
else:
if distance > self.top_midair['units']:
self.top_midair = {'k_name': killer_name, 'v_name': victim_name, 'units': distance}
def cmd_topshots(self, player, msg, channel):
x = 5 #how many topshots to list
map_name = self.game.map.lower()
topshots = self.db.zrevrange(MIDAIR_KEY.format(map_name), 0, x-1, withscores=True)
player.tell("^1Midair ^7topshots for map ^1" + map_name + "^7:\n")
i = 1
for shot, distance in topshots:
k_id, v_id, timestamp = map(lambda el: int(el), shot.split(","))
k_id_name = self.db.lindex(PLAYER_KEY.format(k_id), 0)
v_id_name = self.db.lindex(PLAYER_KEY.format(v_id), 0)
if not k_id_name:
player.tell("^2" + str(i) + "^7: BOT killed {} from a distance of: ^1{} ^7units.".format(v_id_name, round(distance)))
elif not v_id_name:
player.tell("^2" + str(i) + "^7: {} killed BOT from a distance of: ^1{} ^7units.".format(k_id_name, round(distance)))
else:
player.tell("^2" + str(i) + "^7: {} killed {} from a distance of: ^1{} ^7units.".format(k_id_name, v_id_name, round(distance)))
i += 1
def cmd_mytopshots(self, player, msg, channel):
x = 10 #how many topshots to list
map_name = self.game.map.lower()
topshots = self.db.zrevrange(PLAYER_KEY.format(player.steam_id) + ":midair:" + str(map_name), 0, x-1, withscores=True)
player.tell("^7Your ^1midair ^7topshots for map ^1" + map_name + "^7:\n")
i = 1
for shot, distance in topshots:
v_id, timestamp = map(lambda el: int(el), shot.split(","))
v_id_name = self.db.lindex(PLAYER_KEY.format(v_id), 0)
if not v_id_name:
player.tell("^2" + str(i) + "^7: Victim: BOT, distance: ^1{} ^7units.".format(round(distance)))
else:
player.tell("^2" + str(i) + "^7: Victim: {}, distance: ^1{} ^7units.".format(v_id_name, round(distance)))
i += 1
def cmd_killstats(self, player, msg, channel):
x = 5 #how many to list
map_name = self.game.map.lower()
killstats = self.db.zrevrange(MIDAIR_KEY.format(map_name) + ":count", 0, x-1, withscores=True)
player.tell("^7Most midair kills for map ^1" + map_name + "^7:\n")
i = 1
for steamid, count in killstats:
name = self.db.lindex(PLA
|
suutari/shoop
|
shuup_workbench/__main__.py
|
Python
|
agpl-3.0
| 998
| 0.001002
|
#!/usr/bin/env python
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is
|
licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import warnings
from shuup.utils.deprecation import RemovedInFutureShuupWarning
if __name__ == "__main__":
if not sys.warnoptions:
# Route warnings through python logging
logging.capt
|
ureWarnings(True)
# RemovedInFutureShuupWarning is a subclass of PendingDeprecationWarning which
# is hidden by default, hence we force the "default" behavior
warnings.simplefilter("default", RemovedInFutureShuupWarning)
sys.path.insert(0, os.path.realpath(os.path.dirname(__file__) + "/.."))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shuup_workbench.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
jamesblunt/glances
|
glances/plugins/glances_diskio.py
|
Python
|
lgpl-3.0
| 5,679
| 0.000704
|
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2014 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Disk I/O plugin."""
# Import Glances libs
from glances.core.glances_timer import getTimeSinceLastUpdate
from glances.plugins.glances_plugin import GlancesPlugin
import psutil
class Plugin(GlancesPlugin):
"""Glances' disks I/O plugin.
stats is a list
"""
def __init__(self, args=None):
"""Init the plugin."""
GlancesPlugin.__init__(self, args=args)
# We want to display the stat in the curse interface
self.display_curse = True
# Set the message position
# It is NOT the curse position but the Glances column/line
# Enter -1 to right align
self.column_curse = 0
        # Enter -1 to display at the bottom
self.line_curse = 3
# Init the stats
self.reset()
def reset(self):
"""Reset/init the stats."""
self.stats = []
def update(self):
"""Update disk I/O stats using the input method."""
# Reset stats
self.reset()
if self.get_input() == 'local':
# Update stats using the standard system lib
# Grab the stat using the PsUtil disk_io_counters method
# read_count: number of reads
# write_count: number of writes
# read_bytes: number of bytes read
# write_bytes: number of bytes written
# read_time: time spent reading from disk (in milliseconds)
# write_time: time spent writing to disk (in milliseconds)
try:
diskiocounters = psutil.disk_io_counters(perdisk=True)
except:
return self.stats
# Previous disk IO stats are stored in the diskio_old variable
if not hasattr(self, 'diskio_old'):
                # First call: init the diskio_old var
try:
self.diskio_old = diskiocounters
except (IOError, UnboundLocalError):
pass
else:
                # By storing time data we enable read/s and write/s calculations
                # in the XML/RPC API, which would otherwise be overly difficult
                # work for users of the API
time_since_update = getTimeSinceLastUpdate('disk')
diskio_new = diskiocounters
for disk in diskio_new:
try:
                        # try/except needed to handle dynamic disk creation/deletion
diskstat = {}
diskstat['time_since_update'] = time_since_update
diskstat['disk_name'] = disk
diskstat['read_bytes'] = (
diskio_new[disk].read_bytes -
self.diskio_old[disk].read_bytes)
diskstat['write_bytes'] = (
diskio_new[disk].write_bytes -
self.diskio_old[disk].write_bytes)
except KeyError:
continue
else:
self.stats.append(diskstat)
# Save stats to compute next bitrate
self.diskio_old = diskio_new
elif self.get_input() == 'snmp':
# Update stats using SNMP
# No standard way for the moment...
pass
return self.stats
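    # Note: each entry in self.stats holds the byte *deltas* since the previous
    # update together with 'time_since_update', so per-second rates are obtained
    # as bytes // seconds (this is what msg_curse() below does for R/s and W/s).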
def msg_curse(self, args=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
        # Only process if stats exist and the disk I/O display plugin is enabled...
if self.stats == [] or args.disable_diskio:
return ret
# Build the string message
# Header
msg = '{0:9}'.format(_("DISK I/O"))
ret.append(self.curse_add_line(msg, "TITLE"))
msg = '{0:>7}'.format(_("R/s"))
ret.append(self.curse_add_line(msg))
msg = '{0:>7}'.format(_("W/s"))
ret.append
|
(self.curse_add_line(msg))
# Disk list (sorted by name)
for i in sorted(self.stats, key=lambda diskio: diskio['disk_name']):
            # Do not display hidden disks
if self.is_hide(i['disk_name']):
|
continue
# New line
ret.append(self.curse_new_line())
if len(i['disk_name']) > 9:
# Cut disk name if it is too long
disk_name = '_' + i['disk_name'][-8:]
else:
disk_name = i['disk_name']
msg = '{0:9}'.format(disk_name)
ret.append(self.curse_add_line(msg))
            # Per-second read/write rates for the R/s and W/s columns
            read_rate = self.auto_unit(int(i['read_bytes'] // i['time_since_update']))
            write_rate = self.auto_unit(int(i['write_bytes'] // i['time_since_update']))
            msg = '{0:>7}'.format(read_rate)
            ret.append(self.curse_add_line(msg))
            msg = '{0:>7}'.format(write_rate)
            ret.append(self.curse_add_line(msg))
return ret
|
mas90/pygopherd
|
pygopherd/protocols/http.py
|
Python
|
gpl-2.0
| 12,731
| 0.003692
|
# pygopherd -- Gopher-based protocol server in Python
# module: serve up gopherspace via http
# $Id: http.py,v 1.21 2002/04/26 15:18:10 jgoerzen Exp $
# Copyright (C) 2002 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import SocketServer
import re, binascii
import os, stat, os.path, mimetypes, urllib, time
from pygopherd import handlers, protocols, GopherExceptions
from pygopherd.protocols.base import BaseGopherProtocol
import pygopherd.version
import cgi
class HTTPProtocol(BaseGopherProtocol):
def canhandlerequest(self):
self.requestparts = map(lambda arg: arg.strip(), self.request.split(" "))
return len(self.requestparts) == 3 and \
(self.requestparts[0] == 'GET' or self.requestparts[0] == 'HEAD') and \
self.requestparts[2][0:5] == 'HTTP/'
def headerslurp(self):
if hasattr(self.requesthandler, 'pygopherd_http_slurped'):
# Already slurped.
self.httpheaders = self.requesthandler.pygopherd_http_slurped
return
# Slurp up remaining lines.
self.httpheaders = {}
while 1:
line = self.rfile.readline()
if not len(line):
break
line = line.strip()
if not len(line):
break
splitline = line.split(':', 1)
if len(splitline) == 2:
self.httpheaders[splitline[0].lower(
|
)] = splitline[1]
self.requesthandler.pygopherd_http_slurped = self.httpheaders
def handle(self):
self.canhandlerequest() # To get self.requestparts
self.iconmapping = eval(self.config.get("protocols.http.HTTPProtocol",
"iconmapping"))
self.header
|
slurp()
splitted = self.requestparts[1].split('?')
self.selector = splitted[0]
self.selector = urllib.unquote(self.selector)
self.selector = self.slashnormalize(self.selector)
self.formvals = {}
if len(splitted) >= 2:
self.formvals = cgi.parse_qs(splitted[1])
if self.formvals.has_key('searchrequest'):
self.searchrequest = self.formvals['searchrequest'][0]
icon = re.match('/PYGOPHERD-HTTPPROTO-ICONS/(.+)$', self.selector)
if icon:
iconname = icon.group(1)
if icons.has_key(iconname):
self.wfile.write("HTTP/1.0 200 OK\r\n")
self.wfile.write("Last-Modified: Fri, 14 Dec 2001 21:19:47 GMT\r\n")
self.wfile.write("Content-Type: image/gif\r\n\r\n")
if self.requestparts[0] == 'HEAD':
return
self.wfile.write(binascii.unhexlify(icons[iconname]))
return
try:
handler = self.gethandler()
self.log(handler)
self.entry = handler.getentry()
handler.prepare()
self.wfile.write("HTTP/1.0 200 OK\r\n")
if self.entry.getmtime() != None:
gmtime = time.gmtime(self.entry.getmtime())
mtime = time.strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime)
self.wfile.write("Last-Modified: " + mtime + "\r\n")
mimetype = self.entry.getmimetype()
mimetype = self.adjustmimetype(mimetype)
self.wfile.write("Content-Type: " + mimetype + "\r\n\r\n")
if self.requestparts[0] == 'GET':
if handler.isdir():
self.writedir(self.entry, handler.getdirlist())
else:
self.handlerwrite(self.wfile)
except GopherExceptions.FileNotFound, e:
self.filenotfound(str(e))
except IOError, e:
GopherExceptions.log(e, self, None)
self.filenotfound(e[1])
def handlerwrite(self, wfile):
self.handler.write(wfile)
def adjustmimetype(self, mimetype):
if mimetype == None:
return 'text/plain'
if mimetype == 'application/gopher-menu':
return 'text/html'
return mimetype
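    # Illustrative: gopher menus (application/gopher-menu) are rewritten to
    # text/html for HTTP clients, and entries with no MIME type at all fall back
    # to text/plain.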
def renderobjinfo(self, entry):
url = None
# Decision time....
if re.match('(/|)URL:', entry.getselector()):
# It's a plain URL. Make it that.
url = re.match('(/|)URL:(.+)$', entry.getselector()).group(2)
elif (not entry.gethost()) and (not entry.getport()):
# It's a link to our own server. Make it as such. (relative)
url = urllib.quote(entry.getselector())
else:
# Link to a different server. Make it a gopher URL.
url = entry.geturl(self.server.server_name, 70)
# OK. Render.
return self.getrenderstr(entry, url)
def getrenderstr(self, entry, url):
retstr = '<TR><TD>'
retstr += self.getimgtag(entry)
retstr += "</TD>\n<TD> "
if entry.gettype() != 'i' and entry.gettype() != '7':
retstr += '<A HREF="%s">' % url
retstr += "<TT>"
if entry.getname() != None:
retstr += cgi.escape(entry.getname()).replace(" ", " ")
else:
retstr += cgi.escape(entry.getselector()).replace(" ", " ")
retstr += "</TT>"
if entry.gettype() != 'i' and entry.gettype() != '7':
retstr += '</A>'
if (entry.gettype() == '7'):
retstr += '<BR><FORM METHOD="GET" ACTION="%s">' % url
retstr += '<INPUT TYPE="text" NAME="searchrequest" SIZE="30">'
retstr += '<INPUT TYPE="submit" NAME="Submit" VALUE="Submit">'
retstr += '</FORM>'
retstr += '</TD><TD><FONT SIZE="-2">'
if entry.getmimetype():
subtype = re.search('/.+$', entry.getmimetype())
if subtype:
retstr += cgi.escape(subtype.group()[1:])
retstr += '</FONT></TD></TR>\n'
return retstr
def renderdirstart(self, entry):
        retstr = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">'
retstr += "\n<HTML><HEAD><TITLE>Gopher"
if self.entry.getname():
retstr += ": " + cgi.escape(self.entry.getname())
retstr += "</TITLE></HEAD><BODY>"
if self.config.has_option("protocols.http.HTTPProtocol", "pagetopper"):
retstr += re.sub('GOPHERURL',
self.entry.geturl(self.server.server_name,
self.server.server_port),
self.config.get("protocols.http.HTTPProtocol",
"pagetopper"))
retstr += "<H1>Gopher"
if self.entry.getname():
retstr += ": " + cgi.escape(self.entry.getname())
retstr += '</H1><TABLE WIDTH="100%" CELLSPACING="1" CELLPADDING="0">'
return retstr
def renderdirend(self, entry):
retstr = "</TABLE><HR>\n[<A HREF=\"/\">server top</A>]"
retstr += " [<A HREF=\"%s\">view with gopher</A>]" % \
entry.geturl(self.server.server_name,
self.server.server_port)
retstr += '<BR>Generated by <A HREF="%s">%s</A>' % (
pygopherd.version.homepage, pygopherd.version.productname)
return retstr + "\n</BODY></HTML>\n"
def filenotfound(self, msg):
self.wfile.write("HTTP/1.0 404 Not Found\r\n")
self.wfile.write("Content-Type: tex
|
julien6387/supvisors
|
supvisors/test/scripts/check_starting_strategy.py
|
Python
|
apache-2.0
| 14,658
| 0.001364
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2017 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import sys
import unittest
from supervisor.compat import xmlrpclib
from supervisor.states import STOPPED_STATES
from supervisor.xmlrpc import Faults
from supvisors.ttypes import SupvisorsInstanceStates, StartingStrategies
from .event_queues import SupvisorsEventQueues
from .running_identifiers import RunningIdentifiersTest
class StartingStrategyTest(RunningIdentifiersTest):
""" Test case to check the loading strategies of Supvisors. """
def setUp(self):
""" Get initial status. """
RunningIdentifiersTest.setUp(self)
# check the loading on running instances
self._refresh_loading()
# check that 15 converter programs are STOPPED
processes_info = self.local_supvisors.get_process_info('my_movies:*')
converters = [info for info in processes_info
if info['process_name'].startswith('converter') and info['statecode'] in STOPPED_STATES]
self.assertEqual(15, len(converters))
        # check that 15 converter programs are configured with loading 25
processes_rules = self.local_supvisors.get_process_rules('my_movies:*')
converters = [rules for rules in processes_rules
if rules['process_name'].startswith('converter') and rules['expected_loading'] == 25]
self.assertEqual(15, len(converters))
def tearDown(self):
""" The tearDown stops the converters that may have been started. """
# stop all converters
for idx in range(15):
try:
self.local_supvisors.stop_process(f'my_movies:converter_{idx:02d}')
except Exception:
pass
# call parent
RunningIdentifiersTest.tearDown(self)
def _refresh_loading(self):
""" Get the current loading status. """
nodes_info = self.local_supvisors.get_all_instances_info()
self.loading = {info['identifier']: info['loading']
for info in nodes_info
if info['statecode'] == SupvisorsInstanceStates.RUNNING.value}
def _start_converter(self, idx):
""" Get the current loading status. """
self.local_supvisors.start_process(self.strategy.value, f'my_movies:converter_{idx:02d}')
# wait for event STARTING
event = self._get_next_process_event()
assert {'group': 'my_movies', 'name': f'converter_{idx:02d}', 'state': 10}.items() < event.items()
# wait for event RUNNING
event = self._get_next_process_event()
assert {'group': 'my_movies', 'name': f'converter_{idx:02d}', 'state': 20}.items() < event.items()
# refresh the node loadings
self._refresh_loading()
def _start_converter_failed(self, idx):
""" Get the current loading status. """
with self.assertRaises(xmlrpclib.Fault) as exc:
self.local_supvisors.start_process(self.strategy.value, f'my_movies:converter_{idx:02d}')
self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.faultCode)
|
self.assertEqual(f'ABNORMAL_TERMINATION: my_movies:converter_{idx:02d}', exc.exception.faultString)
# wait for event FATAL
event = self._get_next_process_event()
assert {'group': 'my_movies', 'name': f'converter_{idx:02d}', 'state': 200}.items() < event.items()
# refresh the node loadings
self._refresh_loading()
def test_config(self):
""" Test the CONFIG starting strategy.
Start converters and check they have been started on the first node
|
available defined in the program section of the rules file. """
print('### Testing CONFIG starting strategy')
# initial state is cliche81=10% cliche82=15% cliche83=9% cliche85=0%
assert list(self.loading.values()) == [10, 15, 9, 0]
self.strategy = StartingStrategies.CONFIG
        # no node config for almost all converters (except 04 and 07)
# so applicable order is the one defined in the supvisors section,
# i.e. cliche81, cliche82, cliche83, cliche84 (not running), cliche85
self._start_converter(0)
self.assertEqual([35, 15, 9, 0], list(self.loading.values()))
# continue with cliche81
self._start_converter(1)
self.assertEqual([60, 15, 9, 0], list(self.loading.values()))
# try with converter_04 to check the alt config where cliche83 comes first
self._start_converter(4)
self.assertEqual([60, 15, 34, 0], list(self.loading.values()))
# there is still place on cliche81
self._start_converter(2)
self.assertEqual([85, 15, 34, 0], list(self.loading.values()))
# cliche81 is full. cliche82 will be used now
self._start_converter(3)
self.assertEqual([85, 40, 34, 0], list(self.loading.values()))
# there is still place on cliche82
# try with converter_07 to check the alt config
# cliche81 is full, so second node in config will be used (cliche83)
self._start_converter(7)
self.assertEqual([85, 40, 59, 0], list(self.loading.values()))
# there is still place on cliche82
self._start_converter(5)
self.assertEqual([85, 65, 59, 0], list(self.loading.values()))
# cliche81 is full. cliche82 will be used now
self._start_converter(6)
self.assertEqual([85, 90, 59, 0], list(self.loading.values()))
# cliche81 & cliche82 are full. cliche83 will be used now
self._start_converter(8)
self.assertEqual([85, 90, 84, 0], list(self.loading.values()))
# cliche81 & cliche82 & cliche83 are full. cliche85 is empty but node (cliche81 + cliche85) is full
self._start_converter_failed(9)
self.assertEqual([85, 90, 84, 0], list(self.loading.values()))
def test_less_loaded(self):
""" Test the LESS_LOADED starting strategy.
Start converters and check they have been started on the Supvisors instance having the lowest load. """
print('### Testing LESS_LOADED starting strategy')
# initial state is cliche81=10% cliche82=15% cliche83=9% cliche85=0%
assert list(self.loading.values()) == [10, 15, 9, 0]
self.strategy = StartingStrategies.LESS_LOADED
self._start_converter(0)
self.assertEqual([10, 15, 9, 25], list(self.loading.values()))
self._start_converter(1)
self.assertEqual([10, 15, 34, 25], list(self.loading.values()))
self._start_converter(2)
self.assertEqual([35, 15, 34, 25], list(self.loading.values()))
self._start_converter(3)
self.assertEqual([35, 40, 34, 25], list(self.loading.values()))
# converter 4 cannot run onto cliche85
self._start_converter(4)
self.assertEqual([35, 40, 59, 25], list(self.loading.values()))
self._start_converter(5)
self.assertEqual([35, 40, 59, 50], list(self.loading.values()))
self._start_converter(6)
self.assertEqual([35, 65, 59, 50], list(self.loading.values()))
# converter 7 cannot run onto cliche85
self._start_converter(7)
self.assertEqual([35, 65, 84, 50], list(self.loading.values()))
self._start_converter(8)
self.assertEqual([35, 90, 84, 50], list(self.loading.values()))
# last converter cannot be started: no resource left
self._start_converter_failed(9)
self.assertEqual([35, 90, 84, 50],
|
evenmarbles/mlpy
|
mlpy/auxiliary/datastructs.py
|
Python
|
mit
| 10,818
| 0.000277
|
"""
.. module:: mlpy.auxiliary.datastructs
:platform: Unix, Windows
:synopsis: Provides data structure implementations.
.. moduleauthor:: Astrid Jackson <ajackson@eecs.ucf.edu>
"""
from __future__ import division, print_function, absolute_import
import heapq
import numpy as np
from abc import ABCMeta, abstra
|
ctmethod
class Array(object):
"""The managed array class.
The managed array class pre-allocates memory to
|
the given size
automatically resizing as needed.
Parameters
----------
size : int
The size of the array.
Examples
--------
>>> a = Array(5)
>>> a[0] = 3
>>> a[1] = 6
    Retrieving an element:
>>> a[0]
3
>>> a[2]
0
Finding the length of the array:
>>> len(a)
2
"""
def __init__(self, size):
self._data = np.zeros((size,))
self._capacity = size
self._size = 0
def __setitem__(self, index, value):
"""Set the the array at the index to the given value.
Parameters
----------
index : int
The index into the array.
value :
The value to set the array to.
"""
if index >= self._size:
if self._size == self._capacity:
self._capacity *= 2
new_data = np.zeros((self._capacity,))
new_data[:self._size] = self._data
self._data = new_data
self._size += 1
self._data[index] = value
def __getitem__(self, index):
"""Get the value at the given index.
Parameters
----------
index : int
The index into the array.
"""
return self._data[index]
def __len__(self):
"""The length of the array.
Returns
-------
int :
The size of the array
"""
return self._size
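# Illustrative: with a = Array(2), assigning a[0], a[1] and then a[2] takes the
# resize branch in __setitem__, doubling the backing buffer to capacity 4 while
# len(a) becomes 3.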
class Point2D(object):
"""The 2d-point class.
The 2d-point class is a container for positions
in a 2d-coordinate system.
Parameters
----------
x : float, optional
The x-position in a 2d-coordinate system. Default is 0.0.
y : float, optional
The y-position in a 2d-coordinate system. Default is 0.0.
Attributes
----------
x : float
The x-position in a 2d-coordinate system.
y : float
The y-position in a 2d-coordinate system.
"""
__slots__ = ['x', 'y']
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Point3D(object):
"""
The 3d-point class.
The 3d-point class is a container for positions
in a 3d-coordinate system.
Parameters
----------
x : float, optional
The x-position in a 2d-coordinate system. Default is 0.0.
y : float, optional
The y-position in a 2d-coordinate system. Default is 0.0.
z : float, optional
The z-position in a 3d-coordinate system. Default is 0.0.
Attributes
----------
x : float
The x-position in a 2d-coordinate system.
y : float
The y-position in a 2d-coordinate system.
z : float
The z-position in a 3d-coordinate system.
"""
__slots__ = ['x', 'y', 'z']
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
class Vector3D(Point3D):
"""The 3d-vector class.
.. todo::
Implement vector functionality.
Parameters
----------
x : float, optional
The x-position in a 2d-coordinate system. Default is 0.0.
y : float, optional
The y-position in a 2d-coordinate system. Default is 0.0.
z : float, optional
The z-position in a 3d-coordinate system. Default is 0.0.
Attributes
----------
x : float
The x-position in a 2d-coordinate system.
y : float
The y-position in a 2d-coordinate system.
z : float
The z-position in a 3d-coordinate system.
"""
def __init__(self, x=0.0, y=0.0, z=0.0):
super(Vector3D, self).__init__(x, y, z)
class Queue(object):
"""The abstract queue base class.
The queue class handles core functionality common for
any type of queue. All queues inherit from the queue
base class.
See Also
--------
:class:`FIFOQueue`, :class:`PriorityQueue`
"""
__metaclass__ = ABCMeta
def __init__(self):
self._queue = []
def __len__(self):
return len(self._queue)
def __contains__(self, item):
try:
self._queue.index(item)
return True
except Exception:
return False
def __iter__(self):
return iter(self._queue)
def __str__(self):
return '[' + ', '.join('{}'.format(el) for el in self._queue) + ']'
def __repr__(self):
return ', '.join('{}'.format(el) for el in self._queue)
@abstractmethod
def push(self, item):
"""Push a new element on the queue
Parameters
----------
item :
The element to push on the queue
"""
raise NotImplementedError
@abstractmethod
def pop(self):
"""Pop an element from the queue."""
raise NotImplementedError
def empty(self):
"""Check if the queue is empty.
Returns
-------
bool :
Whether the queue is empty.
"""
return len(self._queue) <= 0
def extend(self, items):
"""Extend the queue by a number of elements.
Parameters
----------
items : list
A list of items.
"""
for item in items:
self.push(item)
def get(self, item):
"""Return the element in the queue identical to `item`.
Parameters
----------
item :
The element to search for.
Returns
-------
The element in the queue identical to `item`. If the element
was not found, None is returned.
"""
try:
index = self._queue.index(item)
return self._queue[index]
except Exception:
return None
def remove(self, item):
"""Remove an element from the queue.
Parameters
----------
item :
The element to remove.
"""
self._queue.remove(item)
class FIFOQueue(Queue):
"""The first-in-first-out (FIFO) queue.
In a FIFO queue the first element added to the queue
is the first element to be removed.
Examples
--------
>>> q = FIFOQueue()
>>> q.push(5)
>>> q.extend([1, 3, 7])
>>> print q
[5, 1, 3, 7]
Retrieving an element:
>>> q.pop()
5
Removing an element:
>>> q.remove(3)
>>> print q
[1, 7]
Get the element in the queue identical to the given item:
>>> q.get(7)
7
Check if the queue is empty:
>>> q.empty()
False
Loop over the elements in the queue:
>>> for x in q:
>>> print x
1
7
Check if an element is in the queue:
>>> if 7 in q:
>>> print "yes"
yes
See Also
--------
:class:`PriorityQueue`
"""
def __init__(self):
super(FIFOQueue, self).__init__()
def push(self, item):
"""Push an element to the end of the queue.
Parameters
----------
item :
The element to append.
"""
self._queue.append(item)
def pop(self):
"""Return the element at the front of the queue.
Returns
-------
The first element in the queue.
"""
return self._queue.pop(0)
def extend(self, items):
"""Append a list of elements at the end of the queue.
Parameters
----------
items : list
List of elements.
"""
self._queue.extend(items)
class PriorityQueue(Queue):
"""
The priority queue.
In a priority queue each element has a priority associated with it. An element
with high priority (i.e., smallest value) is served before an element with low priority
(i.e., largest value). The priority
|
Pixdigit/Saufbot
|
logger.py
|
Python
|
mit
| 1,904
| 0.003155
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import string
import json
import config
import helper
import busses
def log_message(msg):
#log format: time type message
time_str = str(time.time())
line = time_str[:time_str.find(".")]
line = line.rjust(10, str(" "))
line += " "
busses.status_bus["latest_messages"][msg.chat_id] = msg
msg_type = helper.get_message_type(msg)
if msg_type == "text" and msg.text.startswith("/"):
msg_type = "command"
appendix = "ERROR"
if msg_type == "text":
appendix = msg.text
elif
|
msg_type == "command":
appendix = msg.text[1:]
elif msg_type == "location":
location_data = msg.location.to_dict()
ap
|
pendix = str(location_data["latitude"]) + "°, " + str(location_data["longitude"]) + "°"
elif msg_type == "contact":
appendix = str(msg.contact.user_id) + " " + msg.contact.first_name + " " + msg.contact.last_name
elif msg_type == "new_user":
appendix = str(msg.new_chat_member.id) + " " + str(msg.new_chat_member.first_name) + " " + str(msg.new_chat_member.last_name)
elif msg_type in ["audio", "document", "game", "photo", "sticker", "video", "voice", "video_note", "unknown"]:
appendix = ""
msg_type = msg_type.rjust(10, str(" "))
appendix = appendix.replace("\n", "\\n").rjust(40, str(" "))
line += msg_type + " " + appendix + " "
line += str(msg.chat_id) + "," + str(msg.message_id)
line += "\n"
with open(config.msg_log_file_path, "a") as log_file:
log_file.write(line.encode("utf-8"))
def complete_log(update):
with open(config.complete_log_file_path, "a") as log_file:
data = update.to_dict()
data.update({"time": time.time()})
json_data = json.dumps(data)
        log_file.write((str(json_data).replace("\n", "\\n") + "\n").encode("utf-8"))
|
AISpace2/AISpace2
|
aipython/cspSLSPlot.py
|
Python
|
gpl-3.0
| 12,720
| 0.001101
|
# cspSLS.py - Stochastic Local Search for Solving CSPs
# AIFCA Python3 code Version 0.7.1 Documentation at http://aipython.org
# Artificial Intelligence: Foundations of Computational Agents
# http://artint.info
# Copyright David L Poole and Alan K Mackworth 2017.
# This work is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en
import heapq
import random
import matplotlib.pyplot as plt
from aipython.cspProblem import CSP, Constraint
from aipython.searchProblem import Arc, Search_problem
from aipython.utilities import Displayable
class SLSearcher(Displayable):
"""A search problem directly from the CSP..
A node is a variable:value dictionary"""
def __init__(self, csp):
self.csp = csp
self.variables_to_select = {var for var in self.csp.variables
if len(self.csp.domains[var]) > 1}
# Create assignment and conflicts set
self.current_assignment = None # this will trigger a random restart
self.number_of_steps = 1 # number of steps after the initialization
super().__init__()
def restart(self):
"""creates a new total assignment and the conflict set
"""
self.current_assignment = {var: random_sample(dom) for (var, dom) in self.csp.domains.items()}
self.display(2, "Initial assignment", self.current_assignment)
self.conflicts = set()
for con in self.csp.constraints:
if not con.holds(self.current_assignment):
self.conflicts.add(con)
self.display(2, "Conflicts:", self.conflicts)
self.variable_pq = None
def search(self, max_steps=500, prob_best=1.0, prob_anycon=1.0):
"""
returns the number of steps or None if these is no solution
if there is a solution, it can be found in self.current_assignment
"""
if self.current_assignment is None:
self.restart()
self.number_of_steps += 1
if not self.conflicts:
return self.number_of_steps
if prob_best > 0: # we need to maintain a variable priority queue
return self.search_with_var_pq(max_steps, prob_best, prob_anycon)
else:
return self.search_with_any_conflict(max_steps, prob_anycon)
def search_with_any_conflict(self, max_steps, prob_anycon=1.0):
"""Searches with the any_conflict heuristic.
This relies on just maintaining the set of conflicts;
it does not maintain a priority queue
"""
self.variable_pq = None # we are not maintaining the priority queue.
# This ensures it is regenerated if needed.
for i in range(max_steps):
self.number_of_steps += 1
if random.random() < prob_anycon:
con = random_sample(self.conflicts) # pick random conflict
var = random_sample(con.scope) # pick variable in conflict
else:
var = random_sample(self.variables_to_select)
if len(self.csp.domains[var]) > 1:
val = random_sample(self.csp.domains[var] -
{self.current_assignment[var]})
self.display(2, "Assigning", var, "=", val)
self.current_assignment[var] = val
for varcon in self.csp.var_to_const[var]:
if varcon.holds(self.current_assignment):
if varcon in self.conflicts:
self.conflicts.remove(varcon)
self.display(3, "Became consistent", varcon)
else:
self.display(3, "Still consistent", varcon)
else:
if varcon not in self.conflicts:
self.conflicts.add(varcon)
self.display(3, "Became inconsistent", varcon)
else:
self.display(3, "Still inconsistent", varcon)
self.display(2, "Conflicts:", self.conflicts)
if not self.conflicts:
self.display(1, "Solution found", self.current_assignment,
"in", self.number_of_steps, "steps")
return self.number_of_steps
self.display(1, "No solution in", self.number_of_steps, "steps",
len(self.conflicts), "conflicts remain")
return None
def search_with_var_pq(self, max_steps, prob_best=1.0, prob_anycon=1.0):
"""search with a priority queue of variables.
This is used to select a variable with the most conflicts.
"""
if not self.variable_pq:
self.create_pq()
pick_best_or_con = prob_best + prob_anycon
for i in range(max_steps):
self.number_of_steps += 1
randnum = random.random()
# Pick a variable
if randnum < prob_best: # pick best variable
var, oldval = self.variable_pq.top()
elif randnum < pick_best_or_con: # pick a variable in a conflict
con = random_sample(self.conflicts)
var = random_sample(con.scope)
else: # pick any variable that can be selected
var = random_sample(self.variables_to_select)
if len(self.csp.domains[var]) > 1: # var has other values
# Pick a value
val = random_sample(self.csp.domains[var] - {self.current_assignment[var]})
self.display(2, "Assigning", var, "=", val)
# Update the priority queue
var_differential = {}
self.current_assignment[var] = val
for varcon in self.csp.var_to_const[var]:
self.display(3, "Checking", varcon)
if varcon.holds(self.current_assignment):
if varcon in self.conflicts: # was incons, now consis
self.display(3, "Became consistent", varcon)
self.conflicts.remove(varcon)
for v in varcon.scope: # v is in one fewer conflicts
var_differential[v] = var_differential.get(v, 0) - 1
else:
self.display(3, "Still consistent", varcon)
else:
if varcon not in self.conflicts: # was consis, not now
self.display(3, "Became inconsistent", varcon)
self.conflicts.add(varcon)
for v in varcon.scope: # v is in one more conflicts
|
var_differential[v] = var_differential.get(v, 0) + 1
else:
self.display(3, "Still inconsistent", varcon)
self.variable_pq.update_each_priority(var_differential)
self.display(2, "Conflicts:", self.conflicts)
if not self.conflicts: # no conflicts, so solution found
self.display(1, "Solution found", self.current_assignment
|
, "in", self.number_of_steps, "steps")
return self.number_of_steps
self.display(1, "No solution in", self.number_of_steps, "steps", len(self.conflicts), "conflicts remain")
return None
def create_pq(self):
"""Create the variable to number-of-conflicts priority queue.
This is needed to select the variable in the most conflicts.
The value of a variable in the priority queue is the negative of the
number of conflicts the variable appears in.
"""
self.variable_pq = Updatable_priority_queue()
var_to_number_conflicts = {}
for con in self.conflicts:
for var in con.scope:
var_to_number_conflicts[var] = var_to_number_conflicts.get(var, 0) + 1
for var, num in var_to_number_conflicts.items():
if num > 0:
self.variable_pq.add(var, -num)
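    # Note: conflict counts are stored negated, presumably because the
    # heapq-based priority queue pops the smallest value first; this way the
    # variable involved in the most conflicts surfaces at the top of the queue.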
def random_samp
|
caktus/rapidsms-reports
|
reports/tests/base.py
|
Python
|
bsd-3-clause
| 1,259
| 0
|
from __future__ import unicode_literals
import datetime
import random
from django.conf import settings
from rapidsms.tests.harness import RapidTest
from healthcare.api import client
class ReportTestBase(RapidTest):
def setUp(self):
# Before doing anything else, we must clear out the dummy backend
# as this is not automatically flushed between tests.
self.clear_healthcare_backends()
def clear_healthcare_backends(self):
if 'healthcare.backends.dummy' in settings.INSTALLED_APPS:
for registry in (client.providers, client.patients):
registry.backe
|
nd._patients = {}
registry.backend._patient_ids = {}
registry.backend._providers = {}
def create_patient(self, **kwargs):
defaults = {
'name': self.random_string(25),
'birth_date': datetime.date.today() - datetime.timedelta(365),
'sex': random.choice(['M', 'F'])
}
defaults.update(kwargs)
return client.patients.create(**defaults)
|
def create_provider(self, **kwargs):
defaults = {
'name': self.random_string(25),
}
defaults.update(kwargs)
return client.providers.create(**defaults)
|
mrakitin/sirepo
|
tests/template/model_units_test.py
|
Python
|
apache-2.0
| 1,624
| 0.001847
|
# -*- coding: utf-8 -*-
u"""Test sirepo.cooki
|
e
:copyright: Copy
|
right (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_model_units():
from sirepo.template.template_common import ModelUnits
import re
def _xpas(value, is_native):
cm_to_m = lambda v: ModelUnits.scale_value(v, 'cm_to_m', is_native)
if is_native:
if re.search(r'^#', str(value)):
value = re.sub(r'^#', '', value)
return map(cm_to_m, value.split('|'))
else:
if type(value) is list:
return '#' + '|'.join(map(str, map(lambda v: int(cm_to_m(v)), value)))
return cm_to_m(value)
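    # Illustrative (from the assertions below): a native XPAS string such as
    # '#20|20|20' (centimetres) scales to [0.2, 0.2, 0.2] in sirepo units, and a
    # plain '20' scales to 0.2.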
units = ModelUnits({
'CHANGREF': {
'XCE': 'cm_to_m',
'YCE': 'cm_to_m',
'ALE': 'deg_to_rad',
'XPAS': _xpas,
},
})
native_model = {
'XCE': 2,
'YCE': 0,
'ALE': 8,
'XPAS': '#20|20|20',
}
sirepo_model = units.scale_from_native('CHANGREF', native_model.copy())
assert sirepo_model == {
'XCE': 2e-2,
'YCE': 0,
'ALE': 0.13962634015954636,
'XPAS': [2e-1, 2e-1, 2e-1],
}
assert native_model == units.scale_to_native('CHANGREF', sirepo_model.copy())
assert units.scale_from_native('CHANGREF', {
'XPAS': '20',
})['XPAS'] == 0.2
assert ModelUnits.scale_value(2, 'cm_to_m', True) == 2e-2
assert ModelUnits.scale_value(0.02, 'cm_to_m', False) == 2
|
Tong-Chen/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
Python
|
bsd-3-clause
| 30,538
| 0.000295
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.decision_function(self, X, *args, **kw)
def predict_proba(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.predict_proba(self, X, *args, **kw)
def predict_log_proba(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.predict_log_proba(self, X, *args, **kw)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
##
## Test Data
##
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
##
## Classification Test Case
##
class CommonTest(object):
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
#... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
"""Input format tests. """
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
"""Test whether clone works ok. """
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit
|
pmquang/python-anyconfig
|
anyconfig/schema.py
|
Python
|
mit
| 4,139
| 0
|
#
# Copyright (C) 2015 Satoru SATOH <ssato redhat.com>
# License: MIT
#
"""anyconfig.schema module.
.. versionadded:: 0.0.11
Added new API :function:`gen_schema` to generate schema object
.. versionadded:: 0.0.10
Added new API :function:`validate` to validate config with JSON schema
"""
from __future__ import absolute_import
import anyconfig.compat
try:
import jsonschema
except ImportError:
pass
_SIMPLETYPE_MAP = {list: "array", tuple: "array",
bool: "boolean",
int: "integer", float: "number",
dict: "object",
str: "string"}
_SIMPLE_TYPES = (bool, int, float, str)
if not anyconfig.compat.IS_PYTHON_3:
_SIMPLETYPE_MAP[unicode] = "string"
_SIMPLE_TYPES = (bool, int, float, str, unicode)
def validate(obj, schema, format_checker=None, safe=True):
"""
Validate target object with given schema object, loaded from JSON schema.
See also: https://python-jsonschema.readthedocs.org/en/latest/validate/
:param obj: Target object (a dict or a dict-like object) to validate
:param schema: Schema object (a dict or a dict-like object)
instantiated from schema JSON file or schema JSON string
:param format_checker: A format property checker object of which class is
inherited from jsonschema.FormatChecker, it's default if None given.
:param safe: If True, validation errors are caught and reported in the
return value instead of being raised; if False, jsonschema.ValidationError
or jsonschema.SchemaError is raised when validation fails.
:return: (True if validation succeeded else False, error message)
"""
try:
if format_checker is None:
format_checker = jsonschema.FormatChecker()  # :raises: NameError
try:
jsonschema.validate(obj, schema, format_checker=format_checker)
return (True, '')
except (jsonschema.ValidationError, jsonschema.SchemaError,
Exception) as exc:
if safe:
return (False, str(exc))
else:
raise
except NameError:
return (True, "Validation module (jsonschema)
|
is not available")
return (True, '')
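# A minimal usage sketch (names and values below are illustrative, not part of
# this module):
#
#     schema = {"type": "object",
#               "properties": {"port": {"type": "integer"}}}
#     (rc, err) = validate({"port": 8080}, schema)
#     # rc is True and err is '' when jsonschema accepts the object; with
#     # safe=True a failing document comes back as (False, "<error message>").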
def array_to_schema_node(arr, typemap=None):
"""
Generate a node that represents a JSON schema object, with type annotation
added, for the items of the given array.
:param arr: Array of dict or MergeableDict objects
:param typemap: Type to JSON schema type mappings
:return: Another MergeableDict instance that represents the JSON schema of the items
"""
if typemap is None:
typemap = _SIMPLETYPE_MAP
if arr:
return gen_schema(arr[0], typemap)
else:
return gen_schema("str", typemap)
def object_to_schema_nodes_iter(obj, typemap=None):
"""
Generate nodes that represent JSON schema objects, with type annotations
added, for the members of the given object node.
:param obj: Dict or MergeableDict object
:param typemap: Type to JSON schema type mappings
:yield: (key, schema) pairs where each schema is a MergeableDict instance that represents the JSON schema of the corresponding value
"""
if typemap is None:
typemap = _SIMPLETYPE_MAP
for key, val in anyconfig.compat.iteritems(obj):
yield (key, gen_schema(val, typemap=typemap))
def gen_schema(node, typemap=None):
"""
Generate a node that represents a JSON schema object, with type annotation
added, for the given object node.
:param node: Object node :: MergeableDict
:param typemap: Type to JSON schema type mappings
:return: Another MergeableDict instance that represents the JSON schema of this node
"""
if typemap is None:
typemap = _SIMPLETYPE_MAP
ret = dict(type="null")
if node is None:
return ret
_type = type(node)
if _type in _SIMPLE_TYPES:
ret = dict(type=typemap[_type])
elif isinstance(node, dict):
props = object_to_schema_nodes_iter(node, typemap)
ret = dict(type=typemap[dict], properties=dict(props))
elif _type in (list, tuple) or hasattr(node, "__iter__"):
ret = dict(type=typemap[list],
items=array_to_schema_node(node, typemap))
return ret
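# Worked example (illustrative, not part of the module):
#
#     gen_schema({"name": "a", "ports": [22]})
#     # => {"type": "object",
#     #     "properties": {"name": {"type": "string"},
#     #                    "ports": {"type": "array",
#     #                              "items": {"type": "integer"}}}}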
# vim:sw=4:ts=4:et:
|
DigitalCampus/django-nurhi-oppia
|
docs/conf.py
|
Python
|
gpl-3.0
| 8,476
| 0.007433
|
# -*- coding: utf-8 -*-
#
# OppiaMobile-Server documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 25 16:03:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.abspath('../oppia/'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
if django.VERSION < (1, 4):
from django.core.management import setup_environ
settings = __import__(os.environ["DJANGO_SETTINGS_MODULE"])
setup_environ(settings)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OppiaMobile'
copyright = u'%d, Digital Campus' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.10.1'
# The full version, including alpha/beta/rc tags.
release = '0.10.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> docu
|
mentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OppiaMobile'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OppiaMobile.tex', u'OppiaMobile Documentation',
u'Alex Little, Digital Campus', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'oppiamobile-server', u'OppiaMobile-Server Documentation',
[u'Alex Little, Digital Campus'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OppiaMobile', u'OppiaMobile Documentation',
u'Alex Little, Digital Campus', 'OppiaMobile', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#tex
|
amiv-eth/amivapi
|
amivapi/tests/test_cascade_delete.py
|
Python
|
agpl-3.0
| 1,659
| 0
|
# -*- coding: utf-8 -*-
#
# license: AGPLv3, see LICENSE for details. In addition we strongly encourage
# you to buy us beer if we meet and you like the software.
"""Test for cascading deletes"""
from bson import ObjectId
from amivapi.tests.utils import WebTestNoAuth
class CascadingDeleteTest(WebTestNoAuth):
def test_delete_cascades(self):
"""Test that deletion of an object deletes referencing objects, when
|
cascading is enabled"""
self.load_fixture({
'users': [
{
'_id': 'deadbeefdeadbeefdeadbeef',
'nethz': 'user1'
},
{
'_id': 'deadbeefdeadbeefdeadbee3',
'nethz': 'user2'
}
],
'sessions': [
{
'_id': 'deadbeefdeadbeefdeadbee2',
'username': 'user1'
},
{
'_id': 'deadbeefdeadbeefdeadbee1',
'username': 'user2'
}
]
})
user1 = self.api.get("/users/deadbeefdeadbeefdeadbeef",
status_code=200).json
self.api.delete("/users/deadbeefdeadbeefdeadbeef",
headers={'If-Match': user1['_etag']},
status_code=204)
session_count = self.db['sessions'].count_documents({})
self.assertEqual(session_count, 1)
session_count = self.db['sessions'].count_documents({
'user': ObjectId('deadbeefdeadbeefdeadbeef')})
self.assertEqual(session_count, 0)
|
ehuelsmann/openipam
|
openIPAM/openipam/web/admin/system/system.py
|
Python
|
gpl-3.0
| 1,167
| 0.042845
|
import cherrypy
from openipam.web.basepage import BasePage
from openipam.web.admin.admin import Admin
from openipam.web.resource.submenu import submenu
class AdminSystem(Admin):
'''The admin system settings class. This includes all pages that are /admin/sys/*'''
#-----------------------------------------------------------------
# PUBLISHED FUNCTIONS
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
# EXPOSED FUNCTIONS
#-----------------------------------------------------------------
@cherrypy.expose
def index(self):
'''The settings management page'''
# Confirm user authentication
self.check_session()
leftnav = str(self.leftnav_manage("System Settings"))
text = '<h1>System Settings</h1><div class="message">Under construction</div>'
return self._template.wrap(text, leftnav)
# TODO: make sure to add checkbox for host change digest emails
#-----------------------------------------------------------------
|
genialis/resolwe-bio
|
resolwe_bio/processes/support_processors/bam_conversion.py
|
Python
|
apache-2.0
| 4,807
| 0.00104
|
"""Converting BAM to BEDPE and normalized BigWig files."""
import os
from resolwe.process import (
Cmd,
DataField,
FileField,
FloatField,
Process,
SchedulingClass,
StringField,
)
class BamToBedpe(Process):
"""Takes in a BAM file and calculates a normalization factor in BEDPE format.
Done by sorting with Samtools and transformed with Bedtools.
"""
slug = "bedtools-bamtobed"
name = "Bedtools bamtobed"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
},
"resources": {"cores": 1, "memory": 8192},
}
data_name = "Bedtools bamtobed ({{alignment|sample_name|default('?')}})"
version = "1.2.0"
process_type = "data:bedpe"
category = "Other"
entity = {"type": "sample"}
scheduling_class = SchedulingClass.BATCH
class Input:
"""Input fields."""
alignment = DataField("alignment:bam", label="Alignment BAM file")
class Output:
"""Output fields."""
bedpe = FileField(label="BEDPE file")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run the analysis."""
path = inputs.alignment.output.bam.path
basename = os.path.basename(path)
assert basename.endswith(".bam")
name = basename[:-4]
bedpe_file = f"{name}.bedpe"
samtools_param = ["-n", path]
bedtools_param = ["-bedpe", "-i"]
(
Cmd["samtools"]["sort"][samtools_param]
| Cmd["bedtools"]["bamtobed"][bedtools_param]
> bedpe_file
)()
if not os.path.exists(bedpe_file):
self.error("Converting BAM to BEDPE with Bedtools bamtobed failed.")
outputs.bedpe = bedpe_file
outputs.species = inputs.alignment.output.species
outputs.build = inputs.alignment.output.build
class ScaleBigWig(Process):
"""Creates a scaled BigWig file."""
slug = "scale-bigwig"
name = "Deeptools bamCoverage"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
},
"resources": {"cores": 1, "memory": 16384},
}
data_name = "Scale BigWig ({{alignment|sample_name|default('?')}})"
version = "1.2.0"
process_type = "data:coverage:bigwig"
category = "Other"
entity = {"type": "sample"}
scheduling_class = SchedulingClass.BATCH
class Input:
"""Input fields."""
alignment = DataField("alignment:bam", label="Alignment BAM file")
bedpe = DataField(
"bedpe",
label="BEDPE Normalization factor",
description="The BEDPE file describes disjoint genome features, "
"such as structural variations or paired-end sequence alignments. "
"It is used to estimate the scale factor.",
)
scale = FloatField(
label="Scale for the normalization factor",
description="Magnitude of the scale factor. "
"The scaling factor is calculated by dividing the scale "
"with the number of features in BEDPE "
"(scale/(number of features)).",
default=10000,
)
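# Illustrative numbers (not from the process itself): with the default scale
# of 10000 and a BEDPE file containing 2500 read pairs, the resulting
# --scaleFactor passed to bamCoverage is 10000 / 2500 = 4.0.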
class Output:
"""Output fields."""
bigwig = FileField(label="bigwig file")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run the analysis."""
path = inputs.alignment.output.bam.path
basename = os.path.basename(path)
assert basename.endswith(".bam")
name = basename[:-4]
out_file = f"{name}.SInorm.bigwig"
out_index = f"{name}.bai"
with open(inputs.bedpe.output.bedpe.path) as f:
spike_count = f.readlines()
spike_count = len(spike_count)
scale_factor = inputs.scale / spike_count
bam_coverage_param = [
"--bam",
path,
"--scaleFactor",
scale_factor,
"--outFileName",
out_file,
"--numberOfProcessors",
self.requirements.resources.cores,
"--outFileFormat",
"bigwig",
]
(Cmd["samtools"]["index"][path][out_index])()
self.progress(0.5)
(Cmd["bamCoverage"][bam_coverage_param])()
if not os.path.exists(out_file):
self.error("Generation of a scaled BigWig file with bamCoverage failed.")
outputs.bigwig = out_file
outputs.species = inputs.alignment.output.species
outputs.build = inputs.alignment.output.build
|
diydrones/ardupilot
|
Tools/scripts/uploader.py
|
Python
|
gpl-3.0
| 45,137
| 0.002725
|
#!/usr/bin/env python
############################################################################
#
# Copyright (c) 2012-2017 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# Serial firmware uploader for the PX4FMU bootloader
#
# The PX4 firmware file is a JSON-encoded Python object, containing
# metadata fields and a zlib-compressed base64-encoded firmware image.
#
# The uploader uses the following fields from the firmware file:
#
# image
# The firmware that will be uploaded.
# image_size
# The size of the firmware in bytes.
# board_id
# The board for which the firmware is intended.
# board_revision
# Currently only used for informational purposes.
#
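# A minimal sketch of such a firmware file (field values are illustrative only):
#
#   {
#     "board_id": 9,
#     "board_revision": 0,
#     "image_size": 123456,
#     "image": "<base64 of zlib-compressed firmware bytes>"
#   }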
# AP_FLAKE8_CLEAN
# for python2.7 compatibility
from __future__ import print_function
import sys
import argparse
import binascii
import serial
import struct
import json
import zlib
import base64
import time
import array
import os
import platform
import re
from sys import platform as _platform
is_WSL = bool("Microsoft" in platform.uname()[2])
# default list of port names to look for autopilots
default_ports = ['/dev/serial/by-id/usb-Ardu*',
'/dev/serial/b
|
y-id/usb-3D*',
'/dev/serial/by-id/usb-APM*',
'/dev/serial/by-id/usb-Radio*',
'/dev/serial/by-id/usb-*_3DR_*',
'/dev/serial/by-id/usb-Hex_Technology_Limited*',
'/dev/serial/by-id/usb-Hex_ProfiCNC*',
'/dev/serial/by-id/usb-Holybro*',
'/dev/serial/by-id/usb-mRo*',
'/dev/serial/by-id/usb-modalFC*',
'/dev/serial/by-id/usb-*-BL_*',
'/dev/serial/by-id/usb-*_BL_*',
'/dev/tty.usbmodem*']
if "cygwin" in _platform or is_WSL:
default_ports += ['/dev/ttyS*']
# Detect python version
if sys.version_info[0] < 3:
runningPython3 = False
else:
runningPython3 = True
# dictionary of bootloader {boardID: (firmware boardID, boardname), ...}
# designating firmware builds compatible with multiple boardIDs
compatible_IDs = {33: (9, 'AUAVX2.1')}
# CRC equivalent to crc_crc32() in AP_Math/crc.cpp
crctab = array.array('I', [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d])
def crc32(bytes, state=0):
'''crc32 exposed for use by chibios.py'''
for byte in bytes:
index = (state ^ byte) & 0xff
state = crctab[index] ^ (state >> 8)
return state
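# Usage sketch (illustrative): checksumming an image that has already been
# padded to a 4-byte multiple, as the firmware class below does.
#
#     padded = bytearray(b'\x01\x02\x03\x04')
#     checksum = crc32(padded)
#
# Matching crc_crc32() in AP_Math/crc.cpp, the state starts at 0 and there is
# no final inversion step, so the result generally differs from zlib.crc32().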
class firmware(object):
'''Loads a firmware file'''
desc = {}
image = bytes()
crcpad = bytearray(b'\xff\xff\xff\xff')
def __init__(self, path):
# read the file
f = open(path, "r")
self.desc = json.load(f)
f.close()
self.image = bytearray(zlib.decompress(base64.b64decode(self.desc['image'])))
if 'extf_image' in self.desc:
self.extf_image = bytearray(zlib.decompress(base64.b64decode(self.desc['extf_image'])))
else:
self.extf_image = None
# pad image to 4-byte length
while ((len(self.image) % 4) != 0):
self.image.append('\xff')
# pad image to 4-byte length
if self.extf_image is not None:
while ((len(self.extf_image) % 4) != 0):
self.extf_image.append('\xff')
def property(self, propname, default=None):
if propname in self.desc:
return self.desc[propname]
return default
def extf_crc(self, size):
state = crc32(self.ext
|
camsas/qjump-nsdi15-plotting
|
figure11/plot_throughput_factor_experiment.py
|
Python
|
bsd-3-clause
| 8,217
| 0.008762
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Simple script which takes a file with one packet latency (expressed as a
# signed integer) per line and plots a trivial histogram.
# Copyright (c) 2015, Malte Schwarzkopf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the project, the name of copyright holder nor the names
# of its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import sys, re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from utils import *
from matplotlib import pylab
from scipy.stats import scoreatpercentile
pkt_size = 256
train_length = 6
# @author: Aaron Blankstein, with modifications by Malte Schwarzkopf
class boxplotter(object):
def __init__(self, median, top, bottom, whisk_top=None,
whisk_bottom=None, extreme_top=None):
self.median = median
self.top = top
self.bott = bottom
self.whisk_top = whisk_top
self.whisk_bott = whisk_bottom
self.extreme_top = extreme_top
def draw_on(self, ax, index, box_color = "blue",
median_color = "red", whisker_color = "black"):
width = .7
w2 = width / 2
ax.broken_barh([(index - w2, width)],
(self.bott,self.top - self.bott),
facecolor="white",edgecolor=box_color, lw=0.5)
ax.broken_barh([(index - w2, width)],
(self.median,0),
facecolor="white", edgecolor=median_color, lw=0.5)
if self.whisk_top is not None:
ax.broken_barh([(index - w2, width)],
(self.whisk_top,0),
facecolor="white", edgecolor=whisker_color, lw=0.5)
ax.broken_barh([(index , 0)],
(self.whisk_top, self.top-self.whisk_top),
edgecolor=box_color,linestyle="solid", lw=0.5)
if self.whisk_bott is not None:
ax.broken_barh([(index - w2, width)],
(self.whisk_bott,0),
facecolor="white", edgecolor=whisker_color, lw=0.5)
ax.broken_barh([(index , 0)],
(self.whisk_bott,self.bott-self.whisk_bott),
edgecolor=box_color,linestyle="solid", lw=0.5)
if self.extreme_top is not None:
ax.scatter([index], [self.extreme_top], marker='*',
lw=0.5)
def percentile_box_plot(ax, data, indexer=None, box_top=75,
box_bottom=25,whisker_top=99,whisker_bottom=1):
if indexer is None:
indexed_data = zip(range(1,len(data)+1), data)
else:
indexed_data = [(indexer(datum), datum) for datum in data]
def get_whisk(vector, w):
if w is None:
return None
return scoreatpercentile(vector, w)
for index, x in indexed_data:
bp = boxplotter(scoreatpercentile(x, 50),
scoreatpercentile(x, box_top),
scoreatpercentile(x, box_bottom),
get_whisk(x, whisker_top),
get_whisk(x, whisker_bottom),
scoreatpercentile(x, 100))
bp.draw_on(ax, index)
def worst_case_approx(setups, trainlength, plength):
base_worst = 4.0 * 3
#base_worst = 0.566
#packet_time = (plength + 18.0) * 8.0 / 10.0 / 1000.0
packet_time = plength * 8.0 / 10.0 / 1000.0
tmp = [x * (packet_time * trainlength) for x in setups]
worst = [x + base_worst for x in tmp]
for i in range(len(worst)):
print "WORST CASE %d: %f" % (setups[i], worst[i])
return worst
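# Illustrative check of the bound computed above: for setups=[10] with the
# values used below (train_length=6, pkt_size=256), packet_time is
# 256*8/10/1000 = 0.2048 us, so the bound is 10 * (0.2048 * 6) + 12.0 =
# 24.288 us, which is the dotted "latency bound" line drawn on the plot.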
######################################
if len(sys.argv) < 2:
print "usage: plot_switch_experiment.py <input dir1> <input1 label> " \
"<input dir2> <input2 label> ... <output file>"
sys.exit(1)
paper_mode = True
if paper_mode:
set_paper_rcs()
# arg processing
if (len(sys.argv) - 1) % 2 == 1:
# odd number of args, have output name
outname = sys.argv[-1]
print "Output name specified: %s" % (outname)
else:
print "Please specify an output name!"
sys.exit(1)
inputdirs = []
labels = []
for i in range(1, len(sys.argv)-1, 2):
inputdirs.append(sys.argv[i])
labels.append(sys.argv[i+1])
# parsing
data = []
negs_ignored = 0
for indir in inputdirs:
ds = []
for line in open(indir).readlines():
#for line in open(indir).readlines():
if line.strip() == "":
continue
val = float(line.strip()) / 1000.0
if val > 0:
ds.append(val)
else:
negs_ignored += 1
data.append(ds)
print "Ignored %d negative latency values!" % (negs_ignored)
# plotting
fig = plt.figure(figsize=(3.33,2.22))
#plt.rc("font", size=7.0)
fig, ax = plt.subplots(figsize=(3.33,2.22))
pos = np.array(range(len(data)))+1
#bp = percentile_box_plot(ax, data)
plt.plot(pos, [np.mean(x) for x in data], marker='+', label='average',
lw=1.0, color='g')
plt.plot(pos, [np.percentile(x, 99) for x in data], marker='v',
label='99\\textsuperscript{th}\%ile',
lw=1.0, color='y', mfc='none', mec='y', mew=1.0)
plt.scatter(pos, [max(x) for x in data], marker='x',
label='100\\textsuperscript{th}\%ile',
lw=1.0, color='r')
# worst-case analytical approximation
#plt.plot(range(1, len(data)+1),
# worst_case_approx(range(0, len(data)), train_length, pkt_size),
# ':', color='r', label="modelled worst case", lw=1.0)
worst_case_approximation = worst_case_approx([10], train_length, pkt_size)[0]
wc_line = plt.axhline(worst_case_approximation, ls=':', color='r', lw=1.0)
#plt.axvline(worst_case_approx([10], train_length, pkt_size)[0] - 8, ls='--',
# color='k', lw=1.0, label="optimal network epoch")
first_legend = plt.legend(loc='upper left', frameon=False, handletextpad=0.1,
borderaxespad=0.05)
plt.gca().add_artist(first_legend)
plt.legend([wc_line], ["latency bound"], frameon=False, loc='upper center',
borderaxespad=0.05, handletextpad=0.1)
ax.set_xlabel('Throughput factor $f$')
ax.set_ylabel('End-to-end latency [$\mu$s]')
plt.ylim(0, 30.0)
plt.yticks(range(0, 31, 5), [str(x) for x in range(0, 31, 5)])
plt.xlim(0, len(inputdirs) + 1)
plt.xticks(range(pos[0], pos[-1] + 1, len(pos) / 5),
[round(worst_case_approximation / float(labels[i-1]), 1)
for i in range(pos[0], pos[-1] + 1, len(pos) / 5)])
plt.axvspan(0, 5, facecolor='0.8', alpha=0.5, zorder=0, lw=0.0)
plt.axvspan(20.5, 23, facecolor='0.8', alpha=0.5, zorder=0, lw=0.0)
plt.text(2, 31, "\\textbf{A}", fontsize=12)
plt.text(13, 31, "\\textbf{B}", fontsize=12)
plt.text(21.3, 31, "\\textbf{C}", fontsize=12)
#plt.setp(bp['whiskers'], color='k', linestyle='-' )
#plt.setp(bp['fliers'], markersize=3.0)
plt.savefig(outname, format="pdf", b
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_the_internet/uuid_uuid1.py
|
Python
|
apache-2.0
| 641
| 0
|
import uuid
u = uuid.uuid1()
print(u)
print(type(u))
print('bytes :', repr(u.bytes))
print('hex :', u.hex)
print('int :', u.int)
print('urn :', u.urn)
print('variant :', u.variant)
print('version :', u.version)
print('fields :', u.fields)
print(' time_low : ', u.time_low)
print(' time_mid : ', u.time_mid)
print(' time_hi_version : ', u.time_hi_version)
print(' clock_seq_hi_variant: ', u.clock_seq_hi_variant)
print(' clock_seq_low : ', u.clock_seq_low)
print(' node : ', u.node)
print(' time : ', u.time)
print(' clock_seq : ', u.clock_seq)
|
opennetworkinglab/spring-open-cli
|
sdncon/rest/models.py
|
Python
|
epl-1.0
| 944
| 0.001059
|
#
# Copyright (c) 2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserData(models.Model):
user = models.ForeignKey(User, null=True)
name = models.CharField(max_length=256)
content_type = models.CharField(max_length=128)
binary = models.BooleanField()
data = models.TextField()
|