| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |
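Each row stores one source file split into `prefix`, `middle`, and `suffix` cells (a fill-in-the-middle layout), so concatenating the three columns reconstructs the original file. Below is a minimal sketch of how a row could be inspected and reassembled, assuming a local copy of the split saved as `train.parquet` (a placeholder name, not part of the original page):

```python
import pandas as pd

# Hypothetical local copy of this split; the real storage location is not given here.
df = pd.read_parquet("train.parquet")

row = df.iloc[0]
# Reassemble the fill-in-the-middle split back into the full source file.
full_source = row["prefix"] + row["middle"] + row["suffix"]

print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
print(full_source.splitlines()[0])  # first line of the reconstructed file
```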
| kwadrat/ipij_vim | rdzen_vim.py | Python | isc | 6,361 | 0.005823 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import time
import re
import unittest
zncz_pocz = 'godz_pocz'
zncz_kon = 'godz_kon'
etkt_pocz = 'BEGIN:'
etkt_kon = 'END:'
znak_daty_pocz = 7
znak_daty_kon = 32
rozmiar_daty = 19
wzorcowa_data_pocz = 'BEGIN: 2012.12.29_13.01.23 END: 0000.00.00_00.00.00'
sama_wzorcowa_data = '2012.12.29_13.01.23'
inna_wzorcowa_data = '2012.12.29_13.52.09'
pusty_czas = '0000.00.00_00.00.00'
znane_wzorce = frozenset([zncz_pocz, zncz_kon])
def wstaw_na_pozycji(linia, wspx, Teraz):
pocz = linia[:wspx + 1]
kon = linia[wspx + 1:]
wynik = pocz + Teraz + kon
return wynik
def pobierz_czas(wzorzec):
return time.strftime(wzorzec, time.localtime(time.time()))
def wstaw_date_z_dzis(vim):
linia = vim.current.line
Teraz = pobierz_czas('%Y.%m.%d')
wspy, wspx = vim.current.window.cursor
wynik = wstaw_na_pozycji(linia, wspx, Teraz)
wspx += len(Teraz)
vim.current.line = wynik
vim.current.window.cursor = wspy, wspx
def jestem_w_ostatniej_linii(vim):
wspy, wspx = vim.current.window.cursor
return wspy == len(vim.current.buffer)
def kursor_w_dol(vim):
wspy, wspx = vim.current.window.cursor
wspy += 1
vim.current.window.cursor = wspy, wspx
def rozepchnij_ponizej_i_wstaw(vim, napis):
nr_linii, _ = vim.current.window.cursor
vim.current.buffer[nr_linii + 1:] = vim.current.buffer[nr_linii:]
vim.current.buffer[nr_linii] = napis
def dolacz_na_koncu_pliku(vim, napis):
vim.current.buffer.append(napis)
def wstaw_ponizej_tresc_linii(vim, napis):
if jestem_w_ostatniej_linii(vim):
dolacz_na_koncu_pliku(vim, napis)
else:
rozepchnij_ponizej_i_wstaw(vim, napis)
kursor_w_dol(vim)
def moment_czasowy():
return pobierz_czas('%Y.%m.%d_%H.%M.%S')
def wyznacz_tresc_poczatkowa():
return ''.join([etkt_pocz, ' ', moment_czasowy(), ' ', etkt_kon, ' ', pusty_czas])
def kursor_na_koniec_linii(vim):
wspy, wspx = vim.current.window.cursor
wspx = len(vim.current.line) - 1
vim.current.window.cursor = wspy, wspx
def mamy_linie_miernicza(vim):
return ksztalt_linii_mierniczej(vim.current.line)
def miarka_ma_zakonczenie(napis):
return wytnij_kon(napis) != pusty_czas
def linia_jest_pelna(vim):
return miarka_ma_zakonczenie(vim.current.line)
def aktywnie_wstaw_poczatek_pomiaru(vim):
poczatkowy = wyznacz_tresc_poczatkowa()
wstaw_ponizej_tresc_linii(vim, poczatkowy)
kursor_na_koniec_linii(vim)
def stempel_poczatkowy(vim):
if not mamy_linie_miernicza(vim) or linia_jest_pelna(vim):
aktywnie_wstaw_poczatek_pomiaru(vim)
def wstaw_date_koncowa(vim):
vim.current.line = vim.current.line[:znak_daty_kon] + moment_czasowy()
def stempel_koncowy(vim):
if mamy_linie_miernicza(vim) and not linia_jest_pelna(vim):
wstaw_date_koncowa(vim)
def obsluga_stempli_czasowych(rodzaj, vim):
if rodzaj == zncz_pocz:
stempel_poczatkowy(vim)
elif rodzaj == zncz_kon:
stempel_koncowy(vim)
else:
raise RuntimeError(rodzaj)
def wykonaj(rodzaj, vim):
if rodzaj in znane_wzorce:
obsluga_stempli_czasowych(rodzaj, vim)
else:
wstaw_date_z_dzis(vim)
format_linii = r'''
BEGIN:
\s # Space after the word BEGIN
\d{4} # Year
\. # Dot
\d{2} # Month
\. # Dot
\d{2} # Day
_ # Separator between day and time
\d{2} # Hour
\. # Dot
\d{2} # Minute
\. # Dot
\d{2} # Second
\s # Space after the start date
END:
\s # Space after the word END
\d{4} # Year
\. # Dot
\d{2} # Month
\. # Dot
\d{2} # Day
_ # Separator between day and time
\d{2} # Hour
\. # Dot
\d{2} # Minute
\. # Dot
\d{2} # Second
$ # End of text
'''
wzor = re.compile(format_linii, re.VERBOSE)
def data_od_znaku(napis, nr_pocz):
return napis[nr_pocz:nr_pocz + rozmiar_daty]
def wytnij_pocz(napis):
return data_od_znaku(napis, znak_daty_pocz)
def wytnij_kon(napis):
return data_od_znaku(napis, znak_daty_kon)
def ksztalt_linii_mierniczej(napis):
return wzor.match(napis)
def wyznacz_krotke_czasu(napis):
return map(int, [
napis[0:4],
napis[5:7],
napis[8:10],
napis[11:13],
napis[14:16],
napis[17:19],
])
def wyznacz_moment(napis):
paczka_do_sekundy = wyznacz_krotke_czasu(napis)
razem = paczka_do_sekundy + [0, 0, 0]
return int(time.mktime(razem))
def wyznacz_jeden_kawalek(label, yyyy_mm, day):
return ''.join([
label,
' ',
yyyy_mm,
'.',
'%02d' % day,
'_',
'00.00',
'.00',
])
def wyznacz_linie_dnia(yyyy_mm, day):
return ''.join([
wyznacz_jeden_kawalek(etkt_pocz, yyyy_mm, day),
' ',
wyznacz_jeden_kawalek(etkt_kon, yyyy_mm, day),
])
class TestRdzeniaDlaEdytora(unittest.TestCase):
def test_lokalnej_paczki_danych(self):
'''
TestRdzeniaDlaEdytora:
'''
self.assertEqual(wstaw_na_pozycji('abcd', 1, 'x'), 'abxcd')
self.assertEqual(len(moment_czasowy()), rozmiar_daty)
def test_formatu_linii(self):
'''
TestRdzeniaDlaEdytora:
'''
self.assertTrue(ksztalt_linii_mierniczej(wzorcowa_data_pocz))
wyznaczony_napis = wyznacz_tresc_poczatkowa()
self.assertTrue(ksztalt_linii_mierniczej(wyznaczony_napis))
self.assertEqual(wytnij_pocz(wzorcowa_data_pocz), sama_wzorcowa_data)
self.assertEqual(wytnij_kon(wzorcowa_data_pocz), '0000.00.00_00.00.00')
self.assertEqual(wyznacz_krotke_czasu(sama_wzorcowa_data), [2012, 12, 29, 13, 1, 23])
self.assertEqual(wyznacz_krotke_czasu(inna_wzorcowa_data), [2012, 12, 29, 13, 52, 9])
self.assertEqual(wyznacz_moment(sama_wzorcowa_data), 1356782483)
def test_szkieletu_miesiaca(self):
'''
TestRdzeniaDlaEdytora:
'''
odp = wyznacz_jeden_kawalek(etkt_pocz, '2012.10', 31)
self.assertEqual(odp,
'BEGIN: 2012.10.31_00.00.00')
odp = wyznacz_linie_dnia('2013.11', 1)
self.assertEqual(odp,
'BEGIN: 2013.11.01_00.00.00 END: 2013.11.01_00.00.00')
odp = wyznacz_linie_dnia('2013.12', 1)
self.assertEqual(odp,
'BEGIN: 2013.12.01_00.00.00 END: 2013.12.01_00.00.00')
if __name__ == '__main__':
unittest.main()
| dotKom/onlineweb4 | apps/splash/api/views.py | Python | mit | 665 | 0 |
from rest_framework.pagination import PageNumberPagination
from rest_framework.viewsets import ReadOnlyModelViewSet
from apps.splash.api.serializers import SplashEventSerializer
from apps.splash.filters import SplashEventFilter
from apps.splash.models import SplashEvent
class HundredItemsPaginator(PageNumberPagination):
page_size = 100
class SplashEventViewSet(ReadOnlyModelViewSet):
queryset = SplashEvent.objects.all()
serializer_class = SplashEventSerializer
pagination_class = HundredItemsPaginator
filter_class = SplashEventFilter
filter_fields = ('start_time', 'end_time')
ordering_fields = ('id', 'start_time', 'end_time')
| xwzy/triplet-deep-hash-pytorch | triplet-deep-hash-pytorch/src/extract_feature/convert_weights/convert_weights_to_keras.py | Python | apache-2.0 | 1,736 | 0.035138 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(0,'..')
import tensorflow as tf
import numpy as np
import itertools
import pickle
import os
import re
import inception_v4
os.environ['CUDA_VISIBLE_DEVICES'] = ''
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(myobject):
return [ atoi(c) for c in re.split('(\d+)', myobject.name) ]
def setWeights(layers, weights):
for index, layer in enumerate(layers):
if "dense" in layer.name:
continue
layer.set_weights(weights[index])
print(layer.name + " weights have been set!")
print("Finished Setting Weights!")
def get_layers(model):
# Get Trainable layers
layers = model.layers
layers.sort(key=natural_keys)
result = []
for i in range(len(layers)):
try:
layer = model.layers[i]
if layer.trainable:
bad = ["pooling", "flatten", "dropout", "activation", "concatenate"]
if not any(word in layer.name for word in bad):
result.append(layer)
except:
continue
bn,cv,fn=result[:int((len(result)-1)/2)],result[int((len(result)-1)/2):],result[-1]
res_zipped = zip(cv, bn)
out_prep = [list(elem) for elem in res_zipped]
out = out_prep + [[fn]]
return out
if __name__ == "__main__":
model = inception_v4.create_model()
with open('weights.p', 'rb') as fp:
weights = pickle.load(fp)
# Get layers to set
layers = get_layers(model)
layers = list(itertools.chain.from_iterable(layers))
# Set the layer weights
setWeights(layers, weights)
# Save model weights in h5 format
model.save_weights("../weights/inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5")
print("Finished saving weights in h5 format")
| ericawright/bedrock | tests/pages/contact.py | Python | mpl-2.0 | 2,012 | 0.001988 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage, BaseRegion
class ContactPage(BasePage):
URL_TEMPLATE = '/{locale}/contact/'
_contact_tab_locator = (By.CSS_SELECTOR, '.category-tabs > li[data-id=contact]')
_spaces_tab_locator = (By.CSS_SELECTOR, '.category-tabs > li[data-id=spaces]')
_mobile_menu_toggle_locator = (By.CSS_SELECTOR, '.mzp-c-sidemenu-summary.mzp-js-toggle')
@property
def contact_tab(self):
el = self.find_element(*self._contact_tab_locator)
return self.Tab(self, root=el)
@property
def spaces_tab(self):
el = self.find_element(*self._spaces_tab_locator)
return self.Tab(self, root=el)
@property
def is_mobile_menu_toggle_displayed(self):
return self.is_element_displayed(*self._mobile_menu_toggle_locator)
class Tab(BaseRegion):
@property
def is_selected(self):
return 'current' in self.root.get_attribute('class')
class SpacesPage(ContactPage):
URL_TEMPLATE = '/{locale}/contact/spaces/{slug}'
_map_locator = (By.ID, 'map')
_nav_locator = (By.CSS_SELECTOR, '#nav-spaces li h4')
@property
def is_nav_displayed(self):
return self.is_element_displayed(*self._nav_locator)
@property
def spaces(self):
return [self.Space(self, root=el) for el in self.find_elements(*self._nav_locator)]
def open_spaces_mobile_menu(self):
self.find_element(*self._mobile_menu_toggle_locator).click()
self.wait.until(lambda s: self.is_nav_displayed)
class Space(BaseRegion):
@property
def id(self):
return self.root.get_attribute('data-id')
@property
def is_selected(self):
return 'mzp-is-current' in self.root.get_attribute('class')
| airbnb/kafka | tests/kafkatest/tests/produce_consume_validate.py | Python | apache-2.0 | 8,828 | 0.003285 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.test import Test
from ducktape.utils.util import wait_until
import time
class ProduceConsumeValidateTest(Test):
"""This class provides a shared template for tests which follow the common pattern of:
- produce to a topic in the background
- consume from that topic in the background
- run some logic, e.g. fail topic leader etc.
- perform validation
"""
def __init__(self, test_context):
super(ProduceConsumeValidateTest, self).__init__(test_context=test_context)
# How long to wait for the producer to declare itself healthy? This can
# be overridden by inheriting classes.
self.producer_start_timeout_sec = 20
# How long to wait for the consumer to start consuming messages?
self.consumer_start_timeout_sec = 60
# How long to wait for the consumer process to fork? This
# is important in the case when the consumer is starting from the end,
# and we don't want it to miss any messages. The race condition this
# timeout avoids is that the consumer has not forked even after the
# producer begins producing messages, in which case we will miss the
# initial set of messages and get spurious test failures.
self.consumer_init_timeout_sec = 0
self.enable_idempotence = False
def setup_producer_and_consumer(self):
raise NotImplementedError("Subclasses should implement this")
def start_producer_and_consumer(self):
# Start background producer and consumer
self.consumer.start()
if (self.consumer_init_timeout_sec > 0):
self.logger.debug("Waiting %ds for the consumer to initialize.",
self.consumer_init_timeout_sec)
start = int(time.time())
wait_until(lambda: self.consumer.alive(self.consumer.nodes[0]) is True,
timeout_sec=self.consumer_init_timeout_sec,
err_msg="Consumer process took more than %d s to fork" %\
self.consumer_init_timeout_sec)
end = int(time.time())
# If `JMXConnectFactory.connect` is invoked during the
# initialization of the JMX server, it may fail to throw the
# specified IOException back to the calling code. The sleep is a
# workaround that should allow initialization to complete before we
# try to connect. See KAFKA-4620 for more details.
time.sleep(1)
remaining_time = self.consumer_init_timeout_sec - (end - start)
if remaining_time < 0 :
remaining_time = 0
if self.consumer.new_consumer:
wait_until(lambda: self.consumer.has_partitions_assigned(self.consumer.nodes[0]) is True,
timeout_sec=remaining_time,
err_msg="Consumer process took more than %d s to have partitions assigned" %\
remaining_time)
self.producer.start()
wait_until(lambda: self.producer.num_acked > 5,
timeout_sec=self.producer_start_timeout_sec,
err_msg="Producer failed to produce messages for %ds." %\
self.producer_start_timeout_sec)
wait_until(lambda: len(self.consumer.messages_consumed[1]) > 0,
timeout_sec=self.consumer_start_timeout_sec,
err_msg="Consumer failed to consume messages for %ds." %\
self.consumer_start_timeout_sec)
def check_alive(self):
msg = ""
for node in self.consumer.nodes:
if not self.consumer.alive(node):
msg = "The consumer has terminated, or timed out, on node %s." % str(node.account)
for node in self.producer.nodes:
if not self.producer.alive(node):
msg += "The producer has terminated, or timed out, on node %s." % str(node.account)
if len(msg) > 0:
raise Exception(msg)
def check_producing(self):
currently_acked = self.producer.num_acked
wait_until(lambda: self.producer.num_acked > currently_acked + 5, timeout_sec=30,
err_msg="Expected producer to still be producing.")
def stop_producer_and_consumer(self):
self.check_alive()
self.check_producing()
self.producer.stop()
self.consumer.wait()
def run_produce_consume_validate(self, core_test_action=None, *args):
"""Top-level template for simple produce/consume/validate tests."""
try:
self.start_producer_and_consumer()
if core_test_action is not None:
core_test_action(*args)
self.stop_producer_and_consumer()
self.validate()
except BaseException as e:
for s in self.test_context.services:
self.mark_for_collect(s)
raise
@staticmethod
def annotate_missing_msgs(missing, acked, consumed, msg):
missing_list = list(missing)
msg += "%s acked message did not make it to the Consumer. They are: " %\
len(missing_list)
if len(missing_list) < 20:
msg += str(missing_list) + ". "
else:
msg += ", ".join(str(m) for m in missing_list[:20])
msg += "...plus %s more. Total Acked: %s, Total Consumed: %s. " \
% (len(missing_list) - 20, len(set(acked)), len(set(consumed)))
return msg
@staticmethod
def annotate_data_lost(data_lost, msg, number_validated):
print_limit = 10
if len(data_lost) > 0:
msg += "The first %s missing messages were validated to ensure they are in Kafka's data files. " \
"%s were missing. This suggests data loss. Here are some of the messages not found in the data files: %s\n" \
% (number_validated, len(data_lost), str(data_lost[0:print_limit]) if len(data_lost) > print_limit else str(data_lost))
else:
msg += "We validated that the first %s of these missing messages correctly made it into Kafka's data files. " \
"This suggests they were lost on their way to the consumer." % number_validated
return msg
def validate(self):
"""Check that each acked message was consumed."""
success = True
msg = ""
acked = self.producer.acked
consumed = self.consumer.messages_consumed[1]
# Correctness of the set difference operation depends on using equivalent message_validators in producer and consumer
missing = set(acked) - set(consumed)
self.logger.info("num consumed: %d" % len(consumed))
# Were all acked messages consumed?
if len(missing) > 0:
msg = self.annotate_missing_msgs(missing, acked, consumed, msg)
success = False
#Did we miss anything due to data loss?
to_validate = list(missing)[0:1000 if len(missing) > 1000 else len(missing)]
data_lost = self.kafka.search_data_files(self.topic, to_validate)
msg = self.annotate_data_lost(data_lost, msg, len(to_validate))
if self.enable_idempotence:
self.logger.info("Ran a test with idempotence enabled. We expect no duplicates")
else:
self.logger.info("Ran a te
| SarahBA/b2share | b2share/modules/communities/views.py | Python | gpl-2.0 | 10,646 | 0.00047 |
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 University of Tuebingen, CERN.
# Copyright (C) 2015 University of Tuebingen.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""B2Share Communities REST API"""
from __future__ import absolute_import
from functools import wraps
from flask import Blueprint, abort, current_app, make_response, request, \
jsonify, url_for
from invenio_db import db
from invenio_rest import ContentNegotiatedMethodView
from invenio_rest.decorators import require_content_types
from jsonpatch import InvalidJsonPatch, JsonPatchConflict, JsonPatchException
from werkzeug.exceptions import HTTPException
from werkzeug.local import LocalProxy
from .api import Community
from .errors import CommunityDeletedError, CommunityDoesNotExistError, \
InvalidCommunityError
from .permissions import communities_create_all_permission, \
delete_permission_factory, read_permission_factory, \
update_permission_factory
from .serializers import community_to_json_serializer, \
search_to_json_serializer, community_self_link
current_communities = LocalProxy(
lambda: current_app.extensions['b2share-communities'])
blueprint = Blueprint(
'b2share_communities',
__name__,
url_prefix='/communities'
)
def pass_community(f):
"""Decorator to retrieve a community."""
@wraps(f)
def inner(self, community_id, *args, **kwargs):
try:
community = Community.get(id=community_id)
except (CommunityDoesNotExistError):
abort(404)
except (CommunityDeletedError):
abort(410)
return f(self, community=community, *args, **kwargs)
return inner
def verify_community_permission(permission_factory, community):
"""Check that the current user has the required permissions on community.
Args:
permission_factory: permission factory used to check permissions.
community: community whose access is limited.
"""
# Note: this cannot be done in one line due to the overloading of boolean
# operations on the permission object.
if not permission_factory(community).can():
from flask_login import current_user
if not current_user.is_authenticated:
abort(401)
abort(403)
def need_community_permission(permission_factory):
"""Decorator checking that the user has the required community permissions.
Args:
factory_name: name of the factory to retrieve.
"""
def need_community_permission_builder(f):
@wraps(f)
def need_community_permission_decorator(self, community, *args,
**kwargs):
if not current_communities.rest_access_control_disabled:
verify_community_permission(permission_factory, community)
return f(self, community=community, *args, **kwargs)
return need_community_permission_decorator
return need_community_permission_builder
def _generic_search_result(item_array):
self_link = url_for('b2share_communities.communities_list', _external=True)
return {
'hits': {
'hits':item_array,
'total':len(item_array)
},
'links':{
'self': self_link,
}
}
class CommunityListResource(ContentNegotiatedMethodView):
view_name = 'communities_list'
def __init__(self, **kwargs):
"""Constructor."""
super(CommunityListResource, self).__init__(
method_serializers={
'GET': {
'application/json': search_to_json_serializer,
},
'POST': {
'application/json': community_to_json_serializer,
},
},
default_method_media_type={
'GET': 'application/json',
'POST': 'application/json',
},
default_media_type='application/json',
**kwargs)
def get(self):
"""Retrieve a list of communities."""
# TODO: change this to a search function, not just a list of communities
from .serializers import community_to_dict
start = request.args.get('start') or 0
stop = request.args.get('stop') or 100
community_list = Community.get_all(start, stop)
community_dict_list = [community_to_dict(c) for c in community_list]
response_dict = _generic_search_result(community_dict_list)
response = jsonify(response_dict)
# TODO: set etag
return response
def post(self):
"""Create a new community."""
if request.content_type != 'application/json':
abort(415)
data = request.get_json()
if data is None:
return abort(400)
# check user permissions
if (not current_communities.rest_access_control_disabled and
not communities_create_all_permission.can()):
from flask_login import current_user
if not current_user.is_authenticated:
abort(401)
abort(403)
try:
community = Community.create_community(**data)
response = self.make_response(
community=community,
code=201,
)
# set the header's Location field.
response.headers['Location'] = community_self_link(community)
db.session.commit()
return response
except InvalidCommunityError as e1:
try:
db.session.rollback()
except Exception as e2:
raise e2 from e1
abort(400)
except Exception as e1:
try:
db.session.rollback()
except Exception as e2:
raise e2 from e1
if isinstance(e1, HTTPException):
raise e1
current_app.logger.exception('Failed to create record.')
abort(500)
class CommunityResource(ContentNegotiatedMethodView):
view_name = 'communities_item'
def __init__(self, **kwargs):
"""Constructor."""
super(CommunityResource, self).__init__(
serializers={
'application/json': community_to_json_serializer,
},
method_serializers={
'DELETE': {'*/*': lambda *args: make_response(*args), },
},
default_method_media_type={
'GET': 'application/json',
'PUT': 'application/json',
'DELETE': '*/*',
'PATCH': 'application/json',
},
default_media_type='application/json',
**kwargs)
@pass_community
@need_community_permission(delete_permission_factory)
def delete(self, community, **kwargs):
"""Delete a community."""
# check the ETAG
self.check_etag(str(community.updated))
try:
community.delete()
db.session.commit()
except Exception as e1:
current_app.logger.exception('Failed to create record.')
try:
db.session.rollback()
except Exception as e2:
raise e2 from e1
abort(500)
return '', 204
@pass_community
@need_community_permi
| pudo/morphium | morphium/archive.py | Python | mit | 2,477 | 0 |
import os
import logging
import boto3
import mimetypes
from datetime import datetime
from morphium.util import env, TAG_LATEST
log = logging.getLogger(__name__)
config = {}
class Archive(object):
"""A scraper archive on S3. This is called when a scraper has generated a
file which needs to be backed up to a bucket."""
def __init__(self, bucket=None, prefix=None):
self.tag = datetime.utcnow().date().isoformat()
self.bucket = bucket or env('aws_bucket')
self.prefix = prefix or 'data'
@property
def client(self):
if not hasattr(self, '_client'):
if self.bucket is None:
log.warning("No $AWS_BUCKET, skipping upload.")
self._client = None
return None
access_key = env('aws_access_key_id')
if access_key is None:
log.warning("No $AWS_ACCESS_KEY_ID, skipping upload.")
self._client = None
return None
secret_key = env('aws_secret_access_key')
if secret_key is None:
log.warning("No $AWS_SECRET_ACCESS_KEY, skipping upload.")
self._client = None
return None
session = boto3.Session(aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
self._client = session.client('s3')
return self._client
def upload_file(self, source_path, file_name=None, mime_type=None):
"""Upload a file to the given bucket."""
if self.client is None:
return
if file_name is None:
file_name = os.path.basename(source_path)
if mime_type is None:
mime_type, _ = mimetypes.guess_type(file_name)
mime_type = mime_type or 'application/octet-stream'
key_name = os.path.join(self.prefix, self.tag, file_name)
log.info("Uploading [%s]: %s", self.bucket, key_name)
args = {
'ContentType': mime_type,
'ACL': 'public-read',
}
self.client.upload_file(source_path, self.bucket, key_name,
ExtraArgs=args)
copy_name = os.path.join(self.prefix, TAG_LATEST, file_name)
copy_source = {'Key': key_name, 'Bucket': self.bucket}
self.client.copy(copy_source, self.bucket, copy_name,
ExtraArgs=args)
return 'http://%s/%s' % (self.bucket, key_name)
| abilian/abilian-sbe | src/abilian/sbe/apps/wiki/forms.py | Python | lgpl-2.1 | 1,942 | 0.00103 |
"""Forms for the Wiki module."""
from __future__ import annotations
from typing import Any
from wtforms import HiddenField, StringField, TextAreaField, ValidationError
from wtforms.validators import data_required
from abilian.i18n import _, _l
from abilian.web.forms import Form
from abilian.web.forms.filters import strip
from abilian.web.forms.validators import flaghidden
from abilian.web.forms.widgets import TextArea
from .util import page_exists
def clean_up(src: str) -> str:
"""Form filter."""
src = src.replace("\r", "")
return src
def int_or_none(val: Any) -> int | None:
try:
return int(val)
except (TypeError, ValueError):
return None
class WikiPageForm(Form):
title = StringField(
label=_l("Title"), filters=(strip,), validators=[data_required()]
)
body_src = TextAreaField(
label=_l("Body"),
filters=(strip, clean_up),
validators=[data_required()],
widget=TextArea(rows=10, resizeable="vertical"),
)
message = StringField(label=_l("Commit message"))
page_id = HiddenField(filters=(int_or_none,), validators=[flaghidden()])
last_revision_id = HiddenField(filters=(int_or_none,), validators=[flaghidden()])
def validate_title(self, field: StringField):
title = field.data
if title != field.object_data and page_exists(title):
raise ValidationError(
_("A page with this name already exists. Please use another name.")
)
def validate_last_revision_id(self, field: HiddenField):
val = field.data
current = field.object_data
if val is None or current is None:
return
if val != current:
raise ValidationError(_("this page has been edited since"))
# Not used yet
class CommentForm(Form):
message = TextAreaField(label=_l("Message"), validators=[data_required()])
page_id = HiddenField()
| elyezer/robottelo | tests/foreman/api/test_usergroup.py | Python | gpl-3.0 | 10,650 | 0 |
"""Unit tests for the ``usergroups`` paths.
Each ``APITestCase`` subclass tests a single URL. A full list of URLs to be
tested can be found here:
http://theforeman.org/api/1.11/apidoc/v2/usergroups.html
:Requirement: Usergroup
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: API
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_string
from nailgun import entities
from random import randint
from requests.exceptions import HTTPError
from robottelo.datafactory import (
invalid_values_list,
valid_data_list,
valid_usernames_list,
)
from robottelo.decorators import tier1, tier2
from robottelo.test import APITestCase
class UserGroupTestCase(APITestCase):
"""Tests for the ``usergroups`` path."""
@tier1
def test_positive_create_with_name(self):
"""Create new user group using different valid names
:id: 3a2255d9-f48d-4f22-a4b9-132361bd9224
:expectedresults: User group is created successfully.
:CaseImportance: Critical
"""
for name in valid_data_list():
with self.subTest(name):
user_group = entities.UserGroup(name=name).create()
self.assertEqual(user_group.name, name)
@tier1
def test_positive_create_with_user(self):
"""Create new user group using valid user attached to that group.
:id: ab127e09-31d2-4c5b-ae6c-726e4b11a21e
:expectedresults: User group is created successfully.
:CaseImportance: Critical
"""
for login in valid_usernames_list():
with self.subTest(login):
user = entities.User(login=login).create()
user_group = entities.UserGroup(user=[user]).create()
self.assertEqual(len(user_group.user), 1)
self.assertEqual(user_group.user[0].read().login, login)
@tier1
def test_positive_create_with_users(self):
"""Create new user group using multiple users attached to that group.
:id: b8dbbacd-b5cb-49b1-985d-96df21440652
:expectedresults: User group is created successfully and contains all
expected users.
:CaseImportance: Critical
"""
users = [entities.User().create() for _ in range(randint(3, 5))]
user_group = entities.UserGroup(user=users).create()
self.assertEqual(
sorted([user.login for user in users]),
sorted([user.read().login for user in user_group.user])
)
@tier1
def test_positive_create_with_role(self):
"""Create new user group using valid role attached to that group.
:id: c4fac71a-9dda-4e5f-a5df-be362d3cbd52
:expectedresults: User group is created successfully.
:CaseImportance: Critical
"""
for role_name in valid_data_list():
with self.subTest(role_name):
role = entities.Role(name=role_name).create()
user_group = entities.UserGroup(role=[role]).create()
self.assertEqual(len(user_group.role), 1)
self.assertEqual(user_group.role[0].read().name, role_name)
@tier1
def test_positive_create_with_roles(self):
"""Create new user group using multiple roles attached to that group.
:id: 5838fcfd-e256-49cf-aef8-b2bf215b3586
:expectedresults: User group is created successfully and contains all
expected roles
:CaseImportance: Critical
"""
roles = [entities.Role().create() for _ in range(randint(3, 5))]
user_group = entities.UserGroup(role=roles).create()
self.assertEqual(
sorted([role.name for role in roles]),
sorted([role.read().name for role in user_group.role])
)
@tier1
def test_positive_create_with_usergroup(self):
"""Create new user group using another user group attached to the
initial group.
:id: 2a3f7b1a-7411-4c12-abaf-9a3ca1dfae31
:expectedresults: User group is created successfully.
:CaseImportance: Critical
"""
for name in valid_data_list():
with self.subTest(name):
sub_user_group = entities.UserGroup(name=name).create()
user_group = entities.UserGroup(
usergroup=[sub_user_group],
).create()
self.assertEqual(len(user_group.usergroup), 1)
self.assertEqual(user_group.usergroup[0].read().name, name)
@tier2
def test_positive_create_with_usergroups(self):
"""Create new user group using multiple user groups attached to that
initial group.
:id: 9ba71288-af8b-4957-8413-442a47057634
:expectedresults: User group is created successfully and contains all
expected user groups
:CaseLevel: Integration
"""
sub_user_groups = [
entities.UserGroup().create() for _ in range(randint(3, 5))]
user_group = entities.UserGroup(usergroup=sub_user_groups).create()
self.assertEqual(
sorted([usergroup.name for usergroup in sub_user_groups]),
sorted(
[usergroup.read().name for usergroup in user_group.usergroup])
)
@tier1
def test_negative_create_with_name(self):
"""Attempt to create user group with invalid name.
:id: 1a3384dc-5d52-442c-87c8-e38048a61dfa
:expectedresults: User group is not created.
:CaseImportance: Critical
"""
for name in invalid_values_list():
with self.subTest(name):
with self.assertRaises(HTTPError):
entities.UserGroup(name=name).create()
@tier1
def test_negative_create_with_same_name(self):
"""Attempt to create user group with a name of already existent entity.
:id: aba0925a-d5ec-4e90-86c6-404b9b6f0179
:expectedresults: User group is not created.
:CaseImportance: Critical
"""
user_group = entities.UserGroup().create()
with self.assertRaises(HTTPError):
entities.UserGroup(name=user_group.name).create()
@tier1
def test_positive_update(self):
"""Update existing user group with different valid names.
:id: b4f0a19b-9059-4e8b-b245-5a30ec06f9f3
:expectedresults: User group is updated successfully.
:CaseImportance: Critical
"""
user_group = entities.UserGroup().create()
for new_name in valid_data_list():
with self.subTest(new_name):
user_group.name = new_name
user_group = user_group.update(['name'])
self.assertEqual(new_name, user_group.name)
@tier1
def test_positive_update_with_new_user(self):
"""Add new user to user group
:id: e11b57c3-5f86-4963-9cc6-e10e2f02468b
:expectedresults: User is added to user group successfully.
:CaseImportance: Critical
"""
user = entities.User().create()
user_group = entities.UserGroup().create()
user_group.user = [user]
user_group = user_group.update(['user'])
self.assertEqual(user.login, user_group.user[0].read().login)
@tier2
def test_positive_update_with_existing_user(self):
"""Update user that assigned to user group with another one
|
:id: 71b78f64-867d-4bf5-9b1e-02698a17fb38
:expectedresults: User group is updated successfully.
:CaseLevel: Integration
"""
users = [entities.User().create() for _ in range(2)]
user_group = entities.UserGroup(user=[users[0]]).create()
user_group.user[0] = users[1]
user_group = user_group.update(['user'])
self.assertEqual(users[1].login, user_group.user[0].read().login)
@tier1
def test_positive_update_with_new_role(self):
"""Add new role to user group
:id: 8e0872c1-ae88-4971-a6fc-cd60127d6663
:expectedresults: Role is added to user group successfully.
:CaseImportance: Critical
"""
new_role = entities.Role().create()
| ilendl2/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/wagtail_hooks.py | Python | mit | 1,721 | 0 |
from django.urls import reverse
from django.utils.html import format_html, format_html_join
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from wagtail.core import hooks
from wagtail.admin.menu import MenuItem
from wagtail.core.whitelist import attribute_rule, check_url
@hooks.register('register_settings_menu_item')
def register_django_admin_menu_item():
return MenuItem(_('Django Admin'), reverse('admin:index'),
classnames='icon icon-cogs', order=700)
@hooks.register('construct_whitelister_element_rules')
def whitelister_element_rules():
# Whitelist custom elements to the hallo.js editor
return {
'a': attribute_rule({'href': check_url, 'target': True}),
'blockquote': attribute_rule({'class': True})
}
@hooks.register('insert_editor_js')
def editor_js():
# Add extra JS files to the admin
js_files = [
'js/hallo-custom.js',
]
js_includes = format_html_join(
'\n', '<script src="{0}{1}"></script>',
((settings.STATIC_URL, filename) for filename in js_files)
)
return js_includes + format_html(
"""
<script>
registerHalloPlugin('blockquotebutton');
registerHalloPlugin('blockquotebuttonwithclass');
</script>
"""
)
@hooks.register('insert_editor_css')
def editor_css():
# Add extra CSS files to the admin like font-awesome
css_files = [
'node_modules/font-awesome/css/font-awesome.min.css'
]
css_includes = format_html_join(
'\n', '<link rel="stylesheet" href="{0}{1}">',
((settings.STATIC_URL, filename) for filename in css_files)
)
return css_includes
| ziiin/onjCodes | codejam/2014/B-cookie-clicker.py | Python | mit | 294 | 0.05102 |
import sys
import math
def getNextEle (X, C, F, i):
res = 0
residue = 0
for j in range(1,i):
t = int(math.ceil((C - residue) / (X + j * F)))
residue = t * (X + j * F) - (C - residue)  # consider residue cookies because of integral time seconds
res += X / (X + i * F)
def main():
| madmax983/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_DEPRECATED_link_functions_binomialGLM.py | Python | apache-2.0 | 1,473 | 0.033944 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import pandas as pd
import zipfile
import statsmodels.api as sm
def link_functions_binomial():
print("Read in prostate data.")
h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
sm_data = pd.read_csv(zipfile.ZipFile(pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip")).open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,2]
sm_data_features = sm_data[:,[1,3,4,5,6,7,8,9]]
print("Testing for family: BINOMIAL")
print("Set variables for h2o.")
myY = "CAPSULE"
myX = ["
|
ID","AGE","RACE","GLEASON","DCAPS","PSA","VOL","DPROS"]
print("Create models with canonical link: LOGIT")
h2o_model = h2o.glm(x=h2o_data[myX], y=h2o_data[myY].asfactor(), family="binomial", link="logit",alpha=[0.5], Lambda=[0])
sm_model = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Binomial(sm.families.links.logit)).fit()
print("Compare model deviances for link function logit")
h2o_deviance = h2o_model.residual_deviance() / h2o_model.null_deviance()
sm_deviance = sm_model.deviance / sm_model.null_deviance
assert h2o_deviance - sm_deviance < 0.01, "expected h2o to have an equivalent or better deviance measures"
if __name__ == "__main__":
pyunit_utils.standalone_test(link_functions_binomial)
else:
link_functions_binomial()
| pcdummy/socketrpc | socketrpc/gevent_srpc.py | Python | bsd-3-clause | 10,766 | 0.002415 |
# -*- coding: utf-8 -*-
# vim: set et sts=4 sw=4 encoding=utf-8:
###############################################################################
#
# This file is part of socketrpc.
#
# Copyright (C) 2011 Rene Jochum <rene@jrit.at>
#
###############################################################################
from socketrpc import set_serializer2, Fault, STRUCT_INT, struct_error
from socketrpc import STATUS_OK, NOT_WELLFORMED_ERROR, METHOD_NOT_FOUND, APPLICATION_ERROR
from gevent import spawn, spawn_later
from gevent.server import StreamServer
from gevent.event import AsyncResult, Event
from gevent.queue import Queue
from gevent.socket import create_connection
from gevent.socket import socket as gsocket
from socket import error as pysocket_error
import random
try:
from cStringIO import StringIO
except ImportError, e:
from StringIO import StringIO
import logging
# For pylint
def decode(data):
pass
def encode(obj):
pass
def set_serializer(predefined=None, encode=None, decode=None):
""" Sets the serializer for this class.
@see: socketrpc.set_serializer2
"""
set_serializer2(predefined, encode, decode, globals())
def _recvsized(self):
try:
message_length = STRUCT_INT.unpack(self.recv(4))[0]
except struct_error:
return Fault(NOT_WELLFORMED_ERROR, 'Haven\'t got a length.')
sock_buf = StringIO()
bytes_count = 0
while bytes_count < message_length:
chunk = self.recv(min(message_length - bytes_count, 32768))
part_count = len(chunk)
if part_count < 1:
return None
bytes_count += part_count
sock_buf.write(chunk)
return sock_buf.getvalue()
def _sendsized(self, data):
data = STRUCT_INT.pack(len(data)) + data
self.sendall(data)
# Monkey patch the gevent socket
_socket = gsocket
_socket.recvsized = _recvsized
_socket.sendsized = _sendsized
del _socket
class SocketRPCProtocol:
debug = False
allow_dotted_attributes = False
def __init__(self):
""" Sets up instance only variables
"""
self.id = 0
self.calls = {}
self.connected = Event()
self.writeQueue = Queue()
self.doWrite = True
def make_connection(self, socket, address, factory):
""" Sets up per connection vars
"""
self.socket = socket
self.address = address
self.factory = factory
self.logger = logging.getLogger("%s.%s:%s" % (self.__class__.__name__, address[0], address[1]))
def handle_read(self):
self.connected.set()
self.connection_made()
_sock = self.socket
try:
while True:
data = _sock.recvsized()
if isinstance(data, Fault):
return
data = decode(data)
if isinstance(data, Fault):
self.fault_received(data)
continue
transaction, obj = data.iteritems().next()
# Dispatch the transaction
if transaction == 'call':
spawn(self.dispatch_call, obj[0], obj[3], obj[1], obj[2])
elif transaction == 'reply':
spawn(self.dispatch_reply, obj[0], obj[1], obj[2])
else:
self.fault_received(NOT_WELLFORMED_ERROR, 'Unknown transaction: %s' % transaction)
finally:
# TODO: Make sure that everything has been transmitted.
self.connected.clear()
self.connection_lost()
def handle_write(self):
q = self.writeQueue
self.connected.wait()
_sock = self.socket
try:
while True:
data = q.get()
try:
self.socket.sendsized(data)
except (TypeError, pysocket_error), e:
# TODO: This needs to be passed
self.logger.exception(e)
finally:
pass
def connection_made(self):
self.logger.info('New connection from %s:%s' % self.address)
def connection_lost(self):
self.logger.info('Lost connection from %s:%s' % self.address)
def dispatch_call(self, method, id, args, kwargs):
if not self.allow_dotted_attributes:
method = method.replace('.', '')
cmd = 'docall_%s' % method
if self.debug:
self.logger.debug('exec CALL %s (%d)' % (method, id))
try:
func = getattr(self, cmd)
except AttributeError, e:
self.logger.error('Unknown CALL method %s (%d)' % (method, id))
self.send_response(METHOD_NOT_FOUND, 'Method "%s" not found (%d)' % (method, id))
return
try:
result = func(*args, **kwargs)
self.send_response(result=result, id=id)
except Fault, e:
self.send_response(e.faultCode, e.faultString, id)
except Exception, e:
self.send_response(APPLICATION_ERROR, "%s: %s" % (e.__class__.__name__, repr(e)), id)
def dispatch_reply(self, status, result, id):
if self.debug:
self.logger.debug('recv REPLY (%d)' % id)
try:
if status >= STATUS_OK:
self.calls[id].set(result)
del self.calls[id]
else:
self.calls[id].set_exception(Fault(status, result))
del self.calls[id]
except KeyError:
self.fault_received(Fault(APPLICATION_ERROR, 'Unknown result: %d' % id))
def fault_received(self, fault):
""" Gets called whenever we receive a fault
which isn't assignable.
"""
self.logger.exception(fault)
def send_response(self, code=STATUS_OK, result='', id=None):
if self.debug:
self.logger.debug('send REPLY (%d)' % id)
data = encode({'reply': [code,
result,
id,
]})
self.writeQueue.put(data)
def call(self, method, *args, **kwargs):
self.connected.wait()
self.id += 1
data = encode({'call': [method, args, kwargs, self.id]})
if isinstance(data, Fault):
finished = AsyncResult()
finished.set(data)
return finished
if self.debug:
self.logger.debug('send CALL (%d) %s' % (self.id, method))
self.writeQueue.put(data)
finished = AsyncResult()
self.calls[self.id] = finished
return finished
class SocketRPCServer(StreamServer):
def __init__(self, listener, protocol, backlog=None, spawn='default'):
StreamServer.__init__(self, listener, backlog=backlog, spawn=spawn)
self.protocol = protocol
def handle(self, socket, address):
""" Start the socket handlers
self.protocol.handle_write and
self.protocol.handle_read.
"""
protocol = self.protocol()
protocol.make_connection(socket, address, self)
# XXX: Is this greenlet independent from handle?
spawn(protocol.handle_write)
protocol.handle_read()
class SocketRPCClient(object):
""" RPClient for the above Server.
Automaticaly reconnects to the target server (with "reconnect=True")
but looses any results which hasn't been transfered on reconnect.
"""
## START Reconnecting feature,
# shameless borrowed from
# twisted.i.p.ReconnectingClientFactory (Rene)
maxDelay = 3600
initialDelay = 1.0
factor = 2.7182818284590451
jitter = 0.11962656472
delay = initialDelay
retries = 0
maxRetries = None
continueTrying = True
isTrying = False
## END Reconnecting
def __init__(self, address, protocol, timeout=None, source_address=None, reconnect=False):
self.sock_args = [address, timeout, source_address]
proto = self.protocol = protocol()
if not isinstance(proto, SocketRPCProtocol):
raise AttributeError('protocol must implement "SocketRPCProtocol"')
self.continueTrying = reconnect
# Do
| kobotoolbox/kobocat | onadata/apps/logger/migrations/0012_add_asset_uid_to_xform.py | Python | bsd-2-clause | 390 | 0 |
# coding: utf-8
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logger', '0011_add-index-to-instance-uuid_and_xform_uuid'),
]
operations = [
migrations.AddField(
model_name='xform',
name='kpi_asset_uid',
field=models.CharField(max_length=32, null=True),
),
]
| leschzinerlab/AWS | aws/list_all.py | Python | mit | 8,823 | 0.030488 |
#!/usr/bin/env python
import subprocess
import os
import sys
onlyinstances=False
if len(sys.argv) ==1:
print '\nUsage: awsls_admin [region]\n'
print '\nSpecify region (NOT availability zone) that will be displayed for all users\n'
sys.exit()
region=sys.argv[1]
if sys.argv[-1] == '-i':
onlyinstances=True
#List instances given a users tag
keyPath=subprocess.Popen('echo $KEYPAIR_PATH',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if len(keyPath) == 0:
print '\nError: KEYPAIR_PATH not specified as environment variable. Exiting\n'
sys.exit()
if keyPath.split('/')[-1].split('.')[-1] != 'pem':
print '\nError: Keypair specified is invalid, it needs to have .pem extension. Found .%s extension instead. Exiting\n' %(keyPath.split('/')[-1].split('.')[-1])
sys.exit()
tag=keyPath.split('/')[-1].split('.')[0]
#Get number of instances to loop over
numInstances=subprocess.Popen('aws ec2 describe-instances --region %s --query "Reservations[*].Instances[*].{InstanceID:InstanceId}" | grep InstanceID | wc -l' %(region),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
counter=0
print '\nAWS usage in region %s' %(region)
print '\n---------------------------------------------------------------------------------------------'
print 'ReservedInstanceType\tAvail. Zone\tInstanceID\tStatus\t\tIP Address\tUser'
print '---------------------------------------------------------------------------------------------'
if float(numInstances) == 0:
print 'No instances found\n'
while counter < float(numInstances):
instanceID=subprocess.Popen('aws ec2 describe-instances --region %s --query "Reservations[%i].Instances[*].{InstanceID:InstanceId}" | grep InstanceID' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if len(instanceID) > 0:
instanceID=instanceID.split()[-1].split('"')[1]
if len(instanceID) == 0:
instanceID='---'
status=subprocess.Popen('aws ec2 describe-instances --region %s --query "Reservations[%i].Instances[*].{State:State}" | grep Name' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if len(status) > 0:
status=status.split()[-1].split('"')[1]
if len(status) == 0:
status='--'
owner=subprocess.Popen('aws ec2 describe-instances --region %s --query "Reservations[%i].Instances[*].{Owner:KeyName}" | grep Owner' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
instanceType=subprocess.Popen('aws ec2 describe-instances --region %s --query "Reservations[%i].Instances[*].{Type:InstanceType}" | grep Type' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
availZone=subprocess.Popen('aws ec2 describe-instances --region %s --query "Reservations[%i].Instances[*]" | grep AvailabilityZone' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
PublicIP=subprocess.Popen('aws ec2 describe-instances --region %s --instance-id %s --query "Reservations[*].Instances[*].{IPaddress:PublicIpAddress}" | grep IPaddress' %(region,instanceID),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if len(PublicIP) > 0:
if PublicIP[0] == '"':
PublicIP=PublicIP.split()[-1].split('"')
if len(PublicIP)>1:
PublicIP='\t%s'%(PublicIP[1])
if len(PublicIP)==1:
PublicIP=PublicIP[0]
if PublicIP == 'null':
PublicIP='---\t'
if len(PublicIP) == 0:
PublicIP='---'
print '%s\t\t%s\t%s\t%s\t%s\t%s' %(instanceType,availZone,instanceID,status,PublicIP,owner)
counter=counter+1
#Info needed: instance ID, AMI, region, zone, tag
numSpotInstances=subprocess.Popen('aws ec2 describe-spot-instance-requests --region %s --query "SpotInstanceRequests[*].{State:State}"|grep State | wc -l' %(region),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
counter=0
print '\n----------------------------------------------------------------------------------------------------------------------------------------'
print 'SpotInstanceType\tAvail. Zone\tSpotInstanceID\tSpotStatus\tInstanceID\tStatus\t\tIP Address\tPrice\tUser\t'
print '----------------------------------------------------------------------------------------------------------------------------------------'
if float(numSpotInstances) == 0:
print 'No spot instances found\n'
while counter < float(numSpotInstances):
instanceID='---\t'
status='---\t'
PublicIP='---\t'
spotID=subprocess.Popen('aws ec2 describe-spot-instance-requests --region %s --query "SpotInstanceRequests[%i].{SpotID:SpotInstanceRequestId}"|grep SpotID' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
spotStatus=subprocess.Popen('aws ec2 describe-spot-instance-requests --region %s --query "SpotInstanceRequests[%i].{State:State}"|grep State' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
instanceType=subprocess.Popen('aws ec2 describe-spot-instance-requests --region %s --query "SpotInstanceRequests[%i].LaunchSpecification.{Type:InstanceType}"|grep Type' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
availZone=subprocess.Popen('aws ec2 describe-spot-instance-requests --region %s --query "SpotInstanceRequests[%i].LaunchSpecification.Placement.{AZone:AvailabilityZone}" | grep AZone' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
spotPrice=subprocess.Popen('aws ec2 describe-spot-instance-requests --region %s --query "SpotInstanceRequests[%i].{Price:SpotPrice}" | grep Price' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
if spotStatus == 'active':
instanceID=subprocess.Popen('aws ec2 describe-spot-instance-requests --region %s --query "SpotInstanceRequests[%i].{InstanceID:InstanceId}"|grep InstanceID' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
status=subprocess.Popen('aws ec2 describe-instances --instance-id %s --region %s --query "Reservations[0].Instances[*].State" | grep Name' %(instanceID,region),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
if status == 'running':
PublicIP=subprocess.Popen('aws ec2 describe-instances --region %s --instance-id %s --query "Reservations[*].Instances[*].{IPaddress:PublicIpAddress}" | grep IPaddress' %(region,instanceID),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
status='%s\t'%(status)
print '%s\t\t%s\t%s\t%s\t\t%s\t%s\t%s\t$%1.3f' %(instanceType,availZone,spotID,spotStatus,instanceID,status,PublicIP,float(spotPrice))
counter=counter+1
if onlyinstances is True:
sys.exit()
#Get number of instances to loop over
numVols=subprocess.Popen('aws ec2 describe-volumes --region %s --query "Volumes[*].{VolumeID:VolumeId}" | grep VolumeID | wc -l' %(region) ,shell=True, stdout=subprocess.PIPE).stdout.read().strip()
counter=0
print '\n----------------------------------------------------------------------------------------'
print 'Volume ID\tAvail. Zone\tSize\tUser\t\tStatus\t\tInstance'
print '----------------------------------------------------------------------------------------'
if float(numVols) == 0:
print 'No volumes found\n'
while counter < float(numVols):
volumeID=subprocess.Popen('aws ec2 describe-volumes --region %s --query "Volumes[%i].{VolumeID:VolumeId}" | grep VolumeID' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
status=subprocess.Popen('aws ec2 describe-volumes --region %s --query "Volumes[%i].{State:State}" | grep State' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
availZone=subprocess.Popen('aws ec2 describe-volumes --region %s --query "Volumes[%i].{AvailZone:AvailabilityZone}" | grep AvailZone' %(region,counter),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
size=subprocess.Popen('aws ec2
| alexgorin/txcaching | examples/cache_render_get_example.py | Python | mit | 3,675 | 0.003265 |
# -*- coding: utf-8 -*-
import time
from twisted.internet import defer, reactor
from twisted.web import server
from twisted.web.resource import Resource
from txcaching import cache, keyregistry
cache.load_config(**{"disable": False, "ip": "127.0.0.1", "port": 11212})
header = """
<html>
<head>
<style>
body {
font-family: "Calisto MT", "Bookman Old Style", Bookman, "Goudy Old Style", Garamond, "Hoefler Text", "Bitstream Charter", Georgia, serif;
font-size: 1.3em;
border: #000000 2px solid;
border-radius: 20px;
padding: 15px;
}
</style>
</head>
<body>
BODY
</body>
</html>
"""
main_html = header.replace("BODY", """
<h1>Users</h1>
<ol>
%s
</ol>
<h6><a href="/set">Set email for a user</a>
""")
get_email_by_name = header.replace("BODY", """
<form action="/get" method="get">
<label>
Username:
<input name="username" type="text"/>
</label>
<input class='btn' type="submit" value="Get email" />
</form>
<a href="/">Home</a>
""")
email_response = header.replace("BODY", """
<h4>EMAIL=%s</h4>
<a href="/">Home</a>
""")
email_not_found = header.replace("BODY", """
<h4>Email is not set for the user %s</h4>
<a href="/">Home</a>
""")
email_set_confirmation = header.replace("BODY", """
<h4>Email %s for user %s has been set.</h4>
<a href="/">Home</a>
""")
set_email = header.replace("BODY", """
<form action="/set" method="post">
<label>
Username:
<input name="username" type="text"/>
</label>
<label>
Email:
<input name="email" type="text"/>
</label>
<input class='btn' type="submit" value="Set email" />
</form>
<a href="/">Home</a>
""")
class DB:
data = {}
@staticmethod
def get(username):
"""Very heavy request"""
print "Reading from DB"
time.sleep(2)
email = DB.data.get(username, None)
if email:
return defer.succeed(email)
else:
return defer.fail(Exception("User not found"))
@staticmethod
def set(username, email):
DB.data[username] = email
class Getter(Resource):
def getChild(self, path, request):
return EmailGetter(path)
class EmailGetter(Resource):
def __init__(self, username):
self.username = username
@cache.cache_async_render_GET(class_name="EmailGetter")
def render_GET(self, request):
d = DB.get(self.username)
d.addCallback(lambda email: request.write(email_response % email))
d.addErrback(lambda failure: request.write(email_not_found % self.username))
d.addBoth(lambda _: request.finish())
return server.NOT_DONE_YET
class EmailSetter(Resource):
def render_GET(self, request):
return set_email
def render_POST(self, request):
username = request.args.get("username", [""])[0]
email = request.args.get("email", [""])[0]
cache_key = keyregistry.key(EmailGetter.render_GET, args=(EmailGetter(username),))
if cache_key:
cache.delete(cache_key)
DB.set(username, email)
return email_set_confirmation % (username, email)
class MainResource(Resource):
def getChild(self, path, request):
if not path:
return self
if path == "set":
return EmailSetter()
if path == "get":
return Getter()
def render_GET(self, request):
return main_html % "\n".join(
'<li><a href="/get/%s">%s</a>' % (username, username)
for username in DB.data.keys()
)
cache.flushAll()
reactor.listenTCP(8888, server.Site(MainResource()))
reactor.run()
| lifanov/cobbler | cobbler/utils.py | Python | gpl-2.0 | 65,236 | 0.001211 |
"""
Misc heavy lifting functions for cobbler
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import copy
import errno
import glob
import hashlib
import netaddr
import os
import random
import re
import shlex
import shutil
import simplejson
import subprocess
import string
import sys
import traceback
import urllib2
import yaml
from cexceptions import FileNotFoundException, CX
from cobbler import clogger
from cobbler import field_info
from cobbler import validate
def md5(key):
return hashlib.md5(key)
CHEETAH_ERROR_DISCLAIMER = """
# *** ERROR ***
#
# There is a templating error preventing this file from rendering correctly.
#
# This is most likely not due to a bug in Cobbler and is something you can fix.
#
# Look at the message below to see what things are causing problems.
#
# (1) Does the template file reference a $variable that is not defined?
# (2) is
|
there a formatting error in a Cheetah directive?
# (3) Should dollar signs ($) be escaped that are not being escaped?
#
# Try fixing the problem and then investigate to see if this message goes
# away or changes.
#
"""
# From http://code.activestate.com/recipes/303342/
class Translator:
allchars = string.maketrans('', '')
def __init__(self, frm='', to='', delete='', keep=None):
if len(to) == 1:
to = to * len(frm)
self.trans = string.maketrans(frm, to)
|
if keep is None:
self.delete = delete
else:
self.delete = self.allchars.translate(self.allchars, keep.translate(self.allchars, delete))
def __call__(self, s):
return s.translate(self.trans, self.delete)
# placeholder for translation
def _(foo):
return foo
MODULE_CACHE = {}
SIGNATURE_CACHE = {}
_re_kernel = re.compile(r'(vmlinu[xz]|kernel.img)')
_re_initrd = re.compile(r'(initrd(.*).img|ramdisk.image.gz)')
_re_is_mac = re.compile(':'.join(('[0-9A-Fa-f][0-9A-Fa-f]',) * 6) + '$')
_re_is_ibmac = re.compile(':'.join(('[0-9A-Fa-f][0-9A-Fa-f]',) * 20) + '$')
# all logging from utils.die goes to the main log even if there
# is another log.
main_logger = None # the logger will be lazy loaded later
def die(logger, msg):
global main_logger
if main_logger is None:
main_logger = clogger.Logger()
# log the exception once in the per-task log or the main
# log if this is not a background op.
try:
raise CX(msg)
except:
if logger is not None:
log_exc(logger)
else:
log_exc(main_logger)
# now re-raise it so the error can fail the operation
raise CX(msg)
def log_exc(logger):
"""
Log an exception.
"""
(t, v, tb) = sys.exc_info()
logger.info("Exception occured: %s" % t)
logger.info("Exception value: %s" % v)
logger.info("Exception Info:\n%s" % string.join(traceback.format_list(traceback.extract_tb(tb))))
def get_exc(exc, full=True):
(t, v, tb) = sys.exc_info()
buf = ""
try:
getattr(exc, "from_cobbler")
buf = str(exc)[1:-1] + "\n"
except:
if not full:
buf += str(t)
buf = "%s\n%s" % (buf, v)
if full:
buf += "\n" + "\n".join(traceback.format_list(traceback.extract_tb(tb)))
return buf
def cheetah_exc(exc, full=False):
lines = get_exc(exc).split("\n")
buf = ""
for l in lines:
buf += "# %s\n" % l
return CHEETAH_ERROR_DISCLAIMER + buf
def pretty_hex(ip, length=8):
"""
Pads an IP object with leading zeroes so that the result is
_length_ hex digits. Also do an upper().
"""
hexval = "%x" % ip.value
if len(hexval) < length:
hexval = '0' * (length - len(hexval)) + hexval
return hexval.upper()
def get_host_ip(ip, shorten=True):
"""
Return the IP encoding needed for the TFTP boot tree.
"""
ip = netaddr.ip.IPAddress(ip)
cidr = netaddr.ip.IPNetwork(ip)
if len(cidr) == 1: # Just an IP, e.g. a /32
return pretty_hex(ip)
else:
pretty = pretty_hex(cidr[0])
if not shorten or len(cidr) <= 8:
# not enough to make the last nibble insignificant
return pretty
else:
cutoff = (32 - cidr.prefixlen) / 4
return pretty[0:-cutoff]
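# Illustrative sketch (added; not part of the original cobbler source): how the
# helpers above encode a plain /32 address for the TFTP boot tree, assuming the
# netaddr import at the top of this module.
def _demo_host_ip_encoding():  # hypothetical helper, for illustration only
    # "10.0.0.5" == 0x0A000005, zero-padded to 8 hex digits and upper-cased.
    assert pretty_hex(netaddr.ip.IPAddress("10.0.0.5")) == "0A000005"
    assert get_host_ip("10.0.0.5") == "0A000005"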
def _IP(ip):
"""
Returns a netaddr.IP object representing ip.
If ip is already an netaddr.IP instance just return it.
Else return a new instance
"""
ip_class = netaddr.ip.IPAddress
if isinstance(ip, ip_class) or ip == "":
return ip
else:
return ip_class(ip)
def get_config_filename(sys, interface):
"""
The configuration file for each system pxe uses is either
    a form of the MAC address or the hex version of the IP. If none
of that is available, just use the given name, though the name
given will be unsuitable for PXE configuration (For this, check
system.is_management_supported()). This same file is used to store
system config information in the Apache tree, so it's still relevant.
"""
interface = str(interface)
if interface not in sys.interfaces:
return None
if sys.name == "default":
return "default"
mac = sys.get_mac_address(interface)
ip = sys.get_ip_address(interface)
if mac is not None and mac != "":
return "01-" + "-".join(mac.split(":")).lower()
elif ip is not None and ip != "":
return get_host_ip(ip)
else:
return sys.name
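# (added note) For a system whose interface MAC is "AA:BB:CC:11:22:33" the PXE
# config filename becomes "01-aa-bb-cc-11-22-33"; with only an IP it falls back
# to the hex form from get_host_ip(), and otherwise to the system name.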
def is_ip(strdata):
"""
Return whether the argument is an IP address.
"""
try:
_IP(strdata)
except:
return False
return True
def is_mac(strdata):
"""
Return whether the argument is a mac address.
"""
if strdata is None:
return False
return bool(_re_is_mac.match(strdata) or _re_is_ibmac.match(strdata))
def is_systemd():
"""
Return whether or not this system uses systemd
"""
if os.path.exists("/usr/lib/systemd/systemd"):
return True
return False
def get_random_mac(api_handle, virt_type="xenpv"):
"""
Generate a random MAC address.
from xend/server/netif.py
return: MAC address string
"""
if virt_type.startswith("vmware"):
mac = [
0x00, 0x50, 0x56,
random.randint(0x00, 0x3f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)
]
elif virt_type.startswith("xen") or virt_type.startswith("qemu") or virt_type.startswith("kvm"):
mac = [
0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)
]
else:
raise CX("virt mac assignment not yet supported")
mac = ':'.join(map(lambda x: "%02x" % x, mac))
systems = api_handle.systems()
while (systems.find(mac_address=mac)):
mac = get_random_mac(api_handle)
return mac
def find_matching_files(directory, regex):
"""
Find all files in a given directory that match a given regex.
Can't use glob directly as glob doesn't take regexen.
"""
files = glob.glob(os.path.join(directory, "*"))
results = []
for f in files:
if regex.match(os.path.basename(f)):
results.append(f)
return results
def find_highest_files(directory, unversioned, regex):
"""
Find the highest numbered file (kernel or initrd numbering scheme)
in a given directory that matches a given patter
|
JohnTroony/nikola
|
nikola/plugins/command/github_deploy.py
|
Python
|
mit
| 4,476
| 0.000895
|
# -*- coding: utf-8 -*-
# Copyright © 2014-2015 Puneeth Chaganti and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PUR
|
POSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import pri
|
nt_function
from datetime import datetime
import io
import os
import subprocess
import sys
from textwrap import dedent
from nikola.plugin_categories import Command
from nikola.plugins.command.check import real_scan_files
from nikola.utils import get_logger, req_missing, makedirs, unicode_str
from nikola.__main__ import main
from nikola import __version__
def uni_check_output(*args, **kwargs):
o = subprocess.check_output(*args, **kwargs)
return o.decode('utf-8')
def check_ghp_import_installed():
try:
subprocess.check_output(['ghp-import', '-h'])
except OSError:
# req_missing defaults to `python=True` — and it’s meant to be like this.
# `ghp-import` is installed via pip, but the only way to use it is by executing the script it installs.
req_missing(['ghp-import'], 'deploy the site to GitHub Pages')
class CommandGitHubDeploy(Command):
""" Deploy site to GitHub Pages. """
name = 'github_deploy'
doc_usage = ''
doc_purpose = 'deploy the site to GitHub Pages'
doc_description = dedent(
"""\
This command can be used to deploy your site to GitHub Pages.
It uses ghp-import to do this task.
"""
)
logger = None
def _execute(self, command, args):
self.logger = get_logger(
CommandGitHubDeploy.name, self.site.loghandlers
)
# Check if ghp-import is installed
check_ghp_import_installed()
# Build before deploying
build = main(['build'])
if build != 0:
self.logger.error('Build failed, not deploying to GitHub')
sys.exit(build)
# Clean non-target files
only_on_output, _ = real_scan_files(self.site)
for f in only_on_output:
os.unlink(f)
# Commit and push
self._commit_and_push()
return
def _commit_and_push(self):
""" Commit all the files and push. """
source = self.site.config['GITHUB_SOURCE_BRANCH']
deploy = self.site.config['GITHUB_DEPLOY_BRANCH']
remote = self.site.config['GITHUB_REMOTE_NAME']
source_commit = uni_check_output(['git', 'rev-parse', source])
commit_message = (
'Nikola auto commit.\n\n'
'Source commit: %s'
'Nikola version: %s' % (source_commit, __version__)
)
output_folder = self.site.config['OUTPUT_FOLDER']
command = ['ghp-import', '-n', '-m', commit_message, '-p', '-r', remote, '-b', deploy, output_folder]
self.logger.info("==> {0}".format(command))
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
self.logger.error(
'Failed GitHub deployment — command {0} '
'returned {1}'.format(e.cmd, e.returncode)
)
sys.exit(e.returncode)
self.logger.info("Successful deployment")
# Store timestamp of successful deployment
timestamp_path = os.path.join(self.site.config["CACHE_FOLDER"], "lastdeploy")
new_deploy = datetime.utcnow()
makedirs(self.site.config["CACHE_FOLDER"])
with io.open(timestamp_path, "w+", encoding="utf8") as outf:
outf.write(unicode_str(new_deploy.isoformat()))
|
culots/meld
|
setup_win32.py
|
Python
|
gpl-2.0
| 4,346
| 0.002531
|
#!/usr/bin/env python
import glob
import os
import site
from cx_Freeze import setup, Executable
import meld.build_helpers
import meld.conf
site_dir = site.getsitepackages()[1]
include_dll_path = os.path.join(site_dir, "gnome")
missing_dll = [
'libgtk-3-0.dll',
'libgdk-3-0.dll',
'libatk-1.0-0.dll',
'libintl-8.dll',
'libzzz.dll',
'libwinpthread-1.dll',
'libcairo-gobject-2.dll',
'libgdk_pixbuf-2.0-0.dll',
'libpango-1.0-0.dll',
'libpangocairo-1.0-0.dll',
'libpangoft2-1.0-0.dll',
'libpangowin32-1.0-0.dll',
'libffi-6.dll',
'libfontconfig-1.dll',
'libfreetype-6.dll',
'libgio-2.0-0.dll',
'libglib-2.0-0.dll',
'libgmodule-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgirepository-1.0-1.dll',
'libgtksourceview-3.0-1.dll',
'libjasper-1.dll',
'libjpeg-8.dll',
'libpng16-16.dll',
'libgnutls-26.dll',
'libxmlxpat.dll',
'librsvg-2-2.dll',
'libharfbuzz-gobject-0.dll',
'libwebp-5.dll',
]
gtk_libs = [
'etc/fonts',
'etc/gtk-3.0/settings.ini',
'etc/pango',
'lib/gdk-pixbuf-2.0',
'lib/girepository-1.0',
'share/fontconfig',
'share/fonts',
'share/glib-2.0',
'share/gtksourceview-3.0',
'share/icons',
]
include_files = [(os.path.join(include_dl
|
l_path, path), path) for path in
missing_dll + gtk_libs]
build_exe_options = {
"compressed": False,
"icon": "data/icons/meld
|
.ico",
"includes": ["gi"],
"packages": ["gi", "weakref"],
"include_files": include_files,
}
# Create our registry key, and fill with install directory and exe
registry_table = [
('MeldKLM', 2, 'SOFTWARE\Meld', '*', None, 'TARGETDIR'),
('MeldInstallDir', 2, 'SOFTWARE\Meld', 'InstallDir', '[TARGETDIR]', 'TARGETDIR'),
('MeldExecutable', 2, 'SOFTWARE\Meld', 'Executable', '[TARGETDIR]Meld.exe', 'TARGETDIR'),
]
# Provide the locator and app search to give MSI the existing install directory
# for future upgrades
reg_locator_table = [
('MeldInstallDirLocate', 2, 'SOFTWARE\Meld', 'InstallDir', 0)
]
app_search_table = [('TARGETDIR', 'MeldInstallDirLocate')]
msi_data = {
'Registry': registry_table,
'RegLocator': reg_locator_table,
'AppSearch': app_search_table
}
bdist_msi_options = {
"upgrade_code": "{1d303789-b4e2-4d6e-9515-c301e155cd50}",
"data": msi_data,
}
setup(
name="Meld",
version=meld.conf.__version__,
description='Visual diff and merge tool',
author='The Meld project',
author_email='meld-list@gnome.org',
maintainer='Kai Willadsen',
url='http://meldmerge.org',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python',
'Topic :: Desktop Environment :: Gnome',
'Topic :: Software Development',
'Topic :: Software Development :: Version Control',
],
options = {
"build_exe": build_exe_options,
"bdist_msi": bdist_msi_options,
},
executables = [
Executable(
"bin/meld",
base="Win32GUI",
targetName="Meld.exe",
shortcutName="Meld",
shortcutDir="ProgramMenuFolder",
),
],
packages=[
'meld',
'meld.ui',
'meld.util',
'meld.vc',
],
package_data={
'meld': ['README', 'COPYING', 'NEWS']
},
scripts=['bin/meld'],
data_files=[
('share/man/man1',
['meld.1']
),
('share/doc/meld-' + meld.conf.__version__,
['COPYING', 'NEWS']
),
('share/meld',
['data/meld.css', 'data/meld-dark.css']
),
('share/meld/icons',
glob.glob("data/icons/*.png") +
glob.glob("data/icons/COPYING*")
),
('share/meld/ui',
glob.glob("data/ui/*.ui") + glob.glob("data/ui/*.xml")
),
],
cmdclass={
"build_i18n": meld.build_helpers.build_i18n,
"build_help": meld.build_helpers.build_help,
"build_icons": meld.build_helpers.build_icons,
"build_data": meld.build_helpers.build_data,
}
)
|
joel-wright/DDRPi
|
experiments/python/drawingarea.py
|
Python
|
mit
| 6,432
| 0.007774
|
#!/usr/bin/env python
# example drawingarea.py
import pygtk
pygtk.require('2.0')
import gtk
import operator
import time
import string
class DrawingAreaExample:
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_title("Drawing Area Example")
window.connect("destroy", lambda w: gtk.main_quit())
self.area = gtk.DrawingArea()
self.area.set_size_request(400, 300)
self.pangolayout = self.area.create_pango_layout("")
self.sw = gtk.ScrolledWindow()
self.sw.add_with_viewport(self.area)
self.table = gtk.Table(2,2)
self.table.attach(self.sw, 1, 2, 1, 2)
window.add(self.table)
self.area.set_events(gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.POINTER_MOTION_HINT_MASK )
self.area.connect("expose-event", self.area_expose_cb)
def motion_notify(ruler, event):
return ruler.emit("motion_notify_event", event)
self.hadj = self.sw.get_hadjustment()
self.vadj = self.sw.get_vadjustment()
def val_cb(adj, ruler, horiz):
if horiz:
span = self.sw.get_allocation()[3]
else:
span = self.sw.get_allocation()[2]
l,u,p,m = ruler.get_range()
v = adj.value
ruler.set_range(v, v+span, p, m)
while gtk.events_pending():
gtk.main_iteration()
self.area.show()
self.sw.show()
self.table.show()
window.show()
def area_expose_cb(self, area, event):
self.style = self.area.get_style()
self.gc = self.style.fg_gc[gtk.STATE_NORMAL]
self.draw_point(10,10)
self.draw_points(110, 10)
self.draw_line
|
(210, 10)
s
|
elf.draw_lines(310, 10)
self.draw_segments(10, 100)
self.draw_rectangles(110, 100)
self.draw_arcs(210, 100)
# self.draw_pixmap(310, 100)
self.draw_polygon(10, 200)
# self.draw_rgb_image(110, 200)
return True
def draw_point(self, x, y):
self.area.window.draw_point(self.gc, x+30, y+30)
self.pangolayout.set_text("Point")
self.area.window.draw_layout(self.gc, x+5, y+50, self.pangolayout)
return
def draw_points(self, x, y):
points = [(x+10,y+10), (x+10,y), (x+40,y+30),
(x+30,y+10), (x+50,y+10)]
self.area.window.draw_points(self.gc, points)
self.pangolayout.set_text("Points")
self.area.window.draw_layout(self.gc, x+5, y+50, self.pangolayout)
return
def draw_line(self, x, y):
self.area.window.draw_line(self.gc, x+10, y+10, x+20, y+30)
self.pangolayout.set_text("Line")
self.area.window.draw_layout(self.gc, x+5, y+50, self.pangolayout)
return
def draw_lines(self, x, y):
points = [(x+10,y+10), (x+10,y), (x+40,y+30),
(x+30,y+10), (x+50,y+10)]
self.area.window.draw_lines(self.gc, points)
self.pangolayout.set_text("Lines")
self.area.window.draw_layout(self.gc, x+5, y+50, self.pangolayout)
return
def draw_segments(self, x, y):
segments = ((x+20,y+10, x+20,y+70), (x+60,y+10, x+60,y+70),
(x+10,y+30 , x+70,y+30), (x+10, y+50 , x+70, y+50))
self.area.window.draw_segments(self.gc, segments)
self.pangolayout.set_text("Segments")
self.area.window.draw_layout(self.gc, x+5, y+80, self.pangolayout)
return
def draw_rectangles(self, x, y):
self.area.window.draw_rectangle(self.gc, False, x, y, 80, 70)
self.area.window.draw_rectangle(self.gc, True, x+10, y+10, 20, 20)
self.area.window.draw_rectangle(self.gc, True, x+50, y+10, 20, 20)
self.area.window.draw_rectangle(self.gc, True, x+20, y+50, 40, 10)
self.area.window.draw_rectangle(self.gc, False, 0, 0, 1, 1)
print self.area.window
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8, 20, 20)
w,h = pixbuf.get_width(), pixbuf.get_height()
drawable = gtk.gdk.Pixmap(None, w, h, 24)
pixbuf.get_from_drawable(self.area.window,self.area.window.get_colormap(),0,0,0,0,w,h)
print pixbuf.get_pixels_array()
self.pangolayout.set_text("Rectangles")
self.area.window.draw_layout(self.gc, x+5, y+80, self.pangolayout)
return
def draw_arcs(self, x, y):
self.area.window.draw_arc(self.gc, False, x+10, y, 70, 70,
0, 360*64)
self.area.window.draw_arc(self.gc, True, x+30, y+20, 10, 10,
0, 360*64)
self.area.window.draw_arc(self.gc, True, x+50, y+20, 10, 10,
0, 360*64)
self.area.window.draw_arc(self.gc, True, x+30, y+10, 30, 50,
210*64, 120*64)
self.pangolayout.set_text("Arcs")
self.area.window.draw_layout(self.gc, x+5, y+80, self.pangolayout)
return
def draw_pixmap(self, x, y):
pixmap, mask = gtk.gdk.pixmap_create_from_xpm(
self.area.window, self.style.bg[gtk.STATE_NORMAL], "gtk.xpm")
self.area.window.draw_drawable(self.gc, pixmap, 0, 0, x+15, y+25,
-1, -1)
self.pangolayout.set_text("Pixmap")
self.area.window.draw_layout(self.gc, x+5, y+80, self.pangolayout)
return
def draw_polygon(self, x, y):
points = [(x+10,y+60), (x+10,y+20), (x+40,y+70),
(x+30,y+30), (x+50,y+40)]
self.area.window.draw_polygon(self.gc, True, points)
self.pangolayout.set_text("Polygon")
self.area.window.draw_layout(self.gc, x+5, y+80, self.pangolayout)
return
def draw_rgb_image(self, x, y):
b = 80*3*80*['\0']
for i in range(80):
for j in range(80):
b[3*80*i+3*j] = chr(255-3*i)
b[3*80*i+3*j+1] = chr(255-3*abs(i-j))
b[3*80*i+3*j+2] = chr(255-3*j)
buff = string.join(b, '')
self.area.window.draw_rgb_image(self.gc, x, y, 80, 80,
gtk.gdk.RGB_DITHER_NONE, buff, 80*3)
self.pangolayout.set_text("RGB Image")
self.area.window.draw_layout(self.gc, x+5, y+80, self.pangolayout)
return
def main():
gtk.main()
return 0
if __name__ == "__main__":
DrawingAreaExample()
main()
|
odoousers2014/odoo
|
addons/account_analytic_analysis/sale_order.py
|
Python
|
agpl-3.0
| 1,018
| 0.000982
|
# -*- coding: utf-8 -*-
from openerp import models, api
class sale_order_line(models.Model):
_inherit = "sale.order.line"
@api.one
def button_confirm(self):
if self.product_id.recurring_invoice and self.order_id.project_id:
invoice_line_ids = [((0, 0, {
'product_id': self.product_id.id,
'analytic_account_id': self.order_id.project_id.id,
'name': self.name,
'quantity': self.product_uom_qty,
'uom_id': self.product_uom.id,
'price_unit': self.price_unit,
'price_subtotal': self.price_subtotal
}))]
analytic_values = {'recurring_invoices': True, 'recurring_invoice_line_ids': invoice_line_
|
ids}
if not self.order_id.project_id.partner_id:
analytic_values['partner_id'] = self.order_id.partner_id.id
self.order_id.project_id.write(analytic_values)
return super(sale_order_line, sel
|
f).button_confirm()
|
contactgsuraj/ICHack2017
|
RBP/test_client.py
|
Python
|
mit
| 390
| 0.017949
|
#A client for testing the bluetooth server running on the pi
import socket
serverMACAddress = 'b8:27:eb
|
:f1:bb:a6'
port = 3
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
s.connect((serverMACAddress,port))
s.send("Hello")
while 1:
#client, address == s.accept()
#data = s.recv(1024)
data = "
|
Hello"
s.send(data)
#print(data)
s.close()
|
gph82/PyEMMA
|
pyemma/thermo/util/__init__.py
|
Python
|
lgpl-3.0
| 790
| 0.001266
|
# This file is part of PyEMMA.
#
# Copyright (c) 2016 Computational Molecular Biology Group, Fre
|
ie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the impl
|
ied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .util import *
|
dabura667/electrum
|
plugins/digitalbitbox/cmdline.py
|
Python
|
mit
| 415
| 0.004819
|
from electrum.util import print_msg
from .digitalbitbox import DigitalBitboxPlugin
class Di
|
gitalBitboxCmdLineHandler:
def stop(self):
pass
def show_message(self, msg):
print_msg(msg)
def get_passphrase(self, msg, confirm):
import getpass
print_msg(msg)
return getpass.getpass('')
class Plugin(DigitalBitboxPlugin):
handle
|
r = DigitalBitboxCmdLineHandler()
|
mdwhatcott/pyspecs
|
setup.py
|
Python
|
mit
| 1,353
| 0
|
"""
Because I always forget, here's how to submit to PyPI:
# python setup.py register sdist upload
"""
from distutils.core import setup
import pyspecs
setup(
name='pyspecs',
version=pyspecs.__version__,
packages=['pyspecs'],
scripts=['scripts/run_pyspecs.py'],
url='https://github.com/mdwhatcott/pyspecs',
license='MIT',
author='Michael Whatcott',
author_email='mdwhatcott+pyspecs@gmail.com',
description='Concise BDD in python',
long_description='pyspecs is a testing framework that strives to achieve '
'more readable specifications (tests) by leveraging '
'some fancy syntactic sugar and auto-discovery of '
|
'tests/specs.',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI App
|
roved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Environment :: Console',
'Programming Language :: Python',
'Topic :: Software Development :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
]
)
|
nomuna/codility
|
Lesson_06/max_prod_three.py
|
Python
|
mit
| 390
| 0.048718
|
# correc
|
tness: 100%, performance: 0%
def solution(a):
l = len(a)
if l < 3: return reduce(lambda x, y: x * y, a)
products = []
for i in xrange(0, l):
for j in xrange(i+1, l):
for
|
k in xrange (j+1, l):
products.append(a[i] * a[j] * a[k])
return max(products)
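# Hedged alternative sketch (added; not part of the original submission): the
# triple loop above is O(n^3), which is why performance scores 0%. Sorting the
# array first gives O(n log n): the maximal triple product is either the three
# largest values or the two smallest (possibly both negative) times the largest.
def solution_sorted(a):
    a = sorted(a)
    return max(a[-1] * a[-2] * a[-3], a[0] * a[1] * a[-1])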
if __name__ == '__main__':
array = [ -3 , 1 , 2 , -2 , 5 , 6]
print "result: ", solution(array)
|
pysmt/pysmt
|
pysmt/solvers/z3.py
|
Python
|
apache-2.0
| 40,275
| 0.003501
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from pysmt.exceptions import SolverAPINotFound
try:
import z3
except ImportError:
raise SolverAPINotFound
# Keep array models expressed as values instead of Lambdas
# (see https://github.com/Z3Prover/z3/issues/1769)
z3.set_param('model.compact', False)
import pysmt.typing as types
import pysmt.operators as op
from pysmt.solvers.solver import (IncrementalTrackingSolver, UnsatCoreSolver,
Model, Converter, SolverOptions)
from pysmt.solvers.smtlib import SmtLibBasicSolver, SmtLibIgnoreMixin
from pysmt.solvers.qelim import QuantifierEliminator
from pysmt.walkers import DagWalker
from pysmt.exceptions import (SolverReturnedUnknownResultError,
SolverNotConfiguredForUnsatCoresError,
SolverStatusError,
ConvertExpressionError,
UndefinedSymbolError, PysmtValueError)
from pysmt.decorators import clear_pending_pop, catch_conversion_error
from pysmt.logics import LRA, LIA, QF_UFLRA, PYSMT_LOGICS
from pysmt.oracles import get_logic
from pysmt.constants import Fraction, Numeral, is_pysmt_integer, to_python_integer
# patch z3api
z3.is_ite = lambda x: z3.is_app_of(x, z3.Z3_OP_ITE)
z3.is_function = lambda x: z3.is_app_of(x, z3.Z3_OP_UNINTERPRETED)
z3.is_array_store = lambda x: z3.is_app_of(x, z3.Z3_OP_STORE)
z3.get_payload = lambda node,i : z3.Z3_get_decl_int_parameter(node.ctx.ref(),
node.decl().ast, i)
class AstRefKey:
def __init__(self, n):
self.n = n
def __hash__(self):
return self.n.hash()
def __eq__(self, other):
return self.n.eq(other.n)
def askey(n):
assert isinstance(n, z3.AstRef)
return AstRefKey(n)
class Z3Model(Model):
def __init__(self, environment, z3_model):
Model.__init__(self, environment)
self.z3_model = z3_model
self.converter = Z3Converter(environment, z3_model.ctx)
def get_value(self, formula, model_completion=True):
titem = self.converter.convert(formula)
z3_res = self.z3_model.eval(titem, model_completion=model_completion)
return self.converter.back(z3_res, model=self.z3_model)
def iterator_over(self, language):
for x in language:
yield x, self.get_value(x, model_completion=True)
def __iter__(self):
"""Overloading of iterator from Model. We iterate only on the
variables defined in the assignment.
"""
for d in self.z3_model.decls():
if d.arity() == 0:
try:
pysmt_d = self.converter.back(d())
yield pysmt_d, self.get_value(pysmt_d)
except UndefinedSymbolError:
# avoids problems with symbols generated by z3
pass
def __contains__(self, x):
"""Returns whether the model contains a value for 'x'."""
return x in (v for v, _ in self)
# EOC Z3Model
class Z3Options(SolverOptions):
@staticmethod
def _set_option(z3solver, name, value):
try:
z3solver.set(name, value)
except z3.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" \
% (name, value))
except z3.z3types.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" \
% (name, value))
def __call__(self, solver):
self._set_option(solver.z3, 'model', self.generate_models)
if self.unsat_cores_mode is not None:
self._set_option(solver.z3, 'unsat_core', True)
if self.random_seed is not None:
self._set_option(solver.z3, 'random_seed', self.random_seed)
for k,v in self.solver_options.items():
try:
self._set_option(solver.z3, str(k), v)
except z3.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" % (k,v))
except z3.z3types.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" % (k,v))
# EOC Z3Options
class Z3Solver(IncrementalTrackingSolver, UnsatCoreSolver,
SmtLibBasicSolver, SmtLibIgnoreMixin):
LOGICS = PYSMT_LOGICS - set(x for x in PYSMT_LOGICS if x.theory.strings)
OptionsClass = Z3Options
def __init__(self, environment, logic, **options):
IncrementalTrackingSolver.__init__(self,
environment=environment,
logic=logic,
**options)
try:
self.z3 = z3.SolverFor(str(logic))
except z3.Z3Exception:
self.z3 = z3.Solver()
except z3.z3types.Z3Exception:
self.z3 = z3.Solver()
except OSError:
self.z3 = z3.Solver()
self.options(self)
self.declarations = set()
self.converter = Z3Converter(environment, z3_ctx=self.z3.ctx)
self.mgr = environment.formula_manager
self._name_cnt = 0
return
@clear_pending_pop
def _reset_assertions(self):
self.z3.reset()
self.options(self)
@clear_pending_pop
def declare_variable(self, var):
raise NotImplementedError
@clear_pending_pop
def _add_assertion(self, formula, named=None):
self._assert_is_boolean(formula)
term = self.converter.convert(formula)
if self.options.unsat_cores_mode is not None:
# TO
|
DO: IF unsat_cores_mode is all, then we add this fresh variable.
# Otherwise, we should track this only if
|
it is named.
key = self.mgr.FreshSymbol(template="_assertion_%d")
tkey = self.converter.convert(key)
self.z3.assert_and_track(term, tkey)
return (key, named, formula)
else:
self.z3.add(term)
return formula
def get_model(self):
return Z3Model(self.environment, self.z3.model())
@clear_pending_pop
def _solve(self, assumptions=None):
if assumptions is not None:
bool_ass = []
other_ass = []
for x in assumptions:
if x.is_literal():
bool_ass.append(self.converter.convert(x))
else:
other_ass.append(x)
if len(other_ass) > 0:
self.push()
self.add_assertion(self.mgr.And(other_ass))
self.pending_pop = True
res = self.z3.check(*bool_ass)
else:
res = self.z3.check()
sres = str(res)
assert sres in ['unknown', 'sat', 'unsat']
if sres == 'unknown':
raise SolverReturnedUnknownResultError
return (sres == 'sat')
def get_unsat_core(self):
"""After a call to solve() yielding UNSAT, returns the unsat core as a
set of formulae"""
return self.get_named_unsat_core().values()
def _named_assertions_map(self):
if self.options.unsat_cores_mode is not None:
return dict((t[0], (t[1],t[2])) for t in self.assertions)
return None
def get_named_unsat_core(self):
"""After a call to solve() yielding UNSAT, returns the unsat core as a
dict of names to formulae"""
if self.options.unsat_cores_mode is None:
raise SolverNo
|
nbari/zunzuncito
|
my_api/default/v0/zun_tld/tldextract/tldextract.py
|
Python
|
bsd-3-clause
| 14,203
| 0.004224
|
# -*- coding: utf-8 -*-
"""`tldextract` accurately separates the gTLD or ccTLD (generic or country code
top-level domain) from the registered domain and subdomains of a URL.
>>> import tldextract
>>> tldextract.extract('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
>>> tldextract.extract('http://forums.bbc.co.uk/') # United Kingdom
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
>>> tldextract.extract('http://www.worldbank.org.kg/') # Kyrgyzstan
ExtractResult(subdomain='www', domain='worldbank', suffix='org.kg')
`ExtractResult` is a namedtuple, so it's simple to access the parts you want.
>>> ext = tldextract.extract('http://forums.bbc.co.uk')
>>> ext.domain
'bbc'
>>> '.'.join(ext[:2]) # rejoin subdomain and domain
'forums.bbc'
"""
from __future__ import with_statement
try:
import cPickle as pickle
except ImportError:
import pickle
from contextlib import closing
import errno
from functools import wraps
import logging
from operator import itemgetter
import os
import sys
import warnings
try:
import pkg_resources
except ImportError:
class pkg_resources(object):
"""Fake pkg_resources interface which falls back to getting resources
|
inside `tldextract`'s directory.
"""
@classmethod
def resource_stream(cls, package, res
|
ource_name):
moddir = os.path.dirname(__file__)
f = os.path.join(moddir, resource_name)
return open(f)
import re
import socket
try:
string_types = basestring
except NameError:
string_types = str
try: # pragma: no cover
# Python 2
from urllib2 import urlopen
from urlparse import scheme_chars
except ImportError: # pragma: no cover
# Python 3
from urllib.request import urlopen
from urllib.parse import scheme_chars
unicode = str
LOG = logging.getLogger("tldextract")
CACHE_FILE_DEFAULT = os.path.join(os.path.dirname(__file__), '.tld_set')
CACHE_FILE = os.path.expanduser(os.environ.get("TLDEXTRACT_CACHE", CACHE_FILE_DEFAULT))
PUBLIC_SUFFIX_LIST_URLS = (
'http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/effective_tld_names.dat?raw=1',
'https://raw.github.com/mozilla/gecko-dev/master/netwerk/dns/effective_tld_names.dat',
)
SCHEME_RE = re.compile(r'^([' + scheme_chars + ']+:)?//')
IP_RE = re.compile(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')
class ExtractResult(tuple):
'ExtractResult(subdomain, domain, suffix)'
__slots__ = ()
_fields = ('subdomain', 'domain', 'suffix')
def __new__(_cls, subdomain, domain, suffix):
'Create new instance of ExtractResult(subdomain, domain, suffix)'
return tuple.__new__(_cls, (subdomain, domain, suffix))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new ExtractResult object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != 3:
raise TypeError('Expected 3 arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return 'ExtractResult(subdomain=%r, domain=%r, suffix=%r)' % self
def _asdict(self):
'Return a new dict which maps field names to their values'
base_zip = zip(self._fields, self)
zipped = base_zip + [('tld', self.tld)]
return dict(zipped)
def _replace(_self, **kwds):
'Return a new ExtractResult object replacing specified fields with new values'
result = _self._make(map(kwds.pop, ('subdomain', 'domain', 'suffix'), _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
subdomain = property(itemgetter(0), doc='Alias for field number 0')
domain = property(itemgetter(1), doc='Alias for field number 1')
suffix = property(itemgetter(2), doc='Alias for field number 2')
@property
def tld(self):
warnings.warn('This use of tld is misleading. Use `suffix` instead.', DeprecationWarning)
return self.suffix
@property
def registered_domain(self):
"""
Joins the domain and suffix fields with a dot, if they're both set.
>>> extract('http://forums.bbc.co.uk').registered_domain
'bbc.co.uk'
>>> extract('http://localhost:8080').registered_domain
''
"""
if self.domain and self.suffix:
return self.domain + '.' + self.suffix
return ''
class TLDExtract(object):
def __init__(self, cache_file=CACHE_FILE, suffix_list_url=PUBLIC_SUFFIX_LIST_URLS, fetch=True,
fallback_to_snapshot=True):
"""
Constructs a callable for extracting subdomain, domain, and suffix
components from a URL.
Upon calling it, it first checks for a Python-pickled `cache_file`.
By default, the `cache_file` will live in the tldextract directory.
You can disable the caching functionality of this module by setting `cache_file` to False.
If the `cache_file` does not exist (such as on the first run), a live HTTP request
will be made to obtain the data at the `suffix_list_url` -- unless `suffix_list_url`
evaluates to `False`. Therefore you can deactivate the HTTP request functionality
by setting this argument to `False` or `None`, like `suffix_list_url=None`.
The default URL points to the latest version of the Mozilla Public Suffix List, but any
similar document could be specified.
Local files can be specified by using the `file://` protocol. (See `urllib2` documentation.)
If there is no `cache_file` loaded and no data is found from the `suffix_list_url`,
the module will fall back to the included TLD set snapshot. If you do not want
this behavior, you may set `fallback_to_snapshot` to False, and an exception will be
raised instead.
"""
if not fetch:
LOG.warning("The 'fetch' argument is deprecated. Instead of specifying fetch, "
"you should specify suffix_list_url. The equivalent of fetch=False would "
"be suffix_list_url=None.")
self.suffix_list_urls = ()
if suffix_list_url and fetch:
if isinstance(suffix_list_url, string_types):
self.suffix_list_urls = (suffix_list_url,)
else:
# TODO: kwarg suffix_list_url can actually be a sequence of URL
# strings. Document this.
self.suffix_list_urls = suffix_list_url
self.suffix_list_urls = tuple(url.strip() for url in self.suffix_list_urls if url.strip())
self.cache_file = os.path.expanduser(cache_file or '')
self.fallback_to_snapshot = fallback_to_snapshot
if not (self.suffix_list_urls or self.cache_file or self.fallback_to_snapshot):
raise ValueError("The arguments you have provided disable all ways for tldextract "
"to obtain data. Please provide a suffix list data, a cache_file, "
"or set `fallback_to_snapshot` to `True`.")
self._extractor = None
def __call__(self, url):
"""
Takes a string URL and splits it into its subdomain, domain, and
suffix (effective TLD, gTLD, ccTLD, etc.) component.
>>> extract = TLDExtract()
>>> extract('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
>>> extract('http://forums.bbc.co.uk/')
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
"""
netloc = SCHEME_RE.sub("", url) \
.partition("/")[0] \
.partition("?")[0] \
.partition("#")[0] \
.split("@")[-1] \
.partition(":")[0] \
.rstrip(".")
registered_domain, tld = self._g
|
mondhs/heroku_test
|
transcriber_re_mg14.py
|
Python
|
gpl-3.0
| 7,933
| 0.023621
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@author: Mindaugas Greibus
transcriber mg1.4
'''
import sys, re
import collections
class TranscriberRegexp:
# http://cmusphinx.sourceforge.net/wiki/tutorialam
# Do not use case-sensitive variants like “e” and “E”. Instead, all your phones must be different even in case-insensitive variation. Sphinxtrain doesn't support some special characters like '*' or '/' and supports most of others like ”+” or ”-” or ”:” But to be safe we recommend you to use alphanumeric-only phone-set. Replace special characters in the phone-set, like colons or dashes or tildes, with something alphanumeric. For example, replace “a~” with “aa” to make it alphanumeric only.
    # notation: 1 - nasal, 2 - hushing (ž, š), e3 - ė, "_" - length ($/:), "." - palatalization (')
graphemeToPhonemeMap = [
( u"iu", u"IU"),#Svarbu jei be minkštumo
( u"ių", u"IU_"),#Svarbu jei be minkštumo
( u"io", u"IO_"),#Svarbu jei be minkštumo
#( u"ui", u"UI"),
#( u"uo", u"UO"),
( u"ia", u"E"),
( u"ią", u"E_"),
#( u"tst", u"T S T"), #atstatyk# nebėra versijoje z1.3
#( u"ts", u"C"),#atsakymą,atsiųsk# nebėra versijoje z1.3
( u"iau", u"E U"),
( u"ja", u"J. E"), #jau, japonas
( u"ją", u"J. E_"), #naują
        # Dental consonants {S, Z, C, DZ} before alveolars {S2, Z2, C2, DZ2} are replaced by the corresponding alveolars (slenksčiai -> S L E N K S2 C2 E I).
( u"sž", u"S2 Z2"),#?
( u"sč", u"S2 C2"),#kunigaikštysčiu
( u"zdž", u"Z2 DZ2"),#vabzdžiai
#vyKdyk duslieji prieš skardžiuos g
( u"gk", u"K K"),#angkoras -> A N K K O_ R A S
( u"gt", u"K T"),#vašingtonas, jungtinių
( u"tb", u"D B"),#atbaidyti
( u"šb", u"Z2 B"),#išbandyti
( u"kd", u"G D"),#atlikdavo
( u"sd", u"Z D"),#kasdami
( u"šd", u"Z2 D"),#neišdildoma
( u"pg", u"B G"),#apgadintas
( u"tg", u"D G"),#atgabenti
( u"šg", u"Z2 G"),#išgaubti
( u"tž", u"D Z2"),#atžvilgiu
( u"žk", u"S2 K"),#grįžk
( u"zt", u"S T"),#megztinis
( u"ch", u"CH"),
( u"dž", u"DZ2"),
( u"dz", u"DZ"),
#grafemos
( u"a", u"A"),
( u"ą", u"A_"),
( u"b", u"B"),
( u"c", u"C"),
( u"č", u"C2"),
( u"d", u"D"),
( u"e", u"E"),
( u"ę", u"E_"),
( u"ė", u"E3_"),
( u"f", u"F"),
( u"g", u"G"),
( u"h", u"H"),
( u"i", u"I"),
( u"į", u"I_"),
( u"y", u"I_"),
( u"j", u"J."),
( u"k", u"K"),
( u"l", u"L"),
( u"m", u"M"),
( u"n", u"N"),
( u"o", u"O_"),
( u"p", u"P"),
( u"r", u"R"),
( u"s", u"S"),
( u"š", u"S2"),
( u"t", u"T"),
( u"u", u"U"),
( u"ų", u"U_"),
( u"ū", u"U_"),
( u"v", u"V"),
( u"w", u"V"),
( u"z", u"Z"),
( u"ž", u"Z2"),
]
    # word-finally (e.g. "daug") the voiced b, d, g, z, ž are pronounced as the voiceless p, t, k, s, š
    # in "grįžk" a voiced consonant before a voiceless one is devoiced (ž -> š)
    # palatalization: consonants before e, i, ė are soft (e.g. "džiaugsmas")
    # palatalization: in "ankstenė" k and g interrupt the spread of softness (anks't'enė)
    # !Without modelling palatalization! iu and io must be kept as separate phonemes.
preprocesorMap = [
( u"^ie", u"jie"),
( u"g$", u"k"),
( u"d$", u"t"),
( u"ž$", u"š"),
( u"z$", u"s"),
( u"facebookas", u"feisbukas"),
( u"unesco", u"junesko"),
]
def __init__(self):
transcribation_keys = map(lambda x: x[0], self.graphemeToPhonemeMap)
self.transcribation_rulesDict = dict(self.graphemeToPhonemeMap)
transcribation_regexStr = u"(%s)" % u"|".join(map(re.escape, transcribation_keys))
# Create a regular expression from the dictionary keys
self.transcribation_regex = re.compile(transcribation_regexStr)
preprocess_keys = map(lambda x: x[0], self.preprocesorMap)
self.preprocess_rulesDict = dict(self.preprocesorMap)
preprocess_regexStr = u"(%s)" % u"|".join(map(re.escape, preprocess_keys))
# Create a regular expression from the dictionary keys
self.preprocess_regex = re.compile(preprocess_regexStr)
def multiple_replace(self, text):
preprocesedText = self.preprocess_regex.sub(lambda mo: u" " + self.preprocess_rulesDict[mo.string[mo.start():mo.end()]] + u" ", text)
#print "["+text+"]"
# For each match, look-up corresponding value in dictionary
return self.transcribation_regex.sub(lambda mo: u" " + self.transcribation_rulesDict[mo.string[mo.start():mo.end()]] + u" ", preprocesedText)
def transcribe(self, word):
#lowerWord = word.decode('utf-8').lower().encode('utf-8')
lowerWord = word.lower()
transcibedWord = self.multiple_replace(lowerWord)
transcibedWord = re.sub(ur'\s+', ' ', transcibedWord)
transcibedWord = transcibedWord.upper().strip()
return transcibedWord;
def transcribeDictionary(self, text):
translatedMap = {}
#lowerText = text.decode('utf-8').lower().encode('utf-8')
lowerText = text.lower()
lowerText = re.sub(ur"[\.\,\?\!\"\/\_><]+", r" ", lowerText)
for wortEntry in lowerText.split():
wordTranslated = self.transcribe(wortEntry)
translatedMap[wortEntry] = wordTranslated
translatedMap = collections.OrderedDict(sorted(translatedMap.items(), key=lambda t: t[0]))
return translatedMap
import argparse
def processWords(words):
transcriber = TranscriberRegexp()
sphinx_dictionary = transcriber.transcribeDictionary(words)
return sphinx_dictionary
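# (added illustration, not in the original script) processWords(u"labas") is
# expected to return an ordered dict like {u"labas": u"L A B A S"}, since each
# plain grapheme maps to a single phoneme in graphemeToPhonemeMap.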
def processFile(input_file):
sphinx_dictionary = collections.OrderedDict()
for line in input_file:
loop_dictionary = processWords(line)
sphinx_dictionary.update(loop_dictionary)
sphinx_dictionary = collections.OrderedDict(sorted(sphinx_dictionary.items(), key=lambda t: t[0]))
return sphinx_dictionary
def writeToFile(sphinx_dictionary, output_file):
for key, value in sphinx_dictionary.iteritems():
output_file.write( u"{}\t{}\n".format(key, value))
def writeToConsole(sphinx_dictionary):
for key, value in sphinx_dictionary.iteritems():
print u"{}\t{}".format(key, value)
def main():
usage='%(prog)s --help'
    description='''Transcribe text into phones for CMU Sphinx recognition. Example: %(prog)s -i zodziai.txt -o zodziai.dict
'''
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument('-o', '--output_file', help='Output text dictionary file: word W O R D', metavar='out-file', type=argparse.FileType('wt'))
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output for debugging')
group = parser.add_mutually_exclusive_group()
parser.add_argument("input_words", nargs='?', help="echo the string you use here")
group.add_argument('-i', '--input_file', help
|
='Input text file one word per line, \'-\' for standard input', metavar='in-file', type=argparse.FileType('rt'))
args = parser.parse_args()
if args.verbose: print args
sphinx_dictionary = {}
if args.input_file:
sphinx_dictionary = processFile(args.input_file)
elif args.input_words:
sphinx_dictionary = processWords(args.input_words)
else:
sphinx_dictionary = processWords("bandom besikišk
|
iakopūstaudavome")
if args.output_file:
writeToFile(sphinx_dictionary, args.output_file)
else:
writeToConsole(sphinx_dictionary)
if __name__ == "__main__":
main()
|
fjorba/invenio
|
modules/websearch/lib/search_engine_query_parser.py
|
Python
|
gpl-2.0
| 54,879
| 0.007507
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine query parsers."""
import re
import string
from datetime import datetime
try:
import dateutil
if not hasattr(dateutil, '__version__') or dateutil.__version__ != '2.0':
from dateutil import parser as du_parser
from dateutil.relativedelta import relativedelta as du_delta
GOT_DATEUTIL = True
else:
from warnings import warn
warn("Not using dateutil module because the version %s is not compatible with Python-2.x" % dateutil.__version__)
GOT_DATEUTIL = False
except ImportError:
# Ok, no date parsing is possible, but continue anyway,
# since this package is only recommended, not mandatory.
GOT_DATEUTIL = False
from invenio.bibindex_engine_tokenizer import BibIndexFuzzyNameTokenizer as FNT
from invenio.logicutils import to_cnf
from invenio.config import CFG_WEBSEARCH_SPIRES_SYNTAX
NameScanner = FNT()
class InvenioWebSearchMismatchedParensError(Exception):
"""Exception for parse errors caused by mismatched parentheses."""
def __init__(self, message):
"""Initialization."""
self.message = message
def __str__(self):
"""String representation."""
return repr(self.message)
class SearchQueryParenthesisedParser(object):
"""Search query parser that handles arbitrarily-nested parentheses
Parameters:
* substitution_dict: a dictionary mapping strings to other strings. By
default, maps 'and', 'or' and 'not' to '+', '|', and '-'. Dictionary
values will be treated as valid operators for output.
A note (valkyrie 25.03.2011):
Based on looking through the prod search logs, it is evident that users,
when they are using parentheses to do searches, only run word characters
up against parens when they intend the parens to be part of the word (e.g.
U(1)), and when they are using parentheses to combine operators, they put
a space before and after them. As of writing, this is the behavior that
SQPP now expects, in order that it be able to handle such queries as
e(+)e(-) that contain operators in parentheses that should be interpreted
as words.
"""
def __init__(self, substitution_dict = {'and': '+', 'or': '|', 'not': '-'}):
self.substitution_dict = substitution_dict
self.specials = set(['(', ')', '+', '|', '-', '+ -'])
self.__tl_idx = 0
self.__tl_len = 0
# I think my names are both concise and clear
# pylint: disable=C0103
def _invenio_to_python_logical(self, q):
"""Translate the + and - in invenio query strings into & and ~."""
p = q
p = re.sub('\+ -', '&~', p)
p = re.sub('\+', '&', p)
p = re.sub('-', '~', p)
p = re.sub(' ~', ' & ~', p)
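        # (added note) e.g. "p0 - p1" -> "p0 ~ p1" -> "p0 & ~ p1", and
        # "p0 + p1 + - p2" -> "p0 & p1 &~ p2".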
return p
def _python_logical_to_invenio(self, q):
"""Translate the & and ~ in logical expression strings into + and -."""
p = q
p = re.sub('\& ~', '-', p)
p = re.sub('~', '-', p)
|
p = re.sub('\&', '+', p)
return p
# pylint: enable=C0103
def parse_query(self, query):
"""Make query into something suitable for search_engine.
This is the main entry point of the class.
Given an expression of the form:
|
"expr1 or expr2 (expr3 not (expr4 or expr5))"
        produces annotated list output suitable for consumption by search_engine,
of the form:
['+', 'expr1', '|', 'expr2', '+', 'expr3 - expr4 | expr5']
parse_query() is a wrapper for self.tokenize() and self.parse().
"""
toklist = self.tokenize(query)
depth, balanced, dummy_d0_p = self.nesting_depth_and_balance(toklist)
if not balanced:
raise SyntaxError("Mismatched parentheses in "+str(toklist))
toklist, var_subs = self.substitute_variables(toklist)
if depth > 1:
toklist = self.tokenize(self.logically_reduce(toklist))
return self.parse(toklist, var_subs)
def substitute_variables(self, toklist):
"""Given a token list, return a copy of token list in which all free
variables are bound with boolean variable names of the form 'pN'.
Additionally, all the substitutable logical operators are exchanged
for their symbolic form and implicit ands are made explicit
e.g., ((author:'ellis, j' and title:quark) or author:stevens jones)
becomes:
((p0 + p1) | p2 + p3)
with the substitution table:
{'p0': "author:'ellis, j'", 'p1': "title:quark",
'p2': "author:stevens", 'p3': "jones" }
Return value is the substituted token list and a copy of the
substitution table.
"""
def labels():
i = 0
while True:
yield 'p'+str(i)
i += 1
def filter_front_ands(toklist):
"""Filter out extra logical connectives and whitespace from the front."""
while toklist[0] == '+' or toklist[0] == '|' or toklist[0] == '':
toklist = toklist[1:]
return toklist
var_subs = {}
labeler = labels()
new_toklist = ['']
cannot_be_anded = self.specials.difference((')',))
for token in toklist:
token = token.lower()
if token in self.substitution_dict:
if token == 'not' and new_toklist[-1] == '+':
new_toklist[-1] = '-'
else:
new_toklist.append(self.substitution_dict[token])
elif token == '(':
if new_toklist[-1] not in self.specials:
new_toklist.append('+')
new_toklist.append(token)
elif token not in self.specials:
# apparently generators are hard for pylint to figure out
# Turns off msg about labeler not having a 'next' method
# pylint: disable=E1101
label = labeler.next()
# pylint: enable=E1101
var_subs[label] = token
if new_toklist[-1] not in cannot_be_anded:
new_toklist.append('+')
new_toklist.append(label)
else:
if token == '-' and new_toklist[-1] == '+':
new_toklist[-1] = '-'
else:
new_toklist.append(token)
return filter_front_ands(new_toklist), var_subs
def nesting_depth_and_balance(self, token_list):
"""Checks that parentheses are balanced and counts how deep they nest"""
depth = 0
maxdepth = 0
depth0_pairs = 0
good_depth = True
for i in range(len(token_list)):
token = token_list[i]
if token == '(':
if depth == 0:
depth0_pairs += 1
depth += 1
if depth > maxdepth:
maxdepth += 1
elif token == ')':
depth -= 1
if depth == -1: # can only happen with unmatched )
good_depth = False # so force depth check to fail
depth = 0 # but keep maxdepth in good range
return maxdepth, depth == 0 and good_depth, depth0_pairs
def logically_reduce(self, token_list):
"""Return token_list i
|
stephanie-wang/ray
|
rllib/utils/policy_server.py
|
Python
|
apache-2.0
| 3,404
| 0
|
import pickle
import traceback
from http.server import SimpleHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils.policy_client import PolicyClient
@PublicAPI
class PolicyServer(ThreadingMixIn, HTTPServer):
"""REST server than can be launched from a ExternalEnv.
This launches a multi-threaded server that listens on the specified host
and port to serve policy requests and forward experiences to RLlib.
Examples:
>>> class CartpoleServing(ExternalEnv):
def __init__(self):
ExternalEnv.__init__(
self, spaces.Discrete(2),
spaces.Box(
low=-10,
high=10,
shape=(4,),
dtype=np.float32))
def run(self):
server = PolicyServer(self, "localhost", 8900)
server.serve_forever()
>>> register_env("srv", lambda _: CartpoleServing())
>>> pg = PGTrainer(env="srv", config={"num_workers": 0})
>>> while True:
pg.train()
>>> client = PolicyClient("localhost:8900")
>>> eps_id = client.start_episode()
>>> action = client.get_action(eps_id, obs)
>>> ...
>>> client.log_returns(eps_id, reward)
>>> ...
>>> client.log_returns(eps_id, reward)
"""
@PublicAPI
def __init__(self, external_env, address, port):
handler = _make_handler(external_env)
HTTPServer.__init__(self, (address, port), handler)
def _make_handler(external_env):
class Handler(SimpleHT
|
TPRequestHandler):
def do_POST(self):
content_len = int(self.headers.get("Content-Length"), 0)
raw_body = self.rfile.read(content_len)
parsed_input = pickle.loads(raw_body)
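            # (added note) The body is a pickled dict with a "command" key plus
            # per-command arguments; execute_command below dispatches on it.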
try:
response = self.execute_command(parsed_input)
self.send_response(200)
self.end_headers()
self.wfile.write(pickle.dumps(response))
except Exception:
self.send_error(500, traceback.
|
format_exc())
def execute_command(self, args):
command = args["command"]
response = {}
if command == PolicyClient.START_EPISODE:
response["episode_id"] = external_env.start_episode(
args["episode_id"], args["training_enabled"])
elif command == PolicyClient.GET_ACTION:
response["action"] = external_env.get_action(
args["episode_id"], args["observation"])
elif command == PolicyClient.LOG_ACTION:
external_env.log_action(args["episode_id"],
args["observation"], args["action"])
elif command == PolicyClient.LOG_RETURNS:
external_env.log_returns(args["episode_id"], args["reward"],
args["info"])
elif command == PolicyClient.END_EPISODE:
external_env.end_episode(args["episode_id"],
args["observation"])
else:
raise Exception("Unknown command: {}".format(command))
return response
return Handler
|
openhatch/oh-missions-oppia-beta
|
core/storage/base_model/gae_models.py
|
Python
|
apache-2.0
| 15,533
| 0.000193
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base model class."""
__author__ = 'Sean Lip'
import feconf
import utils
from core.platform import models
transaction_services = models.Registry.import_transaction_services()
from google.appengine.ext import ndb
class BaseModel(ndb.Model):
"""Base model for all persistent object storage classes."""
# When this entity was first created.
created_on = ndb.DateTimeProperty(auto_now_add=True)
# When this entity was last updated.
last_updated = ndb.DateTimeProperty(auto_now=True)
# Whether the current version of the file is deleted.
deleted = ndb.BooleanProperty(indexed=True, default=False)
@property
def id(self):
"""A unique id for this model instance."""
return self.key.id()
def _pre_put_hook(self):
"""This is run before model instances are saved to the datastore.
Subclasses of BaseModel should override this method.
"""
pass
class EntityNotFoundError(Exception):
"""Raised when no entity for a given id exists in the datastore."""
pass
@classmethod
def get(cls, entity_id, strict=True):
"""Gets an entity by id. Fails noisily if strict == True.
Args:
entity_id: str. The id of the
|
entity.
strict: bool. Whether to fail noisily if no entity with the given id
exists in the datastore.
Returns:
None, if strict == False and no undeleted entity with the given id
|
exists in the datastore. Otherwise, the entity instance that
corresponds to the given id.
Raises:
- base_models.BaseModel.EntityNotFoundError: if strict == True and
no undeleted entity with the given id exists in the datastore.
"""
entity = cls.get_by_id(entity_id)
if entity and entity.deleted:
entity = None
if strict and entity is None:
raise cls.EntityNotFoundError(
'Entity for class %s with id %s not found' %
(cls.__name__, entity_id))
return entity
def put(self):
super(BaseModel, self).put()
@classmethod
def get_multi(cls, entity_ids):
entity_keys = [ndb.Key(cls, entity_id) for entity_id in entity_ids]
return ndb.get_multi(entity_keys)
@classmethod
def put_multi(cls, entities):
return ndb.put_multi(entities)
def delete(self):
super(BaseModel, self).key.delete()
@classmethod
def get_all(cls, include_deleted_entities=False):
"""Returns a filterable iterable of all entities of this class.
If include_deleted_entities is True then entities that have been marked
deleted are returned as well.
"""
query = cls.query()
if not include_deleted_entities:
query = query.filter(cls.deleted == False)
return query
@classmethod
def get_new_id(cls, entity_name):
"""Gets a new id for an entity, based on its name.
The returned id is guaranteed to be unique among all instances of this
entity.
Args:
entity_name: the name of the entity. Coerced to a utf-8 encoded
string. Defaults to ''.
Returns:
str: a new unique id for this entity class.
Raises:
- Exception: if an id cannot be generated within a reasonable number
of attempts.
"""
try:
entity_name = unicode(entity_name).encode('utf-8')
except Exception:
entity_name = ''
MAX_RETRIES = 10
RAND_RANGE = 127 * 127
ID_LENGTH = 12
for i in range(MAX_RETRIES):
new_id = utils.convert_to_hash(
'%s%s' % (entity_name, utils.get_random_int(RAND_RANGE)),
ID_LENGTH)
if not cls.get_by_id(new_id):
return new_id
raise Exception('New id generator is producing too many collisions.')
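# Illustrative usage sketch (not part of the original module): typical id generation and
# a non-strict fetch, where 'model_cls' is a hypothetical stand-in for any concrete
# BaseModel subclass.
def _example_base_model_usage(model_cls):
    new_id = model_cls.get_new_id('example entity')
    # Returns None because nothing has been put() under this id yet.
    return model_cls.get(new_id, strict=False)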
class VersionedModel(BaseModel):
"""Model that handles storage of the version history of model instances.
To use this class, you must declare a SNAPSHOT_METADATA_CLASS and a
SNAPSHOT_CONTENT_CLASS. The former must contain the String fields
'committer_id', 'commit_type' and 'commit_message', and a JSON field for
the Python list of dicts, 'commit_cmds'. The latter must contain the JSON
field 'content'. The item that is being versioned must be serializable to a
JSON blob.
Note that commit() should be used for VersionedModels, as opposed to put()
for direct subclasses of BaseModel.
"""
# The class designated as the snapshot model. This should be a subclass of
# BaseSnapshotMetadataModel.
SNAPSHOT_METADATA_CLASS = None
# The class designated as the snapshot content model. This should be a
# subclass of BaseSnapshotContentModel.
SNAPSHOT_CONTENT_CLASS = None
# Whether reverting is allowed. Default is False.
ALLOW_REVERT = False
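    # Illustrative sketch (not part of the original module) of the contract described in
    # the class docstring, using hypothetical subclass names:
    #     class ExampleSnapshotMetadataModel(BaseSnapshotMetadataModel): pass
    #     class ExampleSnapshotContentModel(BaseSnapshotContentModel): pass
    #     class ExampleVersionedModel(VersionedModel):
    #         SNAPSHOT_METADATA_CLASS = ExampleSnapshotMetadataModel
    #         SNAPSHOT_CONTENT_CLASS = ExampleSnapshotContentModel
    #         ALLOW_REVERT = True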
### IMPORTANT: Subclasses should only overwrite things above this line. ###
# The possible commit types.
_COMMIT_TYPE_CREATE = 'create'
_COMMIT_TYPE_REVERT = 'revert'
_COMMIT_TYPE_EDIT = 'edit'
_COMMIT_TYPE_DELETE = 'delete'
# A list containing the possible commit types.
COMMIT_TYPE_CHOICES = [
_COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT,
_COMMIT_TYPE_DELETE
]
# The delimiter used to separate the version number from the model instance
# id. To get the instance id from a snapshot id, use Python's rfind()
# method to find the location of this delimiter.
_VERSION_DELIMITER = '-'
# The reserved prefix for keys that are automatically inserted into a
# commit_cmd dict by this model.
_AUTOGENERATED_PREFIX = 'AUTO'
# The current version number of this instance. In each PUT operation,
# this number is incremented and a snapshot of the modified instance is
# stored in the snapshot metadata and content models. The snapshot
# version number starts at 1 when the model instance is first created.
# All data in this instance represents the version at HEAD; data about the
# previous versions is stored in the snapshot models.
version = ndb.IntegerProperty(default=0)
def _require_not_marked_deleted(self):
if self.deleted:
raise Exception('This model instance has been deleted.')
def _compute_snapshot(self):
"""Generates a snapshot (a Python dict) from the model fields."""
return self.to_dict(exclude=['created_on', 'last_updated'])
def _reconstitute(self, snapshot_dict):
"""Makes this instance into a reconstitution of the given snapshot."""
self.populate(**snapshot_dict)
return self
def _reconstitute_from_snapshot_id(self, snapshot_id):
"""Makes this instance into a reconstitution of the given snapshot."""
snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id)
snapshot_dict = snapshot_model.content
return self._reconstitute(snapshot_dict)
@classmethod
def _get_snapshot_id(cls, instance_id, version_number):
return '%s%s%s' % (
instance_id, cls._VERSION_DELIMITER, version_number)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
if self.SNAPSHOT_METADATA_CLASS is None:
raise Exception('No snapshot metadata class defined.')
if self.SNAPSHOT_CONTENT_CLASS is None:
raise Exception('No snapshot content class defined.')
i
|
wldcordeiro/servo
|
etc/ci/performance/test_runner.py
|
Python
|
mpl-2.0
| 14,471
| 0.002073
|
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import runner
import pytest
def test_log_parser():
mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
result = runner.parse_log(mock_log)
assert(expected == list(result))
def test_log_parser_complex():
mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/content.html
[PERF],navigationStart,1460358300
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Some other js error logs here
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
"navigationStart": 1460358300,
"unloadEventStart": None,
"unl
|
oadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEn
|
d": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
result = runner.parse_log(mock_log)
assert(expected == list(result))
def test_log_parser_empty():
mock_log = b'''
[PERF] perf block start
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF] perf block end
'''
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
result = runner.parse_log(mock_log, mock_testcase)
assert(expected == list(result))
def test_log_parser_error():
mock_log = b'Nothing here! Test failed!'
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
result = runner.parse_log(mock_log, mock_testcase)
assert(expected == list(result))
def test_log_parser_bad_testcase_name():
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    # Note that the testcase is about:blank because servo crashed
mock_log = b'''
[PERF] perf block start
[PERF],testcase,about:blank
[PERF],navigationStart,1460358376
[PERF],unloadEve
|
s20121035/rk3288_android5.1_repo
|
cts/apps/CameraITS/pymodules/its/error.py
|
Python
|
gpl-3.0
| 791
| 0.005057
|
# Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See t
|
he License for the specific language governing permissions and
# limitations under the License.
import unittest
class Error(Exception):
pass
class __UnitTest(unittest.TestCase):
"""Run a suite of unit
|
tests on this module.
"""
if __name__ == '__main__':
unittest.main()
|
wadobo/papersplease
|
papersplease/papers/admin.py
|
Python
|
agpl-3.0
| 1,742
| 0.000574
|
from __future__ import print_function
from __future__ import unicode_literals
from django.contrib import admin
fr
|
om .models import Conference
from .models import Paper
from .models import Author
from .models import Attachment
from .actions import paper_actions
class AttachInline(admin.TabularInline):
model = Attachment
class ConferenceAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'place', 'date')
search_fields = ('name
|
', 'place')
date_hierarchy = 'date'
class PaperAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
list_display = ('title', 'conference', 'status', 'pauthors',
'hasattach')
list_filter = ('status', 'conference')
search_fields = ('title', 'conference__name', 'conference__place',
'authors__first_name',
'authors__last_name', 'authors__email')
filter_horizontal = ('authors', )
inlines = [AttachInline, ]
actions = paper_actions
def pauthors(self, obj):
return ', '.join(i.get_full_name() for i in obj.authors.all())
pauthors.short_description = 'Authors'
def hasattach(self, obj):
return obj.attachs.exists()
hasattach.short_description = 'Attach?'
hasattach.boolean = True
class AuthorAdmin(admin.ModelAdmin):
list_display = ('email', 'first_name', 'last_name')
search_fields = ('email', 'first_name', 'last_name')
class AttachmentAdmin(admin.ModelAdmin):
list_display = ('attach', 'paper', 'uploaded')
admin.site.register(Conference, ConferenceAdmin)
admin.site.register(Paper, PaperAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Attachment, AttachmentAdmin)
|
shinriyo/workalendar
|
workalendar/core.py
|
Python
|
mit
| 17,530
| 0.000057
|
"""Working day tools
"""
import warnings
import ephem
import pytz
from calendar import monthrange
from datetime import date, timedelta, datetime
from ma
|
th import pi
from dateutil import easter
from lunardate import LunarDate
from calverter import Calverter
MON, TUE, WED, THU, FRI, SAT, SUN = range(7)
class Calendar(object):
FIXED_HOLIDAYS = ()
def __init__(self):
self._holidays = {}
def get_fixed_holidays(self, year):
"""Return the fixed days according to the FIXED_HOLIDAYS class property
"""
days = []
for month, day, label in self.FIXED_HOLIDAYS:
days.append((date(yea
|
r, month, day), label))
return days
def get_variable_days(self, year):
return []
def get_calendar_holidays(self, year):
"""Get calendar holidays.
If you want to override this, please make sure that it **must** return
a list of tuples (date, holiday_name)."""
return self.get_fixed_holidays(year) + self.get_variable_days(year)
def holidays(self, year=None):
"""Computes holidays (non-working days) for a given year.
Return a 2-item tuple, composed of the date and a label."""
if not year:
year = date.today().year
if year in self._holidays:
return self._holidays[year]
# Here we process the holiday specific calendar
temp_calendar = tuple(self.get_calendar_holidays(year))
        # cache the sorted list of holidays for this year
self._holidays[year] = sorted(temp_calendar)
return self._holidays[year]
def holidays_set(self, year=None):
"Return a quick date index (set)"
return set([day for day, label in self.holidays(year)])
def get_weekend_days(self):
"""Return a list (or a tuple) of weekdays that are *not* working days.
e.g: return (SAT, SUN,)
"""
raise NotImplementedError("Your Calendar class must implement the"
" `get_weekend_days` method")
def is_working_day(self, day,
extra_working_days=None, extra_holidays=None):
"""Return True if it's a working day.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_working_days``, you'll state that these dates
**are** working days.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
Please note that the ``extra_working_days`` list has priority over the
``extra_holidays`` list.
"""
# a little exception: chop the datetime type
if type(day) is datetime:
day = day.date()
# Extra lists exceptions
if extra_working_days and day in extra_working_days:
return True
# Regular rules
if day.weekday() in self.get_weekend_days():
return False
return not self.is_holiday(day, extra_holidays=extra_holidays)
def is_holiday(self, day, extra_holidays=None):
"""Return True if it's an holiday.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
"""
if extra_holidays and day in extra_holidays:
return True
return day in self.holidays_set(day.year)
def add_working_days(self, day, delta,
extra_working_days=None, extra_holidays=None):
"""Add `delta` working days to the date.
By providing ``extra_working_days``, you'll state that these dates
**are** working days.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
Please note that the ``extra_working_days`` list has priority over the
``extra_holidays`` list.
"""
days = 0
temp_day = day
while days < delta:
temp_day = temp_day + timedelta(days=1)
if self.is_working_day(temp_day,
extra_working_days=extra_working_days,
extra_holidays=extra_holidays):
days += 1
return temp_day
def find_following_working_day(self, day):
"Looks for the following working day"
while day.weekday() in self.get_weekend_days():
day = day + timedelta(days=1)
return day
@staticmethod
def get_nth_weekday_in_month(year, month, weekday, n=1, start=None):
"""Get the nth weekday in a given month. e.g:
>>> # the 1st monday in Jan 2013
>>> Calendar.get_nth_weekday_in_month(2013, 1, MON)
datetime.date(2013, 1, 7)
>>> # The 2nd monday in Jan 2013
>>> Calendar.get_nth_weekday_in_month(2013, 1, MON, 2)
datetime.date(2013, 1, 14)
"""
day = date(year, month, 1)
if start:
day = start
counter = 0
while True:
if day.month != month:
# Don't forget to break if "n" is too big
return None
if day.weekday() == weekday:
counter += 1
if counter == n:
break
day = day + timedelta(days=1)
return day
@staticmethod
def get_last_weekday_in_month(year, month, weekday):
"""Get the last weekday in a given month. e.g:
>>> # the last monday in Jan 2013
>>> Calendar.get_last_weekday_in_month(2013, 1, MON)
datetime.date(2013, 1, 28)
"""
day = date(year, month, monthrange(year, month)[1])
while True:
if day.weekday() == weekday:
break
day = day - timedelta(days=1)
return day
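# Illustrative sketch (not part of the original module): the minimal subclass contract
# described above -- declare FIXED_HOLIDAYS and implement get_weekend_days(); the names
# below are hypothetical.
class _ExampleCalendar(Calendar):
    FIXED_HOLIDAYS = (
        (1, 1, "New year"),
    )
    def get_weekend_days(self):
        return (SAT, SUN)
# _ExampleCalendar().holidays(2013) would return [(date(2013, 1, 1), 'New year')], and
# is_working_day() honours extra_working_days over extra_holidays as documented above.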
class ChristianMixin(Calendar):
EASTER_METHOD = None # to be assigned in the inherited mixin
include_epiphany = False
include_clean_monday = False
include_annunciation = False
include_ash_wednesday = False
include_holy_thursday = False
include_good_friday = False
include_easter_monday = False
include_easter_saturday = False
include_easter_sunday = False
include_all_saints = False
include_immaculate_conception = False
include_christmas = True
include_christmas_eve = False
include_ascension = False
include_assumption = False
include_whit_sunday = False
whit_sunday_label = 'Whit Sunday'
include_whit_monday = False
whit_monday_label = 'Whit Monday'
include_corpus_christi = False
include_boxing_day = False
boxing_day_label = "Boxing Day"
def get_ash_wednesday(self, year):
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=46)
def get_holy_thursday(self, year):
"Return the date of the last thursday before easter"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=3)
def get_good_friday(self, year):
"Return the date of the last friday before easter"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=2)
def get_clean_monday(self, year):
"Return the clean monday date"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=48)
def get_easter_saturday(self, year):
"Return the Easter Saturday date"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=1)
def get_easter_sunday(self, year):
"Return the date of the easter (sunday) -- following the easter method"
return easter.easter(year, self.EASTER_METHOD)
def get_easter_monday(self, year):
"Return the date of the monday after easter"
sunday = self.get_easter_sunday(year)
return sunday + timedelta(days=1)
def get_ascension_thursday(self, year):
easter = self.get_easter_sunday(year)
return easter + timedelta(days=39)
def get_whit_monday(self, year):
easter
|
zrax/pycdc
|
tests/input/test_extendedPrint.py
|
Python
|
gpl-3.0
| 122
| 0
|
import sys
print >>sys.stdout, 'Hello World'
prin
|
t >>sys.stdout, 1, 2, 3
print >>sys.stdout, 1,
|
2, 3,
print >>sys.stdout
|
HyperloopTeam/Hyperloop
|
src/hyperloop/geometry/pod.py
|
Python
|
apache-2.0
| 4,408
| 0.0152
|
from os.path import dirname, join
from openmdao.main.api import Assembly
#from openmdao.lib.components.api import GeomComponent
from openmdao.lib.datatypes.api import Float, Int
#hyperloop sizing calculations
from inlet import InletGeom
from battery import Battery
from passenger_capsule import PassengerCapsule
from tube_structure import TubeStructural
from aero import Aero
#overall geometry assembly
class Pod(Assembly):
#Inputs
area_inlet_in = Float(iotype="in", units="cm**2", desc="flow area required at the front of the inlet")
area_inlet_out = Float(iotype="in", units="cm**2", desc="flow area required at the back of the inlet")
time_mission = Float(iotype="in", units="s", desc="travel time for a single trip")
radius_tube_inner = Float(iotype="in", units="cm", desc="inner tube radius")
rho_air = Float(iotype="in", units="kg/m**3", desc="air density (aero calcs)")
F_net = Float(iotype="in", desc="Thrust generated by the nozzle", units="N")
energy = Float(iotype="in", desc="Energy required from batteries", units="kW*h")
Ps_tube = Float(99, iotype="in", desc="static pressure in the tube", units="Pa")
speed_max = Float(iotype="in", desc="maximum velocity of the pod", units="m/s")
hub_to_tip = Float(.4, iotype="in", desc="hub to tip ratio for the compressor")
coef_drag = Float(2, iotype="in", desc="capsule drag coefficient")
n_rows = Int(14, iotype="in", desc="number of rows of seats in the pod")
length_row = Float(150, iotype="in", units="cm", desc="length of each row of seats")
#Outputs
radius_inlet_back_outer = Float(iotype="out", units="cm", desc="outer radius of the back of the inlet")
area_compressor_bypass = Float(iotype="out", units="cm**2", desc="area available to move compressed air around the passenger capsule")
area_cross_section = Float(iotype="out", units="cm**2", desc="cross sectional area of the passenger capsule")
radius_tube_outer = Float(iotype="out", units="cm", desc="outer radius of tube")
net_force = Float(iotype="out", desc="Net force with drag considerations", units="N")
def configure(self):
#Add Components
capsule = self.add('capsule', PassengerCapsule())
tube = self.add('tube', TubeStructural())
inlet = self.add('inlet', InletGeom())
battery = self.add('battery', Battery())
aero = self.add('aero', Aero())
#Boundary Input Connections
#Pod -> Capsule
self.connect('n_rows','capsule.n_rows')
self.connect('length_row','capsule.length_row')
#Pod->Tube
self.connect('radius_tube_inner', 'tube.radius_inner')
self.connect('Ps_tube', 'tube.Ps_tube')
#Pod->Inlet
self.connect('area_inlet_in','inlet.area_in')
self.connect('area_inlet_out','inlet.area_out')
self.connect('hub_to_tip','inlet.hub_to_tip')
#Pod -> Battery
self.connect('time_mission','battery.time_mission')
self.connect('energy','battery.energy')
#Pod -> Aero
self.connect('coef_drag','aero.coef_drag')
self.connect('rho_air','aero.rho')
self.connect('speed_max','aero.velocity_capsule')
self.connect('F_net','aero.gross_thrust')
#Inter Component Connections
#Capsule -> Inlet
self.connect('capsule.area_cross_section','inlet.area_passenger_capsule')
#Capsule -> Battery
|
self.connect('capsule.area_cross_section','battery.area_cross_section')
|
#Inlet -> Aero
self.connect('inlet.area_frontal','aero.area_capsule')
#Boundary Output Connections
#Capsule -> Pod
self.connect('capsule.area_cross_section','area_cross_section')
#Tube->Pod
self.connect('tube.radius_outer','radius_tube_outer')
#Inlet->Pod
self.connect('inlet.radius_back_outer', 'radius_inlet_back_outer')
self.connect('inlet.area_bypass', 'area_compressor_bypass')
#Aero -> Pod
self.connect('aero.net_force','net_force') #not currently used, eventually passed to mission
#Declare Solver Workflow
self.driver.workflow.add(['capsule','tube','inlet','battery','aero'])
def run(self,*args,**kwargs):
super(Assembly, self).run(*args,**kwargs)
if __name__ == "__main__":
from openmdao.main.api import set_as_top
p = Pod()
p.configure()
g = set_as_top(p)
|
emanueldima/b2share
|
b2share/modules/files/__init__.py
|
Python
|
gpl-2.0
| 908
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be use
|
ful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#
|
General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""B2SHARE"""
from __future__ import absolute_import, print_function
from .ext import B2ShareFiles
__all__ = ('B2ShareFiles',)
|
enaut/Minecraft-Overviewer
|
overviewer.py
|
Python
|
gpl-3.0
| 29,543
| 0.003283
|
#!/usr/bin/env python3
# This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import platform
import sys
# quick version check
if sys.version_info[0] == 2 or (sys.version_info[0] == 3 and sys.version_info[1] < 4):
print("Sorry, the Overviewer requires at least Python 3.4 to run.")
|
sys.exit(1)
import os
import os.path
import re
import subprocess
import multiprocessing
import t
|
ime
import logging
from argparse import ArgumentParser
from collections import OrderedDict
from overviewer_core import util
from overviewer_core import logger
from overviewer_core import textures
from overviewer_core import optimizeimages, world
from overviewer_core import config_parser, tileset, assetmanager, dispatcher
from overviewer_core import cache
from overviewer_core import observer
from overviewer_core.nbt import CorruptNBTError
helptext = """
%(prog)s [--rendermodes=...] [options] <World> <Output Dir>
%(prog)s --config=<config file> [options]"""
def main():
# bootstrap the logger with defaults
logger.configure()
if os.name == "posix":
if os.geteuid() == 0:
logging.warning("You are running Overviewer as root. "
"It is recommended that you never do this, "
"as it is dangerous for your system. If you are running "
"into permission errors, fix your file/directory "
"permissions instead. Overviewer does not need access to "
"critical system resources and therefore does not require "
"root access.")
try:
with open("/etc/redhat-release", "r") as release_f:
rel_contents = release_f.read()
try:
major_rel = re.search(r'\d(\.\d+)?', rel_contents).group(0).split('.')[0]
if major_rel == "6":
logging.warning(
"We will be dropping support for this release of your distribution "
"soon. Please upgrade as soon as possible, or you will not receive "
"future Overviewer updates.")
except AttributeError:
pass
except IOError:
pass
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
avail_north_dirs = ['lower-left', 'upper-left', 'upper-right', 'lower-right', 'auto']
# Parse for basic options
parser = ArgumentParser(usage=helptext)
parser.add_argument("-c", "--config", dest="config", action="store",
help="Specify the config file to use.")
parser.add_argument("-p", "--processes", dest="procs", action="store", type=int,
help="The number of local worker processes to spawn. Defaults to the "
"number of CPU cores your computer has.")
parser.add_argument("--pid", dest="pid", action="store", help="Specify the pid file to use.")
# Options that only apply to the config-less render usage
parser.add_argument("--rendermodes", dest="rendermodes", action="store",
help="If you're not using a config file, specify which rendermodes to "
"render with this option. This is a comma-separated list.")
parser.add_argument("world", nargs='?',
help="Path or name of the world you want to render.")
parser.add_argument("output", nargs='?',
help="Output directory for the rendered map.")
# Useful one-time render modifiers:
render_modifiers = parser.add_mutually_exclusive_group()
render_modifiers.add_argument("--forcerender", dest="forcerender", action="store_true",
help="Force re-render the entire map.")
render_modifiers.add_argument("--check-tiles", dest="checktiles", action="store_true",
help="Check each tile on disk and re-render old tiles.")
render_modifiers.add_argument("--no-tile-checks", dest="notilechecks", action="store_true",
help="Only render tiles that come from chunks that have changed "
"since the last render (the default).")
# Useful one-time debugging options:
parser.add_argument("--check-terrain", dest="check_terrain", action="store_true",
help="Try to locate the texture files. Useful for debugging texture"
" problems.")
parser.add_argument("-V", "--version", dest="version",
help="Display version information and then exits.", action="store_true")
parser.add_argument("--check-version", dest="checkversion",
help="Fetch information about the latest version of Overviewer.",
action="store_true")
parser.add_argument("--update-web-assets", dest='update_web_assets', action="store_true",
help="Update web assets. Will *not* render tiles or update "
"overviewerConfig.js.")
# Log level options:
parser.add_argument("-q", "--quiet", dest="quiet", action="count", default=0,
help="Print less output. You can specify this option multiple times.")
parser.add_argument("-v", "--verbose", dest="verbose", action="count", default=0,
help="Print more output. You can specify this option multiple times.")
parser.add_argument("--simple-output", dest="simple", action="store_true", default=False,
help="Use a simple output format, with no colors or progress bars.")
# create a group for "plugin exes"
# (the concept of a plugin exe is only loosely defined at this point)
exegroup = parser.add_argument_group("Other Scripts", "These scripts may accept different "
"arguments than the ones listed above.")
exegroup.add_argument("--genpoi", dest="genpoi", action="store_true",
help="Run the genPOI script.")
exegroup.add_argument("--skip-scan", dest="skipscan", action="store_true",
help="When running GenPOI, don't scan for entities.")
exegroup.add_argument("--skip-players", dest="skipplayers", action="store_true",
help="When running GenPOI, don't scan player data.")
args, unknowns = parser.parse_known_args()
# Check for possible shell quoting issues
if len(unknowns) > 0 and args.world and args.output:
possible_mistakes = []
for i in range(len(unknowns) + 1):
possible_mistakes.append(" ".join([args.world, args.output] + unknowns[:i]))
possible_mistakes.append(" ".join([args.output] + unknowns[:i]))
for mistake in possible_mistakes:
if os.path.exists(mistake):
logging.warning("Looks like you tried to make me use {0} as an argument, but "
"forgot to quote the argument correctly. Try using \"{0}\" "
"instead if the spaces are part of the path.".format(mistake))
parser.error("Too many arguments.")
parser.error("Too many arguments.")
# first thing to do is check for stuff in the exegroup:
if args.genpoi:
# remove the "--genpoi" option from sys.
|
dans-er/resync
|
resync/client_utils.py
|
Python
|
apache-2.0
| 4,699
| 0.008725
|
"""Client Utilities
Factor out code shared by both the resync and resync-explorer
clients.
Copyright 2012,2013 Simeon Warner
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
"""
import logging
import logging.config
import optparse
import sys
from resync.client import ClientFatalError
from resync.explorer import Explorer
from resync.utils i
|
mport UTCFormatter
def init_logging(to_file=False, logfile=None, default_logfile='/tmp/resync.log',
human=True, verbose=False, eval_mode=False,
default_logger='client', extra_loggers=None):
"""Initialize logging
Use of log levels:
DEBUG - very verbose, for evaluation
|
of output (-e)
INFO - verbose, only seen by users if they ask for it (-v)
    WARNING - messages output to the console
Logging to a file: If to_file is True then output will be written to
a file. This will be logfile if set, else default_logfile (which may
also be overridden).
"""
fmt = '%(asctime)s | %(name)s | %(levelname)s | %(message)s'
formatter = UTCFormatter(fmt)
if human:
# Create a special handler designed just for human readable output
hh = logging.StreamHandler()
hh.setLevel( logging.INFO if (verbose) else logging.WARNING )
hh.setFormatter(logging.Formatter(fmt='%(message)s'))
if to_file:
if (logfile is None):
logfile = default_logfile
fh = logging.FileHandler(filename=logfile, mode='a')
fh.setFormatter(formatter)
fh.setLevel( logging.DEBUG if (eval_mode) else logging.INFO )
loggers = [default_logger,'resync']
if (extra_loggers is not None):
for logger in extra_loggers:
loggers.append(logger)
for logger in loggers:
log = logging.getLogger(logger)
log.setLevel(logging.DEBUG) #control at handler instead
if human:
log.addHandler(hh)
if to_file:
log.addHandler(fh)
log=logging.getLogger(default_logger)
if (to_file):
log.info("Writing detailed log to %s" % (logfile))
def count_true_args(*args):
"""Count number of list of arguments that evaluate True"""
count=0
for arg in args:
if (arg):
count+=1
return(count)
def parse_links(args_link):
links=[]
if (args_link is not None):
for link_str in args_link:
try:
links.append(parse_link(link_str))
except ValueError as e:
raise ClientFatalError("Bad --link option '%s' (%s)"%(link_str,str(e)))
return(links)
def parse_link(link_str):
"""Parse --link option to add to <rs:ln> links
Input string of the form: rel,href,att1=val1,att2=val2
"""
atts={}
help_str = "--link option '%s' (format rel,href,att1=val1...)"%(link_str)
try:
segs = link_str.split(',')
# First segments are relation and subject
atts['rel'] = segs.pop(0)
atts['href'] = segs.pop(0)
if (atts['href']==''):
raise ClientFatalError("Missing uri in " + help_str)
# Remaining segments are attributes
for term in segs:
(k,v)=term.split('=')
if (k=='' or v==''):
raise ClientFatalError("Bad attribute (%s) in " % (term) + help_str)
atts[k]=v
except ValueError as e:
raise ClientFatalError("Bad component of " + help_str)
except IndexError as e:
raise ClientFatalError("Incomplete component of " + help_str)
return(atts)
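# Illustrative sketch (not part of the original module): the mapping parse_link performs
# for a hypothetical option value.
def _example_parse_link():
    # -> {'rel': 'describedby', 'href': 'http://example.org/about.xml', 'type': 'application/xml'}
    return parse_link("describedby,http://example.org/about.xml,type=application/xml")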
def parse_capabilities(caps_str):
"""Parse list of capabilities in --capabilitylist option
Input string of the form: cap_name=uri,cap_name=uri
"""
capabilities={}
try:
segs = caps_str.split(',')
for term in segs:
(k,v)=term.split('=')
capabilities[k]=v
except ValueError as e:
raise ClientFatalError("Bad component of --capabilitylist option '%s' (%s)"%(caps_str,str(e)))
return(capabilities)
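# Similarly (illustrative, hypothetical value): parse_capabilities turns a comma-separated
# list of name=uri pairs into a plain dict.
def _example_parse_capabilities():
    # -> {'resourcelist': 'http://example.org/rl.xml', 'changelist': 'http://example.org/cl.xml'}
    return parse_capabilities("resourcelist=http://example.org/rl.xml,changelist=http://example.org/cl.xml")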
def parse_capability_lists(cls_str):
"""Parse list of capability lists in --capabilitylistindex option
Input string of the form: uri,uri
"""
return(cls_str.split(','))
|
kandluis/machine-learning
|
prac4/practical4-code/modelbased.py
|
Python
|
mit
| 3,542
| 0.00734
|
from tdlearner import TDLearner
from collections import defaultdict
import numpy as np
# finds the root-mean-square difference between two equal-length lists a and b
def find_dist(a,b):
cumsum = 0.0
for i in xrange(len(a)):
cumsum += (a[i]-b[i])**2
return np.sqrt(cumsum/float(len(a)))
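# Worked example (illustrative, not in the original source): find_dist([0, 0], [3, 4])
# computes sqrt((9 + 16) / 2) ~= 3.54, i.e. the RMS difference rather than the plain
# Euclidean distance of 5.0.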
class ModelBased(TDLearner):
'''
    Implements a model-based learning algorithm with value iteration.
'''
def __init__(self, discount_fn = lambda i: 0,bucket_height = 1., bucket_width = 28, velocity_bucket = 1000):
super(ModelBased,self).__init__(learn_fn=lambda i:0, discount_fn=discount_fn,bucket_height=bucket_height, bucket_width=bucket_width,
velocity_bucket=velocity_bucket)
# keep track of current optimal policy, maps state s -> action a
self.optimal_policy = defaultdict(int)
# keep track of all states seen to iterate over in value_iter
self.seen_states = set()
def value_iter(self,discount):
if len(self.seen_states) != 0:
while True:
# store off old value function to test for convergence later
old_value_fn = []
for s in self.seen_states:
old_value_fn.append(self.V[s])
for s in self.seen_states:
# compute Q function for jump and no jump
for a in [0,1]:
# print "V states {}".format(self.V)
self.Q[s][a] = self.expected_reward(s,a) + discount * self.optimal_action_helper(s,a)
# find best action from state s
self.optimal_policy[s] = 1 if self.Q[s][1] > self.Q[s][0] else 0
# update value function for state s
self.V[s] = self.Q[s][self.optimal_policy[s]]
# update new value function
new_value_fn = []
for s in self.seen_states:
new_value_fn.append(self.V[s])
# test for convergence
# print "Old value {}".format(old_value_fn)
# print "V value {}".format(self.V)
# print find_dist(old_value_fn, new_value_fn)
if find_dist(old_value_fn, new_value_fn) < 0.1:
break
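    # Sketch of the backup the loop above approximates (illustrative, not in the original
    # source): for each previously seen state s and action a,
    #     Q(s, a) = E[r | s, a] + discount * sum_s' P(s' | s, a) * V(s')
    #     V(s)    = max_a Q(s, a)
    # with the expectation and transitions presumably estimated from the RSA, NSA and
    # NSAS counters that action_callback() updates below.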
def action_callback(self, state):
'''
Simple Q-Learning algorithm
'''
# what state are we in?
new_state = self.get_state(state)
if self.last_state is not None:
self.seen_states.add(self.last
|
_state)
# print "Last state {}".format(self.last_state)
|
        # increase iteration count and choose the action that maximizes expected reward
self.iter_num += 1
# we need update our Q for the last state and action
if (self.last_state is not None and
self.last_action is not None and
self.last_reward is not None):
s = self.last_state
a = self.last_action
r = self.last_reward
sp = new_state
self.NSA[(s, a)] += 1
self.NSAS[(s, a, sp)] += 1
self.RSA[(s, a)] += r
self.reachable[(s, a)].add(sp)
new_action = self.optimal_policy[sp]
else:
new_action = 1
# planning stage - updates optimal policy
discount = self.discount_fn(self.iter_num)
self.value_iter(discount)
self.last_action = new_action
self.last_state = new_state
return new_action
|
easies/dentist
|
dentist/main.py
|
Python
|
mit
| 2,740
| 0
|
#!/usr/bin/env python
from . import dentist
from .watchers import LogNotify, Notifier, Poller
import logging
import os
import sys
def parse():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-d', '--daemonize', dest='daemonize',
action='store_true', default=False)
parser.add_option('-a', '--access_log', dest='access_logs',
action='append', default=[])
parser.add_option('-p', '--home-prefix', dest='prefixes',
action='append', default=[])
parser.add_option('-e', '--error_log', dest='error_logs',
action='append', default=[])
parser.add_option('-u', '--parent_user_dir', dest='parent_user_dir',
default='/home')
parser.add_option('-o', '--output_dir', dest='output_dir',
default=None)
parser.add_option('-l', '--log_file', dest='log_file', metavar='PATH',
default=None)
return parser.parse_args()
def main():
options, args = parse()
log_kwargs = {
'level': logging.INFO,
'format': '%(asctime)-15s %(levelname)-8s %(message)s',
}
if options.log_file is not None:
log_kwargs['filename'] = options.log_file
logging.basicConfig(**log_kwargs)
access_logs = options.access_logs
error_logs = options.error_logs
if len(access_logs) == 0 and len(error_logs) == 0:
sys.stderr.write('You must specify at least one log.\n')
return 1
# Make the list into a set, so we don't duplicate
access_logs = set(map(os.path.abspath, access_logs))
error_logs = set(map(os.path.abspath, error_logs))
# Set the output directory of the user's
|
logs
if options.output_dir:
dentist.LogReader.set_output_directory(options.output_dir)
poller = Poller()
# Setup the log readers
# Add the customized prefixes
clr = dentist
|
.CombinedLogReader
clr.add_prefix(*options.prefixes)
# Set the root of the home directories
elr = dentist.ErrorLogReader
elr.configure(homedir_root=options.parent_user_dir)
notifier = Notifier()
# Create the list of files and log the set of their directories.
directories = set()
for f in access_logs:
notifier.add_log_notify(LogNotify(f, clr))
for f in error_logs:
notifier.add_log_notify(LogNotify(f, elr))
poller.register(notifier.inotify.fileno(), notifier.handler)
if options.daemonize:
from .daemonize import daemonize
daemonize()
try:
while True:
poller.poll()
except KeyboardInterrupt:
logging.shutdown()
return 0
if __name__ == '__main__':
sys.exit(main())
|
Andrei-Stepanov/avocado-vt
|
virttest/utils_misc.py
|
Python
|
gpl-2.0
| 116,705
| 0.000548
|
"""
Virtualization test utility functions.
:copyright: 2008-2009 Red Hat Inc.
"""
import time
import string
import random
import socket
import os
import stat
import signal
import re
import logging
import commands
import fcntl
import sys
import inspect
import tarfile
import shutil
import getpass
import ctypes
import threading
from avocado.core import status
from avocado.core import exceptions
from avocado.utils import git
from avocado.utils import path
from avocado.utils import process
from avocado.utils import genio
from avocado.utils import aurl
from avocado.utils import download
from avocado.utils import linux_modules
from . import data_dir
from . import error_context
from . import cartesian_config
from . import utils_selinux
from .staging import utils_koji
import platform
ARCH = platform.machine()
class UnsupportedCPU(exceptions.TestError):
pass
# TODO: remove this import when log_last_traceback is moved to autotest
import traceback
# TODO: this function is being moved into autotest. For compatibility
# reasons keep it here too but new code should use the one from base_utils.
class InterruptedThread(threading.Thread):
"""
Run a function in a background thread.
"""
def __init__(self, target, args=(), kwargs={}):
"""
Initialize the instance.
:param target: Function to run in the thread.
:param args: Arguments to pass to target.
:param kwargs: Keyword arguments to pass to target.
"""
threading.Thread.__init__(self)
|
self._target = target
self._args = args
self._kwargs = kwargs
def run(self):
"""
Ru
|
n target (passed to the constructor). No point in calling this
function directly. Call start() to make this function run in a new
thread.
"""
self._e = None
self._retval = None
try:
try:
self._retval = self._target(*self._args, **self._kwargs)
except Exception:
self._e = sys.exc_info()
raise
finally:
# Avoid circular references (start() may be called only once so
# it's OK to delete these)
del self._target, self._args, self._kwargs
def join(self, timeout=None, suppress_exception=False):
"""
Join the thread. If target raised an exception, re-raise it.
Otherwise, return the value returned by target.
:param timeout: Timeout value to pass to threading.Thread.join().
:param suppress_exception: If True, don't re-raise the exception.
"""
threading.Thread.join(self, timeout)
try:
if self._e:
if not suppress_exception:
# Because the exception was raised in another thread, we
# need to explicitly insert the current context into it
s = exceptions.exception_context(self._e[1])
s = exceptions.join_contexts(exceptions.get_context(), s)
exceptions.set_exception_context(self._e[1], s)
raise self._e[0], self._e[1], self._e[2]
else:
return self._retval
finally:
# Avoid circular references (join() may be called multiple times
# so we can't delete these)
self._e = None
self._retval = None
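# Illustrative usage sketch (not part of the original module): run a callable in the
# background and collect its return value; join() re-raises any exception from the
# target unless suppress_exception is True.
def _example_interrupted_thread():
    t = InterruptedThread(target=lambda x: x * 2, args=(21,))
    t.start()
    return t.join()  # 42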
def write_keyval(path, dictionary, type_tag=None, tap_report=None):
"""
Write a key-value pair format file out to a file. This uses append
mode to open the file, so existing text will not be overwritten or
reparsed.
If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type_tag is not
    None then the keys must also have "{type_tag}" as a suffix. At
the moment the only valid values of type_tag are "attr" and "perf".
:param path: full path of the file to be written
:param dictionary: the items to write
:param type_tag: see text above
"""
if os.path.isdir(path):
path = os.path.join(path, 'keyval')
keyval = open(path, 'a')
if type_tag is None:
key_regex = re.compile(r'^[-\.\w]+$')
else:
if type_tag not in ('attr', 'perf'):
raise ValueError('Invalid type tag: %s' % type_tag)
escaped_tag = re.escape(type_tag)
key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
try:
for key in sorted(dictionary.keys()):
if not key_regex.search(key):
raise ValueError('Invalid key: %s' % key)
keyval.write('%s=%s\n' % (key, dictionary[key]))
finally:
keyval.close()
# same for tap
if tap_report is not None and tap_report.do_tap_report:
tap_report.record_keyval(path, dictionary, type_tag=type_tag)
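# Illustrative example (not in the original source): write_keyval('/tmp/results', {'foo': 1})
# appends the line "foo=1" to /tmp/results/keyval if that path is a directory, or to the
# file itself otherwise; with type_tag='perf' the key would have to look like 'foo{perf}'.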
def log_last_traceback(msg=None, log=logging.error):
"""
Writes last traceback into specified log.
:warning: This function is being moved into autotest and your code should
use autotest.client.shared.base_utils function instead.
:param msg: Override the default message. ["Original traceback"]
:param log: Where to log the traceback [logging.error]
"""
if not log:
log = logging.error
if msg:
log(msg)
exc_type, exc_value, exc_traceback = sys.exc_info()
if not exc_traceback:
log('Requested log_last_traceback but no exception was raised.')
return
log("Original " +
"".join(traceback.format_exception(exc_type, exc_value,
exc_traceback)))
def aton(sr):
"""
    Transform a string to a number (int or float). If the string is not in
    the form of a number, return False.
    :param sr: string to transform
:return: float, int or False for failed transform
"""
try:
return int(sr)
except ValueError:
try:
return float(sr)
except ValueError:
return False
def find_substring(string, pattern1, pattern2=None):
"""
    Return the match of pattern1 in string, or the match of pattern2
    if pattern1 is not matched.
    :param string: string to search
    :param pattern1: first pattern to match in string, required.
    :param pattern2: second pattern, used if pattern1 does not match, optional.
    :return: matched substring or None
"""
if not pattern1:
logging.debug("pattern1: get empty string.")
return None
pattern = pattern1
if pattern2:
pattern += "|%s" % pattern2
ret = re.findall(pattern, string)
if not ret:
logging.debug("Could not find matched string with pattern: %s",
pattern)
return None
return ret[0]
def lock_file(filename, mode=fcntl.LOCK_EX):
lockfile = open(filename, "w")
fcntl.lockf(lockfile, mode)
return lockfile
def unlock_file(lockfile):
fcntl.lockf(lockfile, fcntl.LOCK_UN)
lockfile.close()
# Utility functions for dealing with external processes
def unique(llist):
"""
    Return a list of the elements in llist, but without duplicates.
    :param llist: list with values.
    :return: list with non-duplicate elements.
"""
n = len(llist)
if n == 0:
return []
u = {}
try:
for x in llist:
u[x] = 1
except TypeError:
return None
else:
return u.keys()
def find_command(cmd):
"""
Try to find a command in the PATH, paranoid version.
:param cmd: Command to be found.
:raise: ValueError in case the command was not found.
"""
common_bin_paths = ["/usr/libexec", "/usr/local/sbin", "/usr/local/bin",
"/usr/sbin", "/usr/bin", "/sbin", "/bin"]
try:
path_paths = os.environ['PATH'].split(":")
except IndexError:
path_paths = []
path_paths = unique(common_bin_paths + path_paths)
for dir_path in path_paths:
cmd_path = os.path.join(dir_path, cmd)
if os.path.isfile(cmd_path):
return os.path.abspath(cmd_path)
raise ValueError('Missing command:
|
plxaye/chromium
|
src/chrome/common/extensions/docs/server2/test_file_system_test.py
|
Python
|
apache-2.0
| 6,461
| 0.002476
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
from file_system import FileNotFoundError, StatInfo
from test_file_system import TestFileSystem
import unittest
_TEST_DATA = {
'404.html': '404.html contents',
'apps': {
'a11y.html': 'a11y.html contents',
'about_apps.html': 'about_apps.html contents',
'fakedir': {
'file.html': 'file.html contents'
}
},
'extensions': {
'activeTab.html': 'activeTab.html contents',
'alarms.html': 'alarms.html contents'
}
}
def _Get(fn):
'''Returns a function which calls Future.Get on the result of |fn|.
'''
return lambda *args: fn(*args).Get()
class TestFileSystemTest(unittest.TestCase):
def testEmptyFileSystem(self):
self._TestMetasyntacticPaths(TestFileSystem({}))
def testNonemptyFileNotFoundErrors(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self._TestMetasyntacticPaths(fs)
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['404.html/'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo/'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo.html'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo.html'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo/',
'apps/foo.html'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo/',
'apps/a11y.html'])
def _TestMetasyntacticPaths(self, fs):
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['foo'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['bar/'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['bar/baz'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['foo',
'bar/',
'bar/baz'])
self.assertRaises(FileNotFoundError, fs.Stat, 'foo')
self.assertRaises(FileNotFoundError, fs.Stat, 'bar/')
self.assertRaises(FileNotFoundError, fs.Stat, 'bar/baz')
def testNonemptySuccess(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self.assertEqual('404.html contents', fs.ReadSingle('404.html'))
self.assertEqual('404.html contents', fs.ReadSingle('/404.html'))
self.assertEqual('a11y.html contents', fs.ReadSingle('apps/a11y.html'))
self.assertEqual(set(['404.html', 'apps/', 'extensions/']),
set(fs.ReadSingle('/')))
self.assertEqual(set(['a11y.html', 'about_apps.html', 'fakedir/']),
set(fs.ReadSingle('apps/')))
self.assertEqual(set(['a11y.html', 'about_apps.html', 'fakedir/']),
set(fs.ReadSingle('/apps/')))
def testStat(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self.assertRaises(FileNotFoundError, fs.Stat, 'foo')
self.assertRaises(FileNotFoundError, fs.Stat, '404.html/')
self.assertEquals(StatInfo('0'), fs.Stat('404.html'))
self.assertEquals(StatInfo('0', child_versions={
'activeTab.html': '0',
'alarms.html': '0',
}), fs.Stat('extensions/'))
fs.IncrementStat()
self.assertEquals(StatInfo('1'), fs.Stat('404.html'))
self.assertEquals(StatInfo('1', child_versions={
'activeTab.html': '1',
'alarms.html': '1',
}), fs.Stat('extensions/'))
fs.IncrementStat(path='404.html')
self.assertEquals(StatInfo('2'), fs.Stat('404.html'))
self.assertEquals(StatInfo('1', child_versions={
'activeTab.html': '1',
'alarms.html': '1',
}), fs.Stat('extensions/'))
fs.IncrementStat()
self.assertEquals(StatInfo('3'), fs.Stat('404.html'))
self.assertEquals(StatInfo('2', child_versions={
'activeTab.html': '2',
'alarms.html': '2',
}), fs.Stat('extensions/'))
fs.IncrementStat(path='extensions/')
self.assertEquals(StatInfo('3'), fs.Stat('404.html'))
self.assertEquals(StatInfo('3', child_versions={
'activeTab.html': '2',
'alarms.html': '2',
}), fs.Stat('extensions/'))
fs.IncrementStat(path='extensions/alarms.html')
self.assertEquals(StatInfo('3'), fs.Stat('404.html'))
self.assertEquals(StatInfo('3', child_versions={
'activeTab.html': '2',
'alarms.html': '3',
}), fs.Stat('extensions/'))
def testCheckAndReset(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self.assertTrue(fs.CheckAndReset())
self.assertFalse(fs.CheckAndReset(read_count=1))
self.assertFalse(fs.CheckAndReset(stat_count=1))
fs.ReadSingle('apps/')
self.assertTrue(fs.CheckAndReset(read_count=1))
self.assertFalse(fs.CheckAndReset(read_count=1))
self.assertTrue(fs.CheckAndReset())
fs.ReadSingle('apps/')
self.assertFalse(fs.CheckAndReset(read_count=2))
fs.ReadSingle('extensions/')
fs.ReadSingle('extensions/')
self.assertTrue(fs.CheckAndReset(read_count=2))
self.assertFalse(fs.CheckAndReset(read_count=2))
self.assertTrue(fs.CheckAndReset())
fs.ReadSingle('404.html')
fs.Read(['notfound.html', 'apps/'])
self.assertTrue(fs.CheckAndReset(read_count=2))
fs.Stat('404.html')
fs
|
.Stat('404.html')
fs.Stat('apps/')
self.assertFalse(
|
fs.CheckAndReset(stat_count=42))
self.assertFalse(fs.CheckAndReset(stat_count=42))
self.assertTrue(fs.CheckAndReset())
fs.ReadSingle('404.html')
fs.Stat('404.html')
fs.Stat('apps/')
self.assertTrue(fs.CheckAndReset(read_count=1, stat_count=2))
self.assertTrue(fs.CheckAndReset())
def testMoveTo(self):
self.assertEqual({'foo': {'a': 'b', 'c': 'd'}},
TestFileSystem.MoveTo('foo', {'a': 'b', 'c': 'd'}))
self.assertEqual({'foo': {'bar': {'a': 'b', 'c': 'd'}}},
TestFileSystem.MoveTo('foo/bar', {'a': 'b', 'c': 'd'}))
self.assertEqual({'foo': {'bar': {'baz': {'a': 'b'}}}},
TestFileSystem.MoveTo('foo/bar/baz', {'a': 'b'}))
if __name__ == '__main__':
unittest.main()
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/python/compat.py
|
Python
|
gpl-3.0
| 19,303
| 0.001813
|
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
def currentframe(n=0):
"""
In Python 3, L{inspect.currentframe} does not take
|
a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@type n: L{int}
@return: a frame, n
|
levels up the stack from the caller.
@rtype: L{types.FrameType}
"""
f = inspect.currentframe()
for x in range(n + 1):
f = f.f_back
return f
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        bad_chars = [x for x in addr if x not in string.hexdigits + ':.']
        if bad_chars:
            raise ValueError("Illegal characters: %r" % (''.join(bad_chars),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
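# Worked example (illustrative, not in the original source): the elided loopback address
# is expanded, so inet_pton(socket.AF_INET6, '::1') yields fifteen zero bytes followed
# by '\x01'.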
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
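# Illustrative round trip using the fallback implementations above (a sketch,
# not part of the original module):
#
#     packed = inet_pton(socket.AF_INET6, 'fe80::1')
#     inet_ntop(socket.AF_INET6, packed)   # -> 'fe80::1'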
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
fin = open(filename, "rbU")
try:
source = fin.read()
finally:
fin.close()
code = compile(source, filename, "exec")
exec(code, globals, locals)
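# Illustrative usage of the compatibility execfile above (a sketch, not part
# of the original module; 'startup.py' is a hypothetical path):
#
#     namespace = {}
#     execfile('startup.py', namespace)
#     # names defined by startup.py are now available in `namespace`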
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
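# Illustrative usage of @comparable (a sketch, not part of the original
# module): a class only needs to define __cmp__ and the decorator supplies
# the rich comparison methods on Python 3.
#
#     @comparable
#     class Version(object):
#         def __init__(self, n):
#             self.n = n
#         def __cmp__(self, other):
#             return cmp(self.n, other.n)
#
#     # Version(1) < Version(2) is then True on both Python 2 and Python 3.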
if _PY3:
unicode = str
long = int
else:
unicode = unicode
long = long
def ioType(fileIshObject, default=unicode):
"""
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
@return: There are 3 possible return values:
1. L{unicode}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. L{
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-0006/skeinforge_tools/comb.py
|
Python
|
gpl-2.0
| 26,905
| 0.043895
|
"""
Comb is a script to comb the extrusion hair of a gcode file.
The default 'Activate Comb' checkbox is on. When it is on, the functions described below will work, when it is off, the functions
will not be called.
Comb bends the extruder travel paths around holes in the carve, to avoid stringers. It moves the extruder to the inside of outer
perimeters before turning the extruder on so any start up ooze will be inside the shape. It jitters the loop end position to a
different place on each layer to prevent a ridge from forming. The 'Arrival Inset Follow Distance over Extrusion Width' is the
ratio of the amount before the start of the outer perimeter the extruder will be moved to. A high value means the extruder will
move way before the beginning of the perimeter and a low value means the extruder will be moved just before the beginning.
The "Jitter Over Extrusion Width (ratio)" is the ratio of the amount the loop ends will be jittered. A high value means the loops
will start all over the place and a low value means loops will start at roughly the same place on each layer. The 'Minimum
Perimeter Departure Distance over Extrusion Width' is the ratio of the minimum distance that the extruder will travel and loop
before leaving an outer perimeter. A high value means the extruder will loop many times before leaving, so that the ooze will
finish within the perimeter, a low value means the extruder will not loop and a stringer might be created from the outer
perimeter. To run comb, in a shell type:
> python comb.py
The following examples comb the files Screw Holder Bottom.gcode & Screw Holder Bottom.stl. The examples are run in a terminal in the folder
which contains Screw Holder Bottom.gcode, Screw Holder Bottom.stl and comb.py. The comb function will comb if 'Activate Comb' is true, which
can be set in the dialog or by changing the preferences file 'comb.csv' in the '.skeinforge' folder in your home directory with a text
editor or a spreadsheet program set to separate tabs. The functions writeOutput and getCombChainGcode check to see if the
text has been combed, if not they call getTowerChainGcode in tower.py to tower the text; once they have the towered text, then
they comb. Pictures of combing in action are available from the Metalab blog at:
http://reprap.soup.io/?search=combing
> python comb.py
This brings up the dialog, after clicking 'Comb', the following is printed:
File Screw Holder Bottom.stl is being chain combed.
The combed file is saved as Screw Holder Bottom_comb.gcode
>python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import comb
>>> comb.main()
This brings up the comb dialog.
>>> comb.writeOutput()
Screw Holder Bottom.stl
File Screw Holder Bottom.stl is being chain combed.
The combed file is saved as Screw Holder Bottom_comb.gcode
>>> comb.getCombGcode("
( GCode generated by May 8, 2008 carve.py )
( Extruder Initialization )
..
many lines of gcode
..
")
>>> comb.getCombChainGcode("
( GCode generated by May 8, 2008 carve.py )
( Extruder Initialization )
..
many lines of gcode
..
")
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from skeinforge_tools.skeinforge_utilities import euclidean
from skeinforge_tools.skeinforge_utilities import gcodec
from skeinforge_tools.skeinforge_utilities import intercircle
from skeinforge_tools.skeinforge_utilities import preferences
from skeinforge_tools import analyze
from skeinforge_tools.skeinforge_utilities import interpret
from skeinforge_tools import polyfile
from skeinforge_tools import tower
import cStringIO
import math
import sys
import time
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
#patched over falling tower comb bug if location.z < self.getBetweens()[ 0 ][ 0 ].z + 0.5 * self.extrusionWidth, but a real solution would be nice
#addLoopsBeforeLeavingPerimeter or something before crossing bug, seen on layer 8 of Screw holder
def getCombChainGcode( fileName, gcodeText, combPreferences = None ):
"Comb a gcode linear move text. Chain comb the gcode if it is not already combed."
gcodeText = gcodec.getGcodeFileText( fileName, gcodeText )
if not gcodec.isProcedureDone( gcodeText, 'tower' ):
gcodeText = tower.getTowerChainGcode( fileName, gcodeText )
return getCombGcode( gcodeText, combPreferences )
def getCombGcode( gcodeText, combPreferences = None ):
"Comb a gcode linear move text."
if gcodeText == '':
return ''
if gcodec.isProcedureDone( gcodeText, 'comb' ):
return gcodeText
if combPreferences == None:
combPreferences = CombPreferences()
preferences.readPreferences( combPreferences )
if not combPreferences.activateComb.value:
return gcodeText
skein = CombSkein()
skein.parseGcode( combPreferences, gcodeText )
return skein.output.getvalue()
def isLoopNumberEqual( betweenX, betweenXIndex, loopNumber ):
"Determine if the loop number is equal."
if betweenXIndex >= len( betweenX ):
return False
return betweenX[ betweenXIndex ].index == loopNumber
def writeOutput( fileName = '' ):
"Comb a gcode linear move file. Chain comb the gcode if it is not already combed. If no fileName is specified, comb the first unmodified gcode file in this folder."
if fileName == '':
unmodified = interpret.getGNUTranslatorFilesUnmodified()
if len( unmodified ) == 0:
print( "There are no unmodified gcode files in this folder." )
return
fileName = unmodified[ 0 ]
combPreferences = CombPreferences()
preferences.readPreferences( combPreferences )
startTime = time.time()
print( 'File ' + gcodec.getSummarizedFilename( fileName ) + ' is being chain combed.' )
suffixFilename = fileName[ : fileName.rfind( '.' ) ] + '_comb.gcode'
combGcode = getCombChainGcode( fileName, '', combPreferences )
if combGcode == '':
return
gcodec.writeFileText( suffixFilename, combGcode )
print( 'The combed file is saved as ' + gcodec.getSummarizedFilename( suffixFilename ) )
analyze.writeOutput( suffixFilename, combGcode )
print( 'It took ' + str( int( round( time.time() - startTime ) ) ) + ' seconds to comb the file.' )
class CombPreferences:
"A class to handle the comb preferences."
def __init__( self ):
"Set the default preferences, execute title & preferences fileName."
#Set the default preferences.
self.archive = []
self.activateComb = preferences.BooleanPreference().getFromValue( 'Activate Comb', True )
self.archive.append( self.activateComb )
self.arrivalInsetFollowDistanceOverExtrusionWidth = preferences.FloatPreference().getFromValue( 'Arrival Inset Follow Distance over Extrusion Width (ratio):', 3.0 )
self.archive.append( self.arrivalInsetFollowDistanceOverExtrusionWidth )
self.jitterOverExtrusionWidth = preferences.FloatPreference().getFromValue( 'Jitter Over Extrusion Width (ratio):', 2.0 )
self.archive.append( self.jitterOverExtrusionWidth )
self.minimumPerimeterDepartureDistanceOverExtrusionWidth = preferences.FloatPreference().getFromValue( 'Minimum Perimeter Departure Distance over Extrusion Width (ratio):', 30.0 )
		self.archive.append( self.minimumPerimeterDepartureDistanceOverExtrusionWidth )
self.fileNameInput = preferences.Filename().getFromFilename( interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File to be Combed', '' )
		self.archive.append( self.fileNameInput )
#Create the archive, title of the execute button, title of the dialog & preferences fileName.
self.executeTitle = 'Comb'
self.saveTitle = 'Save Preferences'
preferences.setHelpPreferencesFileNameTitleWindowPosition( self, 'skeinforge_tools.comb.html' )
def execute( self ):
"Comb button has been clicked."
fileNames = polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, interpret.getImportPluginFilenames(), self.fileNameInput.wasCancelled )
for fileName in fileNames:
writeOutput( fileName )
class CombSkein:
"A class to comb a skein of extru
|
jiemakel/omorfi
|
test/conllu-compare.py
|
Python
|
gpl-3.0
| 7,037
| 0.001279
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Compare two conllu files for matches on each field.
"""
from argparse import ArgumentParser, FileType
from sys import stderr
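# Example invocation (illustrative only; the file names are hypothetical):
#
#     python3 conllu-compare.py -H hypothesis.conllu -r reference.conllu \
#         -l mismatches.log -t 99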
def main():
a = ArgumentParser()
a.add_argument('-H', '--hypothesis', metavar="HYPFILE", type=open, required=True,
                   dest="hypfile", help="analysis results")
a.add_argument('-r', '--reference', metavar="REFFILE", type=open,
required=True,
dest="reffile", help="reference data")
a.add_argument('-l', '--log', metavar="LOGFILE", required=True,
type=FileType('w'),
dest="logfile", help="result file")
a.add_argument('-X', '--realign', action="store_true", default=False,
help="Allow fuzzy matches if tokenisation differs")
a.add_argument('-v', '--verbose', action="store_true", default=False,
help="Print verbosely while processing")
a.add_argument('-t', '--thresholds', metavar='THOLDS', default=99, type=int,
help="require THOLD % for lemma, UPOS and UFEAT or exit 1 (for testing)")
options = a.parse_args()
#
lines = 0
deplines = 0
skiplines = 0
# count this
missed_lemmas = 0
missed_uposes = 0
missed_tdtposes = 0
missed_feats = 0
missed_uds = 0
missed_udlabs = 0
missed_deps2 = 0
missed_misc = 0
for hypline in options.hypfile:
refline = next(options.reffile)
lines += 1
infields = hypline.strip().split('\t')
reffields = refline.strip().split('\t')
if len(infields) < 4:
if 'doc-name' in hypline:
continue
elif 'sentence-text' in hypline:
while hypline != refline:
refline = next(options.reffile)
continue
elif hypline == refline:
continue
else:
print("mismatched unknown non-content! IN:", hypline, "REF:", refline,
sep='\n')
exit(1)
if infields[0] != reffields[0]:
if '-' in reffields[0]:
refline = next(options.reffile)
reffields = refline.strip().split('\t')
else:
skiplines += 1
print("misaligned (index)! IN:", infields[0], "REF:", reffields[0],
"\n", hypline, refline, "skipping...", file=stderr)
if options.realign:
while hypline != "":
skiplines += 1
hypline = next(options.hypfile).strip()
while refline != "":
refline = next(options.reffile).strip()
continue
else:
exit(1)
if infields[1] != reffields[1]:
skiplines += 1
print("misaligned (surface)! IN:", infields[1], "REF:", reffields[1],
"\n", hypline, "\n", refline, file=stderr)
if options.realign:
while hypline != "":
skiplines += 1
hypline = next(options.hypfile).strip()
while refline != "":
refline = next(options.reffile).strip()
continue
else:
exit(1)
if infields[2] != reffields[2]:
missed_lemmas += 1
print("LEMMA", infields[2], reffields[2], file=options.logfile)
print("SURFS", infields[1], reffields[1], file=options.logfile)
print("LEMMA|SURF", infields[1], reffields[1], infields[
2], reffields[2], file=options.logfile)
if infields[3] != reffields[3]:
missed_uposes += 1
print("UPOS", infields[3], reffields[3], file=options.logfile)
print("SURFS", infields[1], reffields[1], file=options.logfile)
print("UPOS|SURF", infields[1], reffields[1], infields[
3], reffields[3], file=options.logfile)
if infields[4] != reffields[4]:
missed_tdtposes += 1
print("TDTPOS", infields[4], reffields[4], file=options.logfile)
print("SURFS", infields[1], reffields[1], file=options.logfile)
print("TDTPOS|SURF", infields[1], reffields[1], infields[
4], reffields[4], file=options.logfile)
if infields[5] != reffields[5]:
missed_feats += 1
print("UFEAT", infields[5], reffields[5], file=options.logfile)
print("SURFS", infields[1], reffields[1], file=options.logfile)
print("UFEAT|SURF", infields[1], reffields[1], infields[
5], reffields[5], file=options.logfile)
if infields[6] != reffields[6]:
missed_uds += 1
print("UD", infields[6], reffields[6], file=options.logfile)
if infields[7] != reffields[7]:
missed_udlabs += 1
print("UDLAB", infields[7], reffields[7], file=options.logfile)
if infields[8] != reffields[8]:
missed_deps2 += 1
print("DEPS2", infields[8], reffields[8], file=options.logfile)
if infields[9] != reffields[9]:
missed_misc += 1
print("MISC", infields[9], reffields[9], file=options.logfile)
deplines += 1
print("Lines", "Lemma", "UPOS", "UFEAT", "TDT POS", "UD →", "UD LAB",
"2^ndDEP", "MISC", sep="\t")
print(deplines,
deplines - missed_lemmas,
deplines - missed_uposes,
deplines - missed_feats,
deplines - missed_tdtposes,
deplines - missed_uds,
deplines - missed_udlabs,
deplines - missed_deps2,
deplines - missed_misc,
sep="\t")
    print(deplines / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_lemmas) / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_uposes) / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_feats) / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_tdtposes) / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_uds) / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_udlabs) / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_deps2) / deplines * 100 if deplines != 0 else 0,
          (deplines - missed_misc) / deplines * 100 if deplines != 0 else 0,
sep="\t")
print("Skipped due to tokenisation etc. (no fuzz):", skiplines)
if deplines == 0 or \
((deplines - missed_lemmas) / deplines * 100 < options.thresholds) or\
((deplines - missed_uposes) / deplines * 100 < options.thresholds) or\
((deplines - missed_feats) / deplines * 100 < options.thresholds):
print("needs to have", options.thresholds,
"% matches to pass regress test\n",
file=stderr)
exit(1)
else:
exit(0)
if __name__ == "__main__":
main()
|
varses/awsch
|
lantz/drivers/andor/ccd.py
|
Python
|
bsd-3-clause
| 74,992
| 0.00028
|
# -*- coding: utf-8 -*-
# pylint: disable=E265
"""
lantz.drivers.andor.ccd
~~~~~~~~~~~~~~~~~~~~~~~
Low level driver wrapping library for CCD and Intensified CCD cameras.
Only functions for iXon EMCCD cameras were tested.
Only tested in Windows OS.
The driver was written for the single-camera scenario. If more than one
camera is present, some 'read_once=True' should be erased but it
shouldn't be necessary to make any more changes.
Sources::
- Andor SDK 2.96 Manual
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import numpy as np
import ctypes as ct
from collections import namedtuple
from lantz import Driver, Feat, Action, DictFeat
from lantz.errors import InstrumentError
from lantz.foreign import LibraryDriver
from lantz import Q_
degC = Q_(1, 'degC')
us = Q_(1, 'us')
MHz = Q_(1, 'MHz')
seg = Q_(1, 's')
_ERRORS = {
20002: 'DRV_SUCCESS',
20003: 'DRV_VXDNOTINSTALLED',
20004: 'DRV_ERROR_SCAN',
20005: 'DRV_ERROR_CHECK_SUM',
20006: 'DRV_ERROR_FILELOAD',
20007: 'DRV_UNKNOWN_FUNCTION',
20008: 'DRV_ERROR_VXD_INIT',
20009: 'DRV_ERROR_ADDRESS',
20010: 'DRV_ERROR_PAGELOCK',
20011: 'DRV_ERROR_PAGE_UNLOCK',
20012: 'DRV_ERROR_BOARDTEST',
20013: 'Unable to communicate with card.',
20014: 'DRV_ERROR_UP_FIFO',
20015: 'DRV_ERROR_PATTERN',
20017: 'DRV_ACQUISITION_ERRORS',
20018: 'Computer unable to read the data via the ISA slot at the required rate.',
20019: 'DRV_ACQ_DOWNFIFO_FULL',
20020: 'RV_PROC_UNKNOWN_INSTRUCTION',
20021: 'DRV_ILLEGAL_OP_CODE',
20022: 'Unable to meet Kinetic cycle time.',
20023: 'Unable to meet Accumulate cycle time.',
20024: 'No acquisition has taken place',
20026: 'Overflow of the spool buffer.',
20027: 'DRV_SPOOLSETUPERROR',
20033: 'DRV_TEMPERATURE_CODES',
20034: 'Temperature is OFF.',
20035: 'Temperature reached but not stabilized.',
20036: 'Temperature has stabilized at set point.',
20037: 'Temperature has not reached set point.',
20038: 'DRV_TEMPERATURE_OUT_RANGE',
20039: 'DRV_TEMPERATURE_NOT_SUPPORTED',
20040: 'Temperature had stabilized but has since drifted.',
20049: 'DRV_GENERAL_ERRORS',
20050: 'DRV_INVALID_AUX',
20051: 'DRV_COF_NOTLOADED',
20052: 'DRV_FPGAPROG',
20053: 'DRV_FLEXERROR',
20054: 'DRV_GPIBERROR',
20064: 'DRV_DATATYPE',
20065: 'DRV_DRIVER_ERRORS',
20066: 'Invalid parameter 1',
20067: 'Invalid parameter 2',
20068: 'Invalid parameter 3',
20069: 'Invalid parameter 4',
20070: 'DRV_INIERROR',
20071: 'DRV_COFERROR',
20072: 'Acquisition in progress',
20073: 'The system is not currently acquiring',
20074: 'DRV_TEMPCYCLE',
20075: 'System not initialized',
20076: 'DRV_P5INVALID',
20077: 'DRV_P6INVALID',
20078: 'Not a valid mode',
20079: 'DRV_INVALID_FILTER',
20080: 'DRV_I2CERRORS',
20081: 'DRV_DRV_I2CDEVNOTFOUND',
20082: 'DRV_I2CTIMEOUT',
20083: 'DRV_P7INVALID',
20089: 'DRV_USBERROR',
20090: 'DRV_IOCERROR',
20091: 'DRV_VRMVERSIONERROR',
20093: 'DRV_USB_INTERRUPT_ENDPOINT_ERROR',
20094: 'DRV_RANDOM_TRACK_ERROR',
20095: 'DRV_INVALID_TRIGGER_MODE',
20096: 'DRV_LOAD_FIRMWARE_ERROR',
20097: 'DRV_DIVIDE_BY_ZERO_ERROR',
20098: 'DRV_INVALID_RINGEXPOSURES',
20099: 'DRV_BINNING_ERROR',
20990: 'No camera present',
20991: 'Feature not supported on this camera.',
20992: 'Feature is not available at the moment.',
20115: 'DRV_ERROR_MAP',
20116: 'DRV_ERROR_UNMAP',
20117: 'DRV_ERROR_MDL',
20118: 'DRV_ERROR_UNMDL',
    20119: 'DRV_ERROR_BUFFSIZE',
20121: 'DRV_ERROR_NOHANDLE',
20130: 'DRV_GATING_NOT_AVAILABLE',
20131: 'DRV_FPGA_VOLTAGE_ERROR',
20100: 'DRV_INVALID_AMPLIFIER',
20101: 'DRV_INVALID_COUNTCONVERT_MODE'
}
class CCD(LibraryDriver):
LIBRARY_NAME = 'atmcd64d.dll'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cameraIndex = ct.c_int(0)
def _patch_functions(self):
internal = self.lib.internal
        internal.GetCameraSerialNumber.argtypes = [ct.pointer(ct.c_uint)]
internal.Filter_SetAveragingFactor.argtypes = [ct.c_int]
internal.Filter_SetThreshold.argtypes = ct.c_float
internal.Filter_GetThreshold.argtypes = ct.c_float
def _return_handler(self, func_name, ret_value):
excl_func = ['GetTemperatureF', 'IsCountConvertModeAvailable',
'IsAmplifierAvailable', 'IsTriggerModeAvailable']
if ret_value != 20002 and func_name not in excl_func:
raise InstrumentError('{}'.format(_ERRORS[ret_value]))
return ret_value
def initialize(self):
""" This function will initialize the Andor SDK System. As part of the
initialization procedure on some cameras (i.e. Classic, iStar and
earlier iXion) the DLL will need access to a DETECTOR.INI which
contains information relating to the detector head, number pixels,
readout speeds etc. If your system has multiple cameras then see the
section Controlling multiple cameras.
"""
self.lib.Initialize()
self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,
'External Exposure': 7, 'External FVB EM': 9,
'Software Trigger': 10,
'External Charge Shifting': 12}
self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}
# Initial values
self.readout_packing_state = False
self.readout_packing = self.readout_packing_state
self.readout_mode_mode = 'Image'
self.readout_mode = self.readout_mode_mode
self.photon_counting_mode_state = False
self.photon_counting_mode = self.photon_counting_mode_state
self.frame_transfer_mode_state = False
self.frame_transfer_mode = self.frame_transfer_mode_state
self.fan_mode_index = 'onfull'
self.fan_mode = self.fan_mode_index
self.EM_gain_mode_index = 'RealGain'
self.EM_gain_mode = self.EM_gain_mode_index
self.cooled_on_shutdown_value = False
self.cooled_on_shutdown = self.cooled_on_shutdown_value
self.baseline_offset_value = 100
self.baseline_offset = self.baseline_offset_value
self.adv_trigger_mode_state = True
self.adv_trigger_mode = self.adv_trigger_mode_state
self.acq_mode = 'Single Scan'
self.acquisition_mode = self.acq_mode
self.amp_typ = 0
self.horiz_shift_speed_index = 0
self.horiz_shift_speed = self.horiz_shift_speed_index
self.vert_shift_speed_index = 0
self.vert_shift_speed = self.vert_shift_speed_index
self.preamp_index = 0
self.preamp = self.preamp_index
self.temperature_sp = 0 * degC
self.temperature_setpoint = self.temperature_sp
self.auxout = np.zeros(4, dtype=bool)
for i in np.arange(1, 5):
self.out_aux_port[i] = False
self.trigger_mode_index = 'Internal'
self.trigger_mode = self.trigger_mode_index
def finalize(self):
"""Finalize Library. Concluding function.
"""
if self.status != 'Camera is idle, waiting for instructions.':
self.abort_acquisition()
self.cooler_on = False
self.free_int_mem()
self.lib.ShutDown()
### SYSTEM INFORMATION
@Feat(read_once=True)
def ncameras(self):
"""This function returns the total number of Andor cameras currently
installed. It is possible to call this function before any of the
cameras are initialized.
"""
n = ct.c_long()
self.lib.GetAvailableCameras(ct.pointer(n))
return n.value
def camera_handle(self, index):
"""This function returns the handle for the camera specified by
cameraIndex. When multiple Andor cameras are installed the handle of
each camera must be retrieved in order to select a camera using the
|
kk6/poco
|
tests/test_utils.py
|
Python
|
mit
| 1,427
| 0.000701
|
# -*- coding: utf-8 -*-
from datetime import datetime, timezone, timedelta
import pytest
class TestStr2Datetime(object):
@pytest.fixture
def target_func(self):
from poco.utils import str2datetime
return str2datetime
@pytest.mark.parametrize(
's,expected',
[
('2016-01-01 00:00:00 +0900',
datetime(2016, 1, 1, 0, 0, 0, tzinfo=timezone(timedelta(0, 32400)))),
('', None),
],
)
def test_call(self, target_func, s, expected):
assert target_func(s) == expected
@pytest.mark.parametrize('non_str_value', [None, 1, object])
def test_type_error(self, target_func, non_str_value):
with pytest.raises(TypeError):
target_func(non_str_value)
class TestForceInt(object):
@pytest.fixture
def target_func(self):
        from poco.utils import force_int
return force_int
@pytest.mark.parametrize(
        's,expected',
[
('100', 100),
(100, 100),
(2.5, 2),
('', None),
(True, 1),
(False, 0),
],
)
def test_call(self, target_func, s, expected):
assert target_func(s) == expected
@pytest.mark.parametrize('non_str_value', [None, object])
def test_type_error(self, target_func, non_str_value):
with pytest.raises(TypeError):
target_func(non_str_value)
|
maxamillion/atomic-reactor
|
tests/test_rpm_util.py
|
Python
|
bsd-3-clause
| 2,380
| 0.00042
|
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, print_function
import pytest
from atomic_reactor.rpm_util import rpm_qf_args, parse_rpm_output
FAKE_SIGMD5 = b'0' * 32
FAKE_SIGNATURE = "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc"
@pytest.mark.parametrize(('tags', 'separator', 'expected'), [
    (None, None,
r"-qa --qf '%{NAME};%{VERSION};%{RELEASE};%{ARCH};%{EPOCH};%{SIZE};%{SIGMD5};%{BUILDTIME};%{SIGPGP:pgpsig};%{SIGGPG:pgpsig}\n'"), # noqa
(['NAME', 'VERSION'], "|",
r"-qa --qf '%{NAME}|%{VERSION}\n'"),
])
def test_rpm_qf_args(tags, separator, expected):
kwargs = {}
if tags is not None:
kwargs['tags'] = tags
if separator is not None:
kwargs['separator'] = separator
assert rpm_qf_args(**kwargs) == expected
def test_parse_rpm_output():
res = parse_rpm_output([
"name1;1.0;1;x86_64;0;2000;" + FAKE_SIGMD5.decode() + ";23000;" +
FAKE_SIGNATURE + ";(none)",
"name2;2.0;1;x86_64;0;3000;" + FAKE_SIGMD5.decode() + ";24000;" +
"(none);" + FAKE_SIGNATURE,
"gpg-pubkey;64dab85d;57d33e22;(none);(none);0;(none);1473461794;(none);(none)",
])
assert res == [
{
'type': 'rpm',
'name': 'name1',
'version': '1.0',
'release': '1',
'arch': 'x86_64',
'epoch': 0,
'sigmd5': FAKE_SIGMD5.decode(),
'signature': "01234567890abc",
},
{
'type': 'rpm',
'name': 'name2',
'version': '2.0',
'release': '1',
'arch': 'x86_64',
'epoch': 0,
'sigmd5': FAKE_SIGMD5.decode(),
'signature': "01234567890abc",
}
]
# Tests with different fields and separator
res = parse_rpm_output(["1|1.0|name1"],
tags=['RELEASE', 'VERSION', 'NAME'],
separator="|")
assert res == [
{
'type': 'rpm',
'name': 'name1',
'version': '1.0',
'release': '1',
'arch': None,
'epoch': None,
'sigmd5': None,
'signature': None,
}
]
|
HenryCorse/Project_Southstar
|
CharacterApp/creator/apps.py
|
Python
|
mit
| 89
| 0
|
from django.apps import AppConfig
class CreatorConfig(AppConfig):
name = 'creator'
|
hanleilei/note
|
python/vir_manager/utils/libvirt_utils.py
|
Python
|
cc0-1.0
| 3,602
| 0.000943
|
import hashlib
import libvirt
from django.forms.models import model_to_dict
from utils.common import gen_passwd
from utils.common import gen_user_passwd
from backstage.models import LibvirtPass
from utils.salt_utils import SaltApiHandler
def get_host_passwd(host):
"""
    * desc: get the username and password of a host
    * input: host IP
    * output: host, libvirt username, libvirt password
"""
db_info = LibvirtPass.objects.filter(host=host)
if db_info:
        # the database already holds a generated username and password
line = db_info[0]
return model_to_dict(line)
else:
        # the database has no stored username/password, so generate new ones
        user, password = gen_libvirt_passwd(host)
        # configure them on the remote host
if host.startswith('192.168'):
handler = SaltApiHandler(host='test')
else:
handler = SaltApiHandler(host='online')
set_libvirt_passwd(handler, host, user, password)
        # check whether we can connect to the remote server
        for i in range(3):
            try:
                # test the connection
                flag = test_conn(host, user=user, password=password)
                if not flag:
                    # store the username and password
p = LibvirtPass(host=host, user=user, password=password)
p.save()
return {'host': host, 'user': user, 'password': password}
except:
pass
    raise Exception('remote libvirt connection failed')
def gen_libvirt_passwd(host):
    """
    * desc: generate a libvirt username and password
    * input: host IP
    * output: libvirt username and libvirt password
"""
hash_md5 = hashlib.md5()
hash_md5.update(bytes(host.strip(), encoding='utf-8'))
user = hash_md5.hexdigest()
password = gen_passwd()
return user, password
def set_libvirt_passwd(handler, host, user, password):
"""
    * desc: set the username and password for remote libvirt connections
    * input: salt handler, host IP, username, password
    * output: whether the setup succeeded
"""
cmd = 'echo %s | echo %s | saslpasswd2 -a libvirt %s' % (
password, password, user)
params = {
'client': 'local',
'fun': 'cmd.run',
'tgt': host,
'arg': cmd
}
result = handler.saltCmd(params)
if host in result:
if not result[host]:
return 0
else:
            raise Exception('setting the remote libvirt password failed %s' % result[host])
else:
        raise Exception('salt failed to set the remote libvirt password')
def test_conn(host, user=None, password=None):
"""
    * desc: check whether we can connect to the remote machine
    * input: host IP, username, password
    * output: 0 if the remote host can be reached, 1 if the database has no username/password
    """
    # check whether a username and password were passed in
    if not user:
        # fetch them from the database
db_line = LibvirtPass.objects.filter(host=host)
if not db_line:
return 1
line = db_line[0]
user = line.user
password = line.password
    # authentication callback
def authcb(credentials, user_data):
for credential in credentials:
if credential[0] == libvirt.VIR_CRED_AUTHNAME:
credential[4] = user
elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
credential[4] = password
return 0
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], authcb, None]
try:
conn = libvirt.openAuth('qemu+tcp://%s/system' % host, auth, 0)
return 0
except:
        raise Exception('authentication failed')
|
marchon/poker
|
setup.py
|
Python
|
mit
| 1,020
| 0.025515
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
install_requires = [
'pytz',
'requests',
'lxml',
'python-dateutil',
'parsedatetime',
    'cached-property',
'click',
    'enum34', # backported versions from Python3
'pathlib',
'configparser',
]
console_scripts = [
'poker = poker.commands:poker',
]
classifiers = [
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
]
setup(
name = 'poker',
version = '0.22.3',
description = 'Poker Framework',
classifiers = classifiers,
keywords = 'poker',
author = u'Kiss György',
author_email = "kissgyorgy@me.com",
url = "https://github.com/pokerregion/poker",
license = "MIT",
packages = find_packages(),
install_requires = install_requires,
entry_points = {'console_scripts': console_scripts},
tests_require = ['pytest', 'coverage', 'coveralls'],
)
|
jevinw/rec_utilities
|
babel_util/recommenders/ef2.py
|
Python
|
agpl-3.0
| 1,698
| 0.004122
|
#!/usr/bin/env python
import itertools
from operator import attrgetter, itemgetter
class ClusterRecommendation(object):
__slots__ = ("cluster_id", "papers")
def __init__(self, cluster_id, papers):
self.cluster_id = cluster_id
self.papers = [(p.pid, p.score) for p in papers]
def __str__(self):
return "%s %s" % (self.cluster_id, len(self.papers))
def __repr__(self):
return "<ClusterRecommendation %s>" % self.cluster_id
def get_papers(self):
"""Only return a tuple of papers"""
return tuple(zip(*self.papers))[0]
def get_parent(cluster_id):
parent = ":".join(cluster_id.split(":")[:-1])
if parent == "":
return None
return parent
def get_subtree(cluster_id):
subtree = ":".join(cluster_id.split(":")[1:])
if subtree == "":
return None
return subtree
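# Illustrative behaviour of the helpers above, assuming colon-separated
# hierarchical cluster ids (a sketch, not part of the original module):
#
#     get_parent("1:2:3")    # -> "1:2"
#     get_parent("1")        # -> None
#     get_subtree("1:2:3")   # -> "2:3"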
def make_leaf_rec(stream, rec_limit=10):
leaf_stream = itertools.groupby(stream, lambda e: e.local)
for (cluster_id, stream) in leaf_stream:
papers = [e for e in stream]
papers = sorted(papers, key=attrgetter('score'), reverse=True)
yield ClusterRecommendation(cluster_id, papers[:rec_limit])
def parse_tree(stream, rec_limit=10):
mstream = make_leaf_rec(stream, rec_limit)
child_stream = itertools.groupby(mstream, lambda e: get_parent(e.cluster_id))
for (parent_cluster_id, recs) in child_stream:
child_recs = [r for r in recs]
papers = itertools.chain.from_iterable(map(attrgetter('papers'), child_recs))
parent_papers = tuple(zip(*sorted(papers, key=itemgetter(1), reverse=True)))[0]
yield (parent_cluster_id, parent_papers[:rec_limit], child_recs)
|
lreis2415/PyGeoC
|
examples/ex06_model_performace_index.py
|
Python
|
mit
| 874
| 0.002288
|
# -*- coding: utf-8 -*-
# Exercise 6: Calculate model performance indexes with PyGeoC
from pygeoc.utils import MathClass
def cal_model_performance(obsl, siml):
"""Calculate model performance indexes."""
nse = MathClass.nashcoef(obsl, siml)
r2 = MathClass.rsquare(obsl, siml)
rmse = MathClass.rmse(obsl, siml)
pbias = MathClass.pbias(obsl, siml)
rsr = MathClass.rsr(obsl, siml)
    print('NSE: %.2f, R-square: %.2f, PBIAS: %.2f%%, RMSE: %.2f, RSR: %.2f' %
          (nse, r2, pbias, rmse, rsr))
if __name__ == "__main__":
obs_list = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96, 4.00, 2.24, 17.00, 5.88, 0.86, 13.21,
10.00, 11.00, 6.60]
sim_list = [0.40, 4.88, 1.92, 0.49, 0.28, 5.36, 1.89, 4.08, 1.50, 10.00, 7.02, 0.33, 8.40,
7.8, 12, 3.8]
cal_model_performance(obs_list, sim_list)
|
havard024/prego
|
venv/lib/python2.7/site-packages/PIL/MspImagePlugin.py
|
Python
|
mit
| 2,173
| 0.004602
|
#
# The Python Imaging Library.
# $Id$
#
# MSP file handling
#
# This is the format used by the Paint program in Windows 1 and 2.
#
# History:
# 95-09-05 fl Created
# 97-01-03 fl Read/write MSP images
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
from PIL import Image, ImageFile, _binary
#
# read MSP files
i16 = _binary.i16le
def _accept(prefix):
return prefix[:4] in [b"DanM", b"LinS"]
##
# Image plugin for Windows MSP images. This plugin supports both
# uncompressed (Windows 1.0).
class MspImageFile(ImageFile.ImageFile):
format = "MSP"
format_description = "Windows Paint"
def _open(self):
# Header
s = self.fp.read(32)
if s[:4] not in [b"DanM", b"LinS"]:
raise SyntaxError("not an MSP file")
# Header checksum
sum = 0
for i in range(0, 32, 2):
sum = sum ^ i16(s[i:i+2])
if sum != 0:
raise SyntaxError("bad MSP checksum")
self.mode = "1"
self.size = i16(s[4:]), i16(s[6:])
if s[:4] == b"DanM":
self.tile = [("raw", (0,0)+self.size, 32, ("1", 0, 1))]
else:
self.tile = [("msp", (0,0)+self.size, 32+2*self.size[1], None)]
#
# write MSP files (uncompressed only)
o16 = _binary.o16le
def _save(im, fp, filename):
if im.mode != "1":
raise IOError("cannot write mode %s as MSP" % im.mode)
# create MSP header
header = [0] * 16
    header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1
header[2], header[3] = im.size
header[4], header[5] = 1, 1
header[6], header[7] = 1, 1
header[8], header[9] = im.size
sum = 0
for h in header:
sum = sum ^ h
header[12] = sum # FIXME: is this the right field?
# header
for h in header:
fp.write(o16(h))
# image body
ImageFile._save(im, fp, [("raw", (0,0)+im.size, 32, ("1", 0, 1))])
#
# registry
Image.register_open("MSP", MspImageFile, _accept)
Image.register_save("MSP", _save)
Image.register_extension("MSP", ".msp")
|
Dehyrf/python_gates
|
window.py
|
Python
|
gpl-3.0
| 3,800
| 0.002632
|
from gi.repository import Gtk, Gdk, GdkPixbuf
(TARGET_ENTRY_TEXT, TARGET_ENTRY_PIXBUF) = range(2)
(COLUMN_TEXT, COLUMN_PIXBUF) = range(2)
DRAG_ACTION = Gdk.DragAction.COPY
class DragDropWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Drag and Drop")
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(vbox)
hbox = Gtk.Box(spacing=12)
vbox.pack_start(hbox, True, True, 0)
self.iconview = DragSourceIconView()
self.drop_area = DropArea()
hbox.pack_start(self.iconview, True, True, 0)
hbox.pack_start(self.drop_area, True, True, 0)
button_box = Gtk.Box(spacing=6)
vbox.pack_start(button_box, True, False, 0)
image_button = Gtk.RadioButton.new_with_label_from_widget(None,
"Images")
image_button.connect("toggled", self.add_image_targets)
button_box.pack_start(image_button, True, False, 0)
text_button = Gtk.RadioButton.new_with_label_from_widget(image_button,
"Text")
text_button.connect("toggled", self.add_text_targets)
button_box.pack_start(text_button, True, False, 0)
self.add_image_targets()
def add_image_targets(self, button=None):
targets = Gtk.TargetList.new([])
targets.add_image_targets(TARGET_ENTRY_PIXBUF, True)
self.drop_area.drag_dest_set_target_list(targets)
self.iconview.drag_source_set_target_list(targets)
def add_text_targets(self, button=None):
self.drop_area.drag_dest_set_target_list(None)
self.iconview.drag_source_set_target_list(None)
self.drop_area.drag_dest_add_text_targets()
        self.iconview.drag_source_add_text_targets()
class DragSourceIconView(Gtk.IconView):
def __init__(self):
Gtk.IconView.__init__(self)
self.set_text_column(COLUMN_TEXT)
self.set_pixbuf_column(COLUMN_PIXBUF)
model = Gtk.ListStore(str, GdkPixbuf.Pixbuf)
self.set_model(model)
self.add_item("Item 1", "image-missing")
self.add_item("Item 2", "help-about")
self.add_item("Item 3", "edit-copy")
self.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, [],
DRAG_ACTION)
self.connect("drag-data-get", self.on_drag_data_get)
def on_drag_data_get(self, widget, drag_context, data, info, time):
selected_path = self.get_selected_items()[0]
selected_iter = self.get_model().get_iter(selected_path)
if info == TARGET_ENTRY_TEXT:
text = self.get_model().get_value(selected_iter, COLUMN_TEXT)
data.set_text(text, -1)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = self.get_model().get_value(selected_iter, COLUMN_PIXBUF)
data.set_pixbuf(pixbuf)
def add_item(self, text, icon_name):
pixbuf = Gtk.IconTheme.get_default().load_icon(icon_name, 16, 0)
self.get_model().append([text, pixbuf])
class DropArea(Gtk.Label):
def __init__(self):
Gtk.Label.__init__(self, "Drop something on me!")
self.drag_dest_set(Gtk.DestDefaults.ALL, [], DRAG_ACTION)
self.connect("drag-data-received", self.on_drag_data_received)
def on_drag_data_received(self, widget, drag_context, x,y, data,info, time):
if info == TARGET_ENTRY_TEXT:
text = data.get_text()
print("Received text: %s" % text)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = data.get_pixbuf()
width = pixbuf.get_width()
height = pixbuf.get_height()
print("Received pixbuf with width %spx and height %spx" % (width,
height))
win = DragDropWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
glenngillen/dotfiles
|
.vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/jedilsp/pydantic/json.py
|
Python
|
mit
| 3,365
| 0.001189
|
import datetime
import re
import sys
from collections import deque
from decimal import Decimal
from enum import Enum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path
from types import GeneratorType
from typing import Any, Callable, Dict, Type, Union
from uuid import UUID
if sys.version_info >= (3, 7):
Pattern = re.Pattern
else:
# python 3.6
Pattern = re.compile('a').__class__
from .color import Color
from .types import SecretBytes, SecretStr
__all__ = 'pydantic_encoder', 'custom_pydantic_encoder', 'timedelta_isoformat'
def isoformat(o: Union[datetime.date, datetime.time]) -> str:
return o.isoformat()
def decimal_encoder(dec_value: Decimal) -> Union[int, float]:
"""
    Encodes a Decimal as int if there's no exponent, otherwise float.
    This is useful when we use ConstrainedDecimal to represent Numeric(x,0)
    where an integer (but not int typed) is used. Encoding this as a float
    results in failed round-tripping between encode and parse.
Our Id type is a prime example of this.
>>> decimal_encoder(Decimal("1.0"))
1.0
>>> decimal_encoder(Decimal("1"))
1
"""
if dec_value.as_tuple().exponent >= 0:
return int(dec_value)
else:
return float(dec_value)
ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = {
bytes: lambda o: o.decode(),
Color: str,
datetime.date: isoformat,
datetime.datetime: isoformat,
datetime.time: isoformat,
datetime.timedelta: lambda td: td.total_seconds(),
Decimal: decimal_encoder,
Enum: lambda o: o.value,
frozenset: list,
deque: list,
GeneratorType: list,
IPv4Address: str,
IPv4Interface: str,
IPv4Network: str,
IPv6Address: str,
IPv6Interface: str,
    IPv6Network: str,
Path: str,
Pattern: lambda o: o.pattern,
SecretBytes: str,
SecretStr: str,
set: list,
UUID: str,
}
def pydantic_encoder(obj: Any) -> Any:
from dataclasses import asdict, is_dataclass
from .main import BaseModel
if isinstance(obj, BaseModel):
return obj.dict()
elif is_dataclass(obj):
return asdict(obj)
# Check the class type and its superclasses for a matching encoder
for base in obj.__class__.__mro__[:-1]:
try:
encoder = ENCODERS_BY_TYPE[base]
except KeyError:
continue
return encoder(obj)
else: # We have exited the for loop without finding a suitable encoder
raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
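# Illustrative behaviour (a sketch, not part of the original module):
#
#     pydantic_encoder(Decimal('1.5'))             # -> 1.5
#     pydantic_encoder(datetime.date(2020, 1, 2))  # -> '2020-01-02'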
def custom_pydantic_encoder(type_encoders: Dict[Any, Callable[[Type[Any]], Any]], obj: Any) -> Any:
# Check the class type and its superclasses for a matching encoder
for base in obj.__class__.__mro__[:-1]:
try:
encoder = type_encoders[base]
except KeyError:
continue
return encoder(obj)
else: # We have exited the for loop without finding a suitable encoder
return pydantic_encoder(obj)
def timedelta_isoformat(td: datetime.timedelta) -> str:
"""
ISO 8601 encoding for timedeltas.
"""
minutes, seconds = divmod(td.seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'P{td.days}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S'
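# Illustrative example (a sketch, not part of the original module):
#
#     timedelta_isoformat(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4))
#     # -> 'P1DT2H3M4.000000S'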
|
urashima9616/Leetcode_Python
|
Leet154_FindMinRotatedArrayII.py
|
Python
|
gpl-3.0
| 1,397
| 0.012169
|
"""
Duplicate entries allowed.
This raises the worst-case complexity from O(log n) to O(n).
key idea:
if mid is equal to both ends, one side must be constant and the min is on the other side.
if left end < mid < right end: no rotation
if mid is less than both ends => the min is on your left
if mid is greater than both ends => the min is on your right
"""
class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length = len(nums)
if length ==1:
return nums[0]
elif length == 2:
return nums[0] if nums[0]<=nums[1] else nums[1]
else:
mid = length/2
right = 0
if nums[mid] == nums[0] and nums[mid] == nums[-1]:#Need to search both direction
prev = nums[mid]
for i in xrange(mid,len(nums)):
if prev != nums[i]:
right = 1
                return self.findMin(nums[mid:]) if right == 1 else self.findMin(nums[:mid+1])
elif nums[mid]>= nums[0] and nums[mid]<=nums[-1]: # no rotation
return nums[0]
elif nums[mid] >= nums[0] and nums[mid] >= nums[-1]:# on the right hand side
return self.findMin(nums[mid:])
elif nums[mid] <= nums[0] and nums[mid] <= nums[-1]: # on the left hand side
return self.findMin(nums[:mid+1])
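# Illustrative calls (a sketch, not part of the original file):
#
#     Solution().findMin([3, 3, 1, 3])     # -> 1
#     Solution().findMin([2, 2, 2, 0, 1])  # -> 0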
|
julienmalard/Tikon
|
pruebas/test_central/rcrs/modelo_valid.py
|
Python
|
agpl-3.0
| 1,156
| 0.000877
|
import numpy as np
import pandas as pd
import xarray as xr
from tikon.central import Módulo, SimulMódulo, Modelo, Exper, Parcela
from tikon.central.res import Resultado
from tikon.datos import Obs
from tikon.utils import EJE_TIEMPO, EJE_PARC
f_inic = '2000-01-01'
crds = {'eje 1': ['a', 'b'], 'eje 2': ['x', 'y', 'z']}
class Res(Resultado):
def __init__(símismo, sim, coords, vars_interés):
coords = {**crds, **coords}
super().__init__(sim, coords, vars_interés)
nombre = 'res'
unids = None
class SimulMóduloValid(SimulMódulo):
resultados = [Res]
def incrementar(símismo, paso, f):
super().incrementar(paso, f)
símismo.poner_valor('res', 1, rel=True)
class MóduloValid(Módulo):
nombre = 'módulo'
cls_simul = SimulMóduloValid
class MiObs(Obs):
mód = 'módulo'
var = 'res'
obs = MiObs(
datos=xr.DataArray(
np.arange(10),
        coords={EJE_TIEMPO: pd.date_range(f_inic, periods=10, freq='D')}, dims=[EJE_TIEMPO]
).expand_dims({EJE_PARC: ['parcela'], **crds})
)
exper = Exper('exper', Parcela('parcela'), obs=obs)
modelo = Modelo(MóduloValid)
|
asljivo1/802.11ah-ns3
|
ns-3/.waf-1.8.12-f00e5b53f6bbeab1384a38c9cc5d51f7/waflib/Tools/fc_scan.py
|
Python
|
gpl-2.0
| 1,859
| 0.069392
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import re
from waflib import Utils,Task,TaskGen,Logs
from waflib.TaskGen import feature,before_method,after_method,extension
from waflib.Configure import conf
INC_REGEX="""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
USE_REGEX="""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
MOD_REGEX="""(?:^|;)\s*MODULE(?!\s*PROCEDURE)(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
re_inc=re.compile(INC_REGEX,re.I)
re_use=re.compile(USE_REGEX,re.I)
re_mod=re.compile(MOD_REGEX,re.I)
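# Illustrative matches for the regexes above (a sketch, not part of the
# original file):
#
#     re_use.search("use iso_c_binding").group(1)   # -> 'iso_c_binding'
#     re_mod.search("module my_mod").group(1)       # -> 'my_mod'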
class fortran_parser(object):
def __init__(self,incpaths):
self.seen=[]
self.nodes=[]
self.names=[]
self.incpaths=incpaths
def find_deps(self,node):
txt=node.read()
incs=[]
uses=[]
mods=[]
for line in txt.splitlines():
m=re_inc.search(line)
if m:
incs.append(m.group(1))
m=re_use.search(line)
if m:
uses.append(m.group(1))
m=re_mod.search(line)
if m:
mods.append(m.group(1))
return(incs,uses,mods)
def start(self,node):
self.waiting=[node]
while self.waiting:
nd=self.waiting.pop(0)
self.iter(nd)
def iter(self,node):
path=node.abspath()
incs,uses,mods=self.find_deps(node)
for x in incs:
if x in self.seen:
continue
self.seen.append(x)
self.tryfind_header(x)
for x in uses:
name="USE@%s"%x
if not name in self.names:
self.names.append(name)
for x in mods:
name="MOD@%s"%x
if not name in self.names:
self.names.append(name)
def tryfind_header(self,filename):
found=None
for n in self.incpaths:
found=n.find_resource(filename)
if found:
self.nodes.append(found)
self.waiting.append(found)
break
if not found:
if not filename in self.names:
self.names.append(filename)
|
Domatix/stock-logistics-workflow
|
stock_pack_operation_auto_fill/models/stock_move.py
|
Python
|
agpl-3.0
| 1,537
| 0
|
# Copyright 2017 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# Copyright 2018 David Vidal <david.vidal@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models
class StockMove(models.Model):
_inherit = 'stock.move'
def _create_extra_move(self):
"""
        When the user sets a done quantity on a stock move line that is
        greater than the initial demand, Odoo creates an extra stock move
        with the difference, and it is possible to create an extra stock
        move line with qty_done = 0, which will be deleted in the
        _action_done method.
This method set a context variable to prevent set qty_done for these
cases.
"""
my_self = self
if self.picking_id.auto_fill_operation:
my_self = self.with_context(skip_auto_fill_operation=True)
return super(StockMove, my_self)._create_extra_move()
def _prepare_move_line_vals(self, quantity=None, reserved_quant=None):
"""Auto-assign as done the quantity proposed for the lots"""
res = super(StockMove, self)._prepare_move_line_vals(
quantity, reserved_quant,
)
if (self.env.context.get('skip_auto_fill_operation') or
not self.picking_id.auto_fill_operation):
return res
elif (self.picking_id.picking_type_id.avoid_lot_assignment and
res.get('lot_id')):
return res
res.update({
'qty_done': res.get('product_uom_qty', 0.0),
})
return res
|
rst2pdf/rst2pdf
|
rst2pdf/tests/input/sphinx-issue252/conf.py
|
Python
|
mit
| 1,417
| 0.002117
|
# -*- coding: utf-8 -*-
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.imgmath', 'rst2pdf.pdfbuilder']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'issue252'
copyright = u'2009, RA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'test'
# The full version, including alpha/beta/rc tags.
release = 'test'
# -- Options for PDF output ----------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author).
pdf_documents = [('index', u'MyProject', u'My Project', u'Author Name')]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx']
# Language to be used for hyphenation support
pdf_language = "en_US"
# If false, no index is generated.
pdf_use_index = False
# If false, no modindex is generated.
pdf_use_modindex = False
# If false, no coverpage is generated.
pdf_use_coverpage = False
pdf_verbosity = 0
pdf_invariant = True
pdf_real_footnotes = True
|
vicenteneto/python-cartolafc
|
examples/examples.py
|
Python
|
mit
| 1,020
| 0.00098
|
import cartolafc
api = cartolafc.Api(email='email@email.com', password='s3nh4', attempts=5, redis_url='redis://localhost:6379/0')
print(api.amigos())
print(api.clubes())
print(api.liga(nome='Virtus Premier League'))
print(api.liga(nome='Nacional', page=2))
print(api.liga(nome='Nacional', page=3, order_by=cartolafc.RODADA))
print(api.liga(slug='virtus-premier-league'))
print(api.ligas(query='Virtus'))
print(api.ligas_patrocinadores())
print(api.mercado())
print(api.mercado_atletas())
print(api.parciais())
print(api.partidas(1))
print(api.pontuacao_atleta(81682))
print(api.pos_rodada_destaques())
print(api.time(id=2706236))
print(api.time(id=2706236, as_json=True))
print(api.time(nome='ALCAFLA FC'))
print(api.time(nome='ALCAFLA FC', as_json=True))
print(api.time(slug='alcafla-fc'))
print(api.time(slug='alcafla-fc', as_json=True))
print(api.time_logado())
print(api.time_parcial(id=2706236))
print(api.time_parcial(nome='ALCAFLA FC'))
print(api.time_parcial(slug='alcafla-fc'))
print(api.times(query='Faly'))
|
YunoHost/moulinette
|
test/src/authenticators/dummy.py
|
Python
|
agpl-3.0
| 2,129
| 0.001409
|
# -*- coding: utf-8 -*-
import logging
from moulinette.utils.text import random_ascii
from moulinette.core import MoulinetteError, MoulinetteAuthenticationError
from moulinette.authentication import BaseAuthenticator
logger = logging.getLogger("moulinette.authenticator.yoloswag")
# Dummy authenticator implementation
session_secret = random_ascii()
class Authenticator(BaseAuthenticator):
"""Dummy authenticator used for tests"""
name = "dummy"
def __init__(self, *args, **kwargs):
pass
def _authenticate_credentials(self, credentials=None):
if not credentials == self.name:
raise MoulinetteError("invalid_password", raw_msg=True)
return
def set_session_cookie(self, infos):
from bottle import response
assert isinstance(infos, dict)
# This allows to generate a new session id or keep the existing one
        current_infos = self.get_session_cookie(raise_if_no_session_exists=False)
new_infos = {"id": current_infos["id"]}
new_infos.update(infos)
response.set_cookie(
"moulitest",
new_infos,
secure=True,
secret=session_secret,
httponly=True,
# samesite="strict", # Bottle 0.12 doesn't support samesite, to be added in next versions
)
def get_session_cookie(self, raise_if_no_session_exists=True):
from bottle import request
try:
infos = request.get_cookie("moulitest", secret=session_secret, default={})
except Exception:
if not raise_if_no_session_exists:
return {"id": random_ascii()}
raise MoulinetteAuthenticationError("unable_authenticate")
if not infos and raise_if_no_session_exists:
raise MoulinetteAuthenticationError("unable_authenticate")
if "id" not in infos:
infos["id"] = random_ascii()
return infos
def delete_session_cookie(self):
from bottle import response
response.set_cookie("moulitest", "", max_age=-1)
response.delete_cookie("moulitest")
|
avoorhis/vamps-node.js
|
public/scripts/node_process_scripts/vamps_script_create_json_dataset_files.py
|
Python
|
mit
| 17,471
| 0.022208
|
#!/usr/bin/env python
"""
create_counts_lookup.py
"""
print('3)-->files')
import sys,os
import argparse
import pymysql as MySQLdb
import json
import configparser as ConfigParser
"""
SELECT sum(seq_count), dataset_id, domain_id,domain
FROM sequence_pdr_info
JOIN sequence_uniq_info USING(sequence_id)
JOIN silva_taxonomy_info_per_seq USING(silva_taxonomy_info_per_seq_id)
JOIN silva_taxonomy USING(silva_taxonomy_id)
JOIN domain USING(domain_id)
JOIN phylum USING(phylum_id)
where dataset_id = '426'
GROUP BY dataset_id, domain_id
SELECT sum(seq_count), dataset_id, domain_id,domain,phylum_id,phylum
FROM sequence_pdr_info
JOIN sequence_uniq_info USING(sequence_id)
JOIN silva_taxonomy_info_per_seq USING(silva_taxonomy_info_per_seq_id)
JOIN silva_taxonomy USING(silva_taxonomy_id)
JOIN domain USING(domain_id)
JOIN phylum USING(phylum_id)
where dataset_id = '426'
GROUP BY dataset_id, domain_id, phylum_id
"""
query_coreA = " FROM sequence_pdr_info"
# query_coreA += " JOIN sequence_uniq_info USING(sequence_id)"
query_core_join_silva119 = " JOIN silva_taxonomy_info_per_seq USING(sequence_id)"
query_core_join_silva119 += " JOIN silva_taxonomy USING(silva_taxonomy_id)"
query_core_join_rdp = " JOIN rdp_taxonomy_info_per_seq USING(sequence_id)"
query_core_join_rdp += " JOIN rdp_taxonomy USING(rdp_taxonomy_id)"
#SELECT sum(seq_count), dataset_id, domain_id
query_coreA_matrix = " FROM generic_taxonomy_info"
query_core_join_matrix = " JOIN generic_taxonomy USING(generic_taxonomy_id)"
#JOIN generic_taxonomy USING(generic_taxonomy_id) WHERE dataset_id in ('4413','4414','4415','4416','4417') GROUP BY dataset_id, domain_id ORDER BY NULL
where_part = " WHERE dataset_id in ('%s')"
# query_core = " FROM sequence_pdr_info"
# query_core += " JOIN sequence_uniq_info USING(sequence_id)"
# query_core += " JOIN silva_taxonomy_info_per_seq USING(silva_taxonomy_info_per_seq_id)"
# query_core += " JOIN silva_taxonomy USING(silva_taxonomy_id)"
domain_queryA = "SELECT sum(seq_count), dataset_id, domain_id"
#domain_query += query_core
domain_queryB = where_part
domain_queryB += " GROUP BY dataset_id, domain_id"
phylum_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id"
#phylum_query += query_core
phylum_queryB = where_part
phylum_queryB += " GROUP BY dataset_id, domain_id, phylum_id"
class_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id"
#class_query += query_core
class_queryB = where_part
class_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id"
order_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id"
#order_query += query_core
order_queryB = where_part
order_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id"
family_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id"
#family_query += query_core
family_queryB = where_part
family_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id"
genus_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id"
#genus_query += query_core
genus_queryB = where_part
genus_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id"
species_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id"
#species_query += query_core
species_queryB = where_part
species_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id"
strain_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id, strain_id"
#strain_query += query_core
strain_queryB = where_part
strain_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id, strain_id"
end_group_query = " ORDER BY NULL"
required_metadata_fields = [ "collection_date","env_biome_id", "env_feature_id", "env_material_id", "env_package_id","geo_loc_name_id","latitude", "longitude", "dna_region_id",'adapter_sequence_id','sequencing_platform_id','target_gene_id','domain_id','illumina_index_id','primer_suite_id', 'run_id'];
req_query = "SELECT dataset_id, "+','.join(required_metadata_fields)+" from required_metadata_info WHERE dataset_id in ('%s')"
cust_pquery = "SELECT project_id,field_name from custom_metadata_fields WHERE project_id = '%s'"
#queries = [domain_query,phylum_query,class_query,order_query,family_query,genus_query,species_query,strain_query]
queries = [{"rank": "domain", "queryA": domain_queryA, "queryB": domain_queryB},
           {"rank": "phylum", "queryA": phylum_queryA, "queryB": phylum_queryB},
{"rank": "klass", "queryA": class_queryA, "queryB": class_queryB},
{"rank": "order", "queryA": order_queryA, "queryB": order_queryB},
{"rank": "family", "queryA": family_queryA, "queryB": family_queryB},
{"rank": "genus", "queryA": genus_queryA, "queryB": genus_queryB},
{"rank": "species", "queryA": species_queryA, "queryB": species_queryB},
{"rank": "strain", "queryA": strain_queryA, "queryB": strain_queryB}
]
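# Each rank-level query below is assembled later in go_add() as
#   q["queryA"] + <core FROM/JOIN clause for the chosen units> + q["queryB"] % did_sql + end_group_query
# e.g. for the default silva119 units the domain query looks roughly like
#   SELECT sum(seq_count), dataset_id, domain_id FROM ... JOIN silva_taxonomy USING(silva_taxonomy_id)
#   WHERE dataset_id in ('...') GROUP BY dataset_id, domain_id ORDER BY NULL
# (illustrative reconstruction from the code below, not captured query output)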
# Globals
CONFIG_ITEMS = {}
DATASET_ID_BY_NAME = {}
#
#
#
class Dict2Obj(object):
"""
Turns a dictionary into a class
"""
#----------------------------------------------------------------------
def __init__(self, dictionary):
"""Constructor"""
for key in dictionary:
setattr(self, key, dictionary[key])
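# Illustrative usage (not part of the original script): Dict2Obj exposes dict
# keys as attributes, which lets go_add() below accept either an argparse
# Namespace or a plain dict.
#   _opts = Dict2Obj({'host': 'localhost', 'units': 'silva119'})
#   _opts.host   # -> 'localhost'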
def go_add(args):
print ("Starting "+os.path.basename(__file__))
print('Changing dict to obj')
try:
args = Dict2Obj(args)
except:
pass
print(type(args))
global mysql_conn, cur
if args.host == 'vamps' or args.host == 'vampsdb' or args.host == 'bpcweb8':
hostname = 'vampsdb'
elif args.host == 'vampsdev' or args.host == 'bpcweb7':
hostname = 'bpcweb7'
else:
hostname = 'localhost'
args.NODE_DATABASE = 'vamps_development'
mysql_conn = MySQLdb.connect(db = args.NODE_DATABASE, host=hostname, read_default_file=os.path.expanduser("~/.my.cnf_node") )
cur = mysql_conn.cursor()
get_config_data(args)
pid = CONFIG_ITEMS['project_id']
counts_lookup = {}
if args.units == 'rdp':
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_rdp2.6')
elif args.units == 'generic':
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_generic')
elif args.units == 'matrix': # add matrix files to generic
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_generic')
else: # default 'silva119'
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_silva119')
if not os.path.exists(file_prefix):
os.makedirs(file_prefix)
if args.verbose:
print (file_prefix)
#DATASET_ID_BY_NAME[ds] = did
dids = [str(x) for x in DATASET_ID_BY_NAME.values()]
print ('dids',dids)
#dids = get_dataset_ids(pid)
# delete old did files if any
for did in dids:
pth = os.path.join(file_prefix,str(did)+'.json')
try:
os.remove(pth)
except:
pass
did_sql = "','".join(dids)
#print counts_lookup
for q in queries:
if args.units == 'rdp':
query = q["queryA"] + query_coreA + query_core_join_rdp + q["queryB"] % did_sql + end_group_query
elif args.units == 'generic' or args.units == 'matrix':
query = q["queryA"] + query_coreA_matrix + query_core_join_matrix + q["queryB"] % did_sql + end_group_query
else: # default 'silva119'
query = q["queryA"] + query_coreA + query_core_join_silva119 + q["queryB"] % did_sql + end_group_query
if args.verbose:
print (query)
dirs = []
cur.execute(query)
for row in cur.fetchall():
#print row
count = int(row[0])
did =
|
sagarsane/abetterportfolio
|
github/GitTag.py
|
Python
|
apache-2.0
| 3,535
| 0.04017
|
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import GitAuthor
import GitObject
class GitTag( GithubObject.GithubObject ):
@property
def message( self ):
self._completeIfNotSet( self._message )
return self._NoneIfNotSet( self._message )
@property
def object( self ):
self._completeIfNotSet( self._object )
return self._NoneIfNotSet( self._object )
@property
def sha( self ):
self._completeIfNotSet( self._sha )
return self._NoneIfNotSet( self._sha )
@property
def tag( self ):
self._completeIfNotSet( self._tag )
return self._NoneIfNotSet( self._tag )
@property
def tagger( self ):
self._completeIfNotSet( self._tagger )
return self._NoneIfNotSet( self._tagger )
@property
def url( self ):
self._completeIfNotSet( self._url )
return self._NoneIfNotSet( self._url )
def _initAttributes( self ):
self._message = GithubObject.NotSet
self._object = GithubObject.NotSet
self._sha = GithubObject.NotSet
self._tag = GithubObject.NotSet
self._tagger = GithubObject.NotSet
self._url = GithubObject.NotSet
def _useAttributes( self, attributes ):
if "message" in attributes: # pragma no branch
assert attributes[ "message" ] is None or isinstance( attributes[ "message" ], ( str, unicode ) ), attributes[ "message" ]
self._message = attributes[ "message" ]
if "object" in attributes: # pragma no branch
assert attributes[ "object" ] is None or isinstance( attributes[ "object" ], dict ), attributes[ "object" ]
self._object = None if attributes[ "object" ] is None else GitObject.GitObject( self._requester, attributes[ "object" ], completed = False )
if "sha" in attributes: # pragma no branch
assert attributes[ "sha" ] is None
|
or isinstance( attributes[ "sha" ], ( str, unicode ) ), attributes[ "sha" ]
self._sha = attributes[ "sha" ]
if "tag" in attributes: # pragma no branch
assert attributes[ "tag" ] is None or isinstance( attributes[ "tag" ], ( str, unicode ) ), attributes[ "tag" ]
self._tag = attributes[ "tag" ]
if "tagger" in attributes: # pragma n
|
o branch
assert attributes[ "tagger" ] is None or isinstance( attributes[ "tagger" ], dict ), attributes[ "tagger" ]
self._tagger = None if attributes[ "tagger" ] is None else GitAuthor.GitAuthor( self._requester, attributes[ "tagger" ], completed = False )
if "url" in attributes: # pragma no branch
assert attributes[ "url" ] is None or isinstance( attributes[ "url" ], ( str, unicode ) ), attributes[ "url" ]
self._url = attributes[ "url" ]
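# Illustrative usage sketch (assumption, not part of this file): a GitTag is
# normally obtained via the repository API, e.g. tag = repo.get_git_tag(sha);
# accessing tag.message / tag.tagger then goes through the lazy
# _completeIfNotSet() properties defined above.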
|
jor-/simulation
|
simulation/util/args.py
|
Python
|
agpl-3.0
| 12,072
| 0.00439
|
import numpy as np
import measurements.all.data
import simulation.model.options
import simulation.model.cache
import simulation.optimization.constants
import simulation.accuracy.linearized
def init_model_options(model_name, time_step=1,
concentrations=None, concentrations_index=None, parameters=None, parameters_index=None,
spinup_years=None, spinup_tolerance=None, spinup_satisfy_years_and_tolerance=True,
derivative_years=None, derivative_step_size=None, derivative_accuracy_order=None,
parameters_relative_tolerance=None, parameters_absolute_tolerance=None,
concentrations_relative_tolerance=None, concentrations_absolute_tolerance=None,
concentrations_must_be_set=False, parameters_must_be_set=False):
# prepare model options
model_options = simulation.model.options.ModelOptions()
model_options.model_name = model_name
model_options.time_step = time_step
# set spinup options
if spinup_years is not None and spinup_tolerance is None:
spinup_tolerance = 0
spinup_satisfy_years_and_tolerance = False
if spinup_tolerance is not None and spinup_years is None:
spinup_years = 10**10
spinup_satisfy_years_and_tolerance = False
if spinup_years is not None and spinup_tolerance is not None:
if spinup_satisfy_years_and_tolerance:
combination = 'and'
else:
combination = 'or'
spinup_options = {'years': spinup_years, 'tolerance': spinup_tolerance, 'combination': combination}
model_options.spinup_options = spinup_options
# set derivative options
derivative_options = {}
if derivative_step_size is not None:
derivative_options['step_size'] = derivative_step_size
if derivative_years is not None:
derivative_options['years'] = derivative_years
if derivative_accuracy_order is not None:
derivative_options['accuracy_order'] = derivative_accuracy_order
if len(derivative_options) > 0:
model_options.derivative_options = derivative_options
# set model parameters tolerance options
if parameters_relative_tolerance is not None or parameters_absolute_tolerance is not None:
parameter_tolerance_options = model_options['parameter_tolerance_options']
if parameters_relative_tolerance is not None:
parameter_tolerance_options['relative'] = parameters_relative_tolerance
if parameters_absolute_tolerance is not None:
parameter_tolerance_options['absolute'] = parameters_absolute_tolerance
# set initial concentration tolerance options
if concentrations_relative_tolerance is not None or concentrations_absolute_tolerance is not None:
tolerance_options = model_options['initial_concentration_options']['tolerance_options']
if concentrations_relative_tolerance is not None:
tolerance_options['relative'] = concentrations_relative_tolerance
if concentrations_absolute_tolerance is not None:
tolerance_options['absolute'] = concentrations_absolute_tolerance
# create model
    if concentrations_index is not None or parameters_index is not None:
model = simulation.model.cache.Model(model_options=model_options)
    # set initial concentration
if concentrations is not None:
c = np.array(concentrations)
elif concentrations_index is not None:
c = model._constant_concentrations_db.get_value(concentrations_index)
if concentrations is not None or concentrations_index is not None:
model_options.initial_concentration_options.concentrations = c
elif concentrations_must_be_set:
raise ValueError('Concentrations or concentrations_index must be set.')
# set model parameters
if parameters is not None:
p = np.array(parameters)
elif parameters_index is not None:
p = model._parameters_db.get_value(parameters_index)
if parameters is not None or parameters_index is not None:
model_options.parameters = p
elif parameters_must_be_set:
raise ValueError('Parameters or parameters_index must be set.')
return model_options
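# Illustrative call sketch (hypothetical model name and values, not from the
# original module):
#   model_options = init_model_options('MY_MODEL', time_step=1,
#                                       spinup_years=5000, spinup_tolerance=1e-5,
#                                       parameters_index=0)
# With both spinup years and tolerance given and the default
# spinup_satisfy_years_and_tolerance=True, the combination becomes 'and'.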
def init_measurements(model_options,
min_measurements_standard_deviation=None, min_standard_deviation=None,
min_measurements_correlation=None, correlation_decomposition_min_value_D=None, correlation_decomposition_min_abs_value_L=None,
max_box_distance_to_water=None):
measurements_object = measurements.all.data.all_measurements(
tracers=model_options.tracers,
min_measurements_standard_deviation=min_measurements_standard_deviation,
min_standard_deviation=min_standard_deviation,
min_measurements_correlation=min_measurements_correlation,
correlation_decomposition_min_value_D=correlation_decomposition_min_value_D,
correlation_decomposition_min_abs_value_L=correlation_decomposition_min_abs_value_L,
max_box_distance_to_water=max_box_distance_to_water,
water_lsm='TMM',
sample_lsm='TMM')
return measurements_object
def argparse_add_model_options(parser):
parser.add_argument('model_name', choices=simulation.model.constants.MODEL_NAMES, help='The name of the model that should be used.')
parser.add_argument('--time_step', type=int, default=1, help='The time step of the model that should be used. Default: 1')
parser.add_argument('--concentrations', type=float, nargs='+', help='The constant concentration values for the tracers in the initial spinup that should be used. If not specified the default model concentrations are used.')
parser.add_argument('--concentrations_index', type=int, help='The constant concentration index that should be used if no constant concentration values are specified.')
parser.add_argument('--parameters', type=float, nargs='+', help='The model parameters that should be used.')
parser.add_argument('--parameters_index', type=int, help='The model parameter index that should be used if no model parameters are specified.')
parser.add_argument('--spinup_years', type=int, default=10000, help='The number of years for the spinup.')
parser.add_argument('--spinup_tolerance', type=float, default=0, help='The tolerance for the spinup.')
parser.add_argument('--spinup_satisfy_years_and_tolerance', action='store_true', help='If used, the spinup is terminated if years and tolerance have been satisfied. Otherwise, the spinup is terminated as soon as years or tolerance have been satisfied.')
parser.add_argument('--derivative_step_size', type=float, default=None, help='The step size used for the finite difference approximation.')
parser.add_argument('--derivative_years', type=int, default=None, help='The number of years for the finite difference approximation spinup.')
parser.add_argument('--derivative_accuracy_order', type=int, default=None, help='The accuracy order used for the finite difference approximation. 1 = forward differences. 2 = central differences.')
parser.add_argument('--parameters_relative_tolerance', type=float, nargs='+', default=10**-6, help='The relative tolerance up to which two model parameter vectors are considered equal.')
parser.add_argument('--parameters_absolute_tolerance', type=float, nargs='+', default=10**-6, help='The absolute tolerance up to which two model parameter vectors are considered equal.')
parser.add_argument('--concentrations_relative_tolerance', type=float, default=10**-6, help='The relative tolerance up to which two initial concentration vectors are considered equal.')
parser.add_argument('--concentrations_absolute_tolerance', type=float, default=10**-6, help='The absolute tolerance up to which two initial concentration vectors are considered equal.')
return parser
def argparse_add_measurement_options(parser):
parser.add_argument('--min_measurements_standard_deviations', nargs='+', type=int, default=None, help='The minimal number of measurements used to calculate standard deviations applied to eac
|
pauron/ShaniXBMCWork
|
script.video.F4mProxy/lib/f4mDownloader.py
|
Python
|
gpl-2.0
| 39,022
| 0.016478
|
import xml.etree.ElementTree as etree
import base64
from struct import unpack, pack
import sys
import io
import os
import time
import itertools
import xbmcaddon
import xbmc
import urllib2,urllib
import traceback
import urlparse
import posixpath
import re
import hmac
import hashlib
import binascii
import zlib
from hashlib import sha256
import cookielib
#import youtube_dl
#from youtube_dl.utils import *
addon_id = 'script.video.F4mProxy'
selfAddon = xbmcaddon.Addon(id=addon_id)
__addonname__ = selfAddon.getAddonInfo('name')
__icon__ = selfAddon.getAddonInfo('icon')
downloadPath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))#selfAddon["profile"])
F4Mversion=''
#from Crypto.Cipher import AES
value_unsafe = '%+&;#'
VALUE_SAFE = ''.join(chr(c) for c in range(33, 127)
if chr(c) not in value_unsafe)
def urlencode_param(value):
"""Minimal URL encoding for query parameter"""
return urllib.quote_plus(value, safe=VALUE_SAFE)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res+=char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size-header_end)
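    # Note on the box layout handled above (derived from this code, not from the
    # spec text): a box starts with a 4-byte big-endian size and a 4-byte type;
    # if the size field equals 1, the real size follows as an 8-byte value, so
    # the header is 16 bytes instead of 8 before the payload is read.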
def read_asrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
quality_entry_count = self.read_unsigned_char()
quality_modifiers = []
        for i in range(quality_entry_count):
quality_modifier = self.read_string()
quality_modifiers.append(quality_modifier)
segment_run_count = self.read_unsigned_int()
segments = []
#print 'segment_run_count',segment_run_count
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
#print 'segments',segments
return {'version': version,
'quality_segment_modifiers': quality_modifiers,
'segment_run': segments,
}
def read_afrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
time_scale = self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
quality_entries = []
for i in range(quality_entry_count):
mod = self.read_string()
quality_entries.append(mod)
fragments_count = self.read_unsigned_int()
#print 'fragments_count',fragments_count
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
#print 'fragments',fragments
return {'version': version,
'time_scale': time_scale,
'fragments': fragments,
'quality_entries': quality_entries,
}
def read_abst(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
bootstrap_info_version = self.read_unsigned_int()
streamType=self.read_unsigned_char()#self.read(1) # Profile,Live,Update,Reserved
islive=False
if (streamType & 0x20) >> 5:
islive=True
print 'LIVE',streamType,islive
time_scale = self.read_unsigned_int()
current_media_time = self.read_unsigned_long_long()
smpteTimeCodeOffset = self.read_unsigned_long_long()
movie_identifier = self.read_string()
server_count = self.read_unsigned_char()
servers = []
for i in range(server_count):
server = self.read_string()
servers.append(server)
quality_count = self.read_unsigned_char()
qualities = []
        for i in range(quality_count):
            quality = self.read_string()
            qualities.append(quality)
drm_data = self.read_string()
metadata = self.read_string()
segments_count = self.read_unsigned_char()
#print 'segments_count11',segments_count
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
#print 'fragments_run_count11',fragments_run_count
fragments = []
for i in range(fragments_run_count):
# This info is only useful for the player, it doesn't give more info
# for the download process
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {'segments': segments,
'movie_identifier': movie_identifier,
'drm_data': drm_data,
'fragments': fragments,
},islive
def read_bootstrap_info(self):
"""
Read the bootstrap information from the stream,
returns a dict with the following keys:
segments: A list of dicts with the following keys
segment_run: A list of (first_segment, fragments_per_segment) tuples
"""
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info, startFromFregment=None, live=True):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
#print 'segment_run_table',segment_run_table
# I've only found videos with one segment
#if len(segment_run_table['segment_run'])>1:
# segment_run_table['segment_run']=segment_run_table['segment_run'][-2:] #pick latest
frag_start = boot_info['fragments'][0]['fragments']
#print boot_info['fragments']
# sum(j for i, j in segment_run_table['segment_run'])
first_frag_number=frag_start[0]['first']
last_frag_number=frag_start[-1]['first']
if last_frag_number==0:
last_frag_number=frag_start[-2]['first']
endfragment=0
segment_to_start=None
for current in range (len(segment_run_table['segment_run'])):
seg,fregCount=segment_run_table['segment_run'][current]
#print 'segmcount',seg,fregCount
if (not live):
frag_end=last_frag_number
else:
frag_end=first_frag
|
maaku/django-reuse
|
templates/apps/basic/myapp/urls.py
|
Python
|
agpl-3.0
| 886
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# myapp.urls
##
##
# Copyright (C) $YEAR$, $AUTHOR_NAME$ <$AUTHOR_EMAIL$>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of version 3 of the GNU Affero General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this source code; if not, see <http://www.gnu.org/licenses/>,
# or write to
#
# Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301 USA
##
##
# End of File
##
|
kslundberg/pants
|
src/python/pants/backend/jvm/tasks/junit_run.py
|
Python
|
apache-2.0
| 37,903
| 0.00802
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import fnmatch
import os
import sys
from abc import abstractmethod
from collections import defaultdict, namedtuple
from six.moves import range
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.java_tests import JavaTests as junit_tests
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException, TaskError, TestFailedTaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.binaries import binary_util
from pants.java.distribution.distribution import DistributionLocator
from pants.util.contextutil import temporary_file_path
from pants.util.dirutil import (relativize_paths, safe_delete, safe_mkdir, safe_open, safe_rmtree,
touch)
from pants.util.strutil import safe_shlex_split
from pants.util.xml_parser import XmlParser
# TODO(ji): Add unit tests.
# The helper classes (_JUnitRunner and its subclasses) need to use
# methods inherited by JUnitRun from Task. Rather than pass a reference
# to the entire Task instance, we isolate the methods that are used
# in a named tuple and pass that one around.
# TODO(benjy): Why? This seems unnecessarily clunky. The runners only exist because we can't
# (yet?) pick a Task type based on cmd-line flags. But they act "as-if" they were Task types,
# so it seems perfectly reasonable for them to have a reference to the task.
# This trick just makes debugging harder, and requires extra work when a runner implementation
# needs some new thing from the task.
# TODO(ji): (responding to benjy's) IIRC, I was carrying the reference to the Task in very early
# versions, and jsirois suggested that I switch to the current form.
_TaskExports = namedtuple('_TaskExports',
['classpath',
'task_options',
'jvm_options',
'args',
'confs',
'register_jvm_tool',
'tool_classpath',
'workdir'])
def _classfile_to_classname(cls):
clsname, _ = os.path.splitext(cls.replace('/', '.'))
return clsname
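# Example (derived from the helper above):
#   _classfile_to_classname('org/example/FooTest.class')  # -> 'org.example.FooTest'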
class _JUnitRunner(object):
"""Helper class to run JUnit tests with or without coverage.
The default behavior is to just run JUnit tests."""
@classmethod
def register_options(cls, register, register_jvm_tool):
register('--skip', action='store_true', help='Skip running junit.')
register('--fail-fast', action='store_true',
help='Fail fast on the first test failure in a suite.')
register('--batch-size', advanced=True, type=int, default=sys.maxint,
help='Run at most this many tests in a single test process.')
register('--test', action='append',
help='Force running of just these tests. Tests can be specified using any of: '
'[classname], [classname]#[methodname], [filename] or [filename]#[methodname]')
register('--per-test-timer', action='store_true', help='Show progress and timer for each test.')
register('--default-parallel', advanced=True, action='store_true',
help='Run classes without @TestParallel or @TestSerial annotations in parallel.')
register('--parallel-threads', advanced=True, type=int, default=0,
help='Number of threads to run tests in parallel. 0 for autoset.')
register('--test-shard', advanced=True,
help='Subset of tests to run, in the form M/N, 0 <= M < N. '
'For example, 1/3 means run tests number 2, 5, 8, 11, ...')
register('--suppress-output', action='store_true', default=True,
help='Redirect test output to files in .pants.d/test/junit.')
register('--cwd', advanced=True,
help='Set the working directory. If no argument is passed, use the build root. '
'If cwd is set on a target, it will supersede this argument.')
register('--strict-jvm-version', action='store_true', default=False, advanced=True,
help='If true, will strictly require running junits with the same version of java as '
'the platform -target level. Otherwise, the platform -target level will be '
'treated as the minimum jvm to run.')
register_jvm_tool(register,
'junit',
classpath=[
JarDependency(org='org.pantsbuild', name='junit-runner', rev='0.0.8'),
],
main=JUnitRun._MAIN,
# TODO(John Sirois): Investigate how much less we can get away with.
# Clearly both tests and the runner need access to the same @Test, @Before,
# as well as other annotations, but there is also the Assert class and some
# subset of the @Rules, @Theories and @RunWith APIs.
custom_rules=[
Shader.exclude_package('org.junit', recursive=True),
Shader.exclude_package('org.hamcrest', recursive=True)
])
def __init__(self, task_exports, context):
self._task_exports = task_exports
self._context = context
options = task_exports.task_options
self._tests_to_run = options.test
self._batch_size = options.batch_size
self._fail_fast = options.fail_fast
self._working_dir = options.cwd or get_buildroot()
self._strict_jvm_version = options.strict_jvm_version
self._args = copy.copy(task_exports.args)
if options.suppress_output:
self._args.append('-suppress-output')
if self._fail_fast:
self._args.append('-fail-fast')
self._args.append('-outdir')
self._args.append(task_exports.workdir)
if options.per_test_timer:
self._args.append('-per-test-timer')
if options.default_parallel:
self._args.append('-default-parallel')
self._args.append('-parallel-threads')
self._args.append(str(options.parallel_threads))
if options.test_shard:
self._args.append('-test-shard')
self._args.append(options.test_shard)
def execute(self, targets):
# We only run tests within java_tests/junit_tests targets.
#
# But if coverage options are specified, we want to instrument
# and report on all the original targets, not just the test targets.
#
# Thus, we filter out the non-java-tests targets first but
# keep the original targets set intact for coverages.
tests_and_targets = self._collect_test_targets(targets)
if not tests_and_targets:
return
bootstrapped_cp = self._task_exports.tool_classpath('junit')
    def compute_complete_classpath():
return self._task_exports.classpath(targets, cp=bootstrapped_cp)
    self._context.release_lock()
self.instrument(targets, tests_and_targets.keys(), compute_complete_classpath)
def _do_report(exception=None):
self.report(targets, tests_and_targets.keys(), tests_failed_exception=exception)
try:
self.run(tests_and_targets)
_do_report(exception=None)
except TaskError as e:
_do_report(exception=e)
raise
def instrument(self, targets, tests, compute_junit_classpath):
"""Called from coverage classes. Run any code instrumentation needed.
Subclasses should override this if they need more work done.
:param targets: an iterable that contains the targets to run tests for.
:param tests: an iterable that contains all the test class names
e
|
nagyistoce/devide
|
modules/viewers/MaskComBinar.py
|
Python
|
bsd-3-clause
| 62,014
| 0.006305
|
from wxPython._controls import wxLIST_MASK_STATE
from wxPython._controls import wxLIST_STATE_SELECTED
import os.path
# Modified by Francois Malan, LUMC / TU Delft
# December 2009
#
# based on the SkeletonAUIViewer:
# skeleton of an AUI-based viewer module
# Copyright (c) Charl P. Botha, TU Delft.
# set to False for 3D viewer, True for 2D image viewer
IMAGE_VIEWER = False
# import the frame, i.e. the wx window containing everything
import MaskComBinarFrame
# and do a reload, so that the GUI is also updated at reloads of this
# module.
reload(MaskComBinarFrame)
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin
import module_utils
import os
import vtk
import itk
import wx
import copy
import subprocess
#import numpy as np
from OverlaySliceViewer import OverlaySliceViewer
class Mask(object):
def __init__(self, name, file_path, image_data):
self.name = name
self.file_path = file_path
self.data = image_data
# def deepcopy(self):
# return Mask(self.name, self.file_path, self.data.DeepCopy())
class MaskComBinar(IntrospectModuleMixin, ModuleBase):
def __init__(self, module_manager):
"""Standard constructor. All DeVIDE modules have these, we do
the required setup actions.
"""
# we record the setting here, in case the user changes it
# during the lifetime of this model, leading to different
# states at init and shutdown.
self.IMAGE_VIEWER = IMAGE_VIEWER
ModuleBase.__init__(self, module_manager)
# create the view frame
self._view_frame = module_utils.instantiate_module_view_frame(
self, self._module_manager,
MaskComBinarFrame.MaskComBinarFrame)
# change the title to something more spectacular
self._view_frame.SetTitle('MaskComBinar - a tool for measuring and manipulating binary masks')
#initialise data structures
self._init_data_structures()
self._init_2d_render_window()
self._init_3d_render_window()
self.reset_camera_on_mask_display = True
self.first_save_warning = True
# hook up all event handlers
self._bind_events()
# anything you stuff into self._config will be saved
self._config.last_used_dir = ''
# make our window appear (this is a viewer after all)
self.view()
# all modules should toggle this once they have shown their
# views.
self.view_initialised = True
# apply config information to underlying logic
self.sync_module_logic_with_config()
# then bring it all the way up again to the view
self.sync_module_view_with_logic()
#This tool can be used for introspection of wx components
#
def _init_2d_render_window(self):
#create the necessary VTK objects for the 2D window. We use Charl's CMSliceViewer
#which defines all the nice goodies we'll need
self.ren2d = vtk.vtkRenderer()
        self.ren2d.SetBackground(0.4,0.4,0.4)
self.slice_viewer = OverlaySliceViewer(self._view_frame.rwi2d, self.ren2d)
self._view_frame.rwi2d.GetRenderWindow().AddRenderer(self.ren2d)
self.slice_viewer.add_overlay('a', [0, 0, 1, 1]) #Blue for selection A
self.slice_viewer.add_overlay('b', [1, 0, 0, 1]) #Red for selection B
        self.slice_viewer.add_overlay('intersect', [1, 1, 0, 1]) #Yellow for intersection
    def _init_3d_render_window(self):
# create the necessary VTK objects for the 3D window: we only need a renderer,
# the RenderWindowInteractor in the view_frame has the rest.
self.ren3d = vtk.vtkRenderer()
self.ren3d.SetBackground(0.6,0.6,0.6)
self._view_frame.rwi3d.GetRenderWindow().AddRenderer(self.ren3d)
def _init_data_structures(self):
self.opacity_3d = 0.5
self.rgb_blue = [0,0,1]
self.rgb_red = [1,0,0]
self.rgb_yellow = [1,1,0]
self.masks = {}
self.surfaces = {} #This prevents recomputing surface meshes
self.actors3d = {}
self.rendered_masks_in_a = set()
self.rendered_masks_in_b = set()
self.rendered_overlap = False
def _load_mask_from_file(self, file_path):
print "Opening file: %s" % (file_path)
filename = os.path.split(file_path)[1]
reader = None
extension = os.path.splitext(filename)[1]
if extension == '.vti': # VTI
reader = vtk.vtkXMLImageDataReader()
elif extension == '.mha': # MHA
reader = vtk.vtkMetaImageReader()
else:
self._view_frame.dialog_error('Unknown file extension: %s' % extension, 'Unable to handle extension')
return
reader.SetFileName(file_path)
reader.Update()
result = vtk.vtkImageData()
result.DeepCopy(reader.GetOutput())
return result
def load_binary_mask_from_file(self, file_path):
mask_image_data = self._load_mask_from_file(file_path)
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
mask = Mask(fileBaseName, file_path, mask_image_data)
self.add_mask(mask)
def load_multi_mask_from_file(self, file_path):
mask_image_data = self._load_mask_from_file(file_path)
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
#Now we have to create a separate mask for each integer level.
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(mask_image_data)
accumulator.Update()
max_label = int(accumulator.GetMax()[0])
#We assume all labels to have positive values.
for i in range(1,max_label+1):
label_data = self._threshold_image(mask_image_data, i, i)
new_name = '%s_%d' % (fileBaseName, i)
mask = Mask(new_name, file_path, label_data)
self.add_mask(mask)
def save_mask_to_file(self, mask_name, file_path):
if os.path.exists(file_path):
result = self._view_frame.dialog_yesno("%s already exists! \nOverwrite?" % file_path,"File already exists")
if result == False:
print 'Skipped writing %s' % file_path
return #skip this file if overwrite is denied
mask = self.masks[mask_name]
mask.file_path = file_path
self._save_image_to_file(mask.data, file_path)
print 'Wrote mask %s to %s' % (mask_name, file_path)
def _save_image_to_file(self, imagedata, file_path):
filename = os.path.split(file_path)[1]
extension = os.path.splitext(filename)[1]
writer = None
if extension == '.vti': # VTI
writer = vtk.vtkXMLImageDataWriter()
elif extension == '.mha': # MHA
print 'Attempting to create an mha writer. This has failed in the past (?)'
writer = vtk.vtkMetaImageWriter()
writer.SetCompression(True)
else:
self._view_frame.dialog_error('Unknown file extension: %s' % extension, 'Unable to handle extension')
return
writer.SetInput(imagedata)
writer.SetFileName(file_path)
writer.Update()
result = writer.Write()
if result == 0:
self._view_frame.dialog_error('Error writing %s' % filename, 'Error writing file')
print 'ERROR WRITING FILE!!!'
else:
self._view_frame.dialog_info('Successfully wrote %s' % filename, 'Success')
print 'Successfully wrote %s' % file_path
def add_mask(self, mask):
[accept, name] = self._view_frame.dialog_inputtext('Please choose a name for the new mask','Choose a name', mask.name)
if accept:
mask.name = name
if self.masks.has_key(name):
i=1
new_name = '%s%d' % (name, i)
while self.masks.has_key(new_name):
i += 1
new_name = '%s%d' % (mask.name, i)
|
thedod/boilerplate-peewee-flask
|
application/sitepack/babel_by_url.py
|
Python
|
gpl-3.0
| 3,249
| 0.004617
|
import flask_babel
from flask import request, url_for, current_app, g, session
from flask_nav.elements import View
class LanguageCodeFromPathMiddleware(object):
def __init__(self, app, babel_by_url):
self.app = app
self.babel_by_url = babel_by_url
def __call__(self, environ, start_response):
        path = environ['PATH_INFO']
language_code = self.babel_by_url.language_code_from_path(path)
if language_code:
environ['PATH_INFO'] = path[1+len(language_code):]
            environ['LANGUAGE_CODE_FROM_URL'] = language_code
return self.app(environ, start_response)
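# Illustrative effect of the middleware above (hypothetical locale code): with a
# 'de' translation configured, a request for '/de/about' is forwarded with
# environ['PATH_INFO'] == '/about' and environ['LANGUAGE_CODE_FROM_URL'] == 'de'.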
class BabelByUrl(object):
app = None
babel = None
default_locale = None
locales = []
locale_map = {}
def __init__(self, app=None, *args, **kwargs):
if app is not None:
self.init_app(app)
def get_language_code(self):
try:
return session['language_code']
except:
return self.default_locale.language
def set_language_code(self, language_code):
session['language_code'] = language_code
session['language_direction'] = \
self.lookup_locale().character_order=='right-to-left' and 'rtl' \
or 'ltr'
def lookup_locale(self, language_code=None):
return self.locale_map.get(
language_code or self.get_language_code(),
self.default_locale)
def init_app(self, app, *args, **kwargs):
self.app = app
app.wsgi_app = LanguageCodeFromPathMiddleware(app.wsgi_app, self)
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['babel_by_url'] = self
self.babel = app.extensions.get('babel',
flask_babel.Babel(app, *args, **kwargs))
self.default_locale = self.babel.default_locale
locales = set(self.babel.list_translations())
locales.add(self.default_locale)
self.locale_map = dict([(l.language, l) for l in locales])
@self.babel.localeselector
def get_locale():
return self.lookup_locale()
@app.before_request
def init_request_locale():
language_code = request.environ.get('LANGUAGE_CODE_FROM_URL')
if language_code and language_code!=self.get_language_code():
self.set_language_code(language_code)
flask_babel.refresh()
app.context_processor(self.context_processor)
def language_code_from_path(self, path):
for l in self.locale_map.keys():
if path.startswith('/{}/'.format(l)):
return l
return None
def babel_config(self, key, babel_language=None):
babel_language = babel_language or self.get_language_code()
return self.app.config.get(
'{}_{}'.format(key,babel_language.upper()),
self.app.config.get(key))
def context_processor(self):
return {
'babel_config': self.babel_config,
}
def get_language_code():
return current_app.extensions['babel_by_url'].get_language_code()
def babel_config(key, babel_language=None):
return current_app.extensions['babel_by_url'].babel_config(key, babel_language)
|
zalf-lsa/carbiocial-project
|
monica/monica-cluster-mpi/run_carbiocial_simulation.py
|
Python
|
mpl-2.0
| 9,103
| 0.018455
|
#!/usr/bin/python
# -*- coding: ISO-8859-15-*-
import sys
sys.path.append('.') # path to monica.py
import mpi_helper
import monica
import os
import datetime
import numpy
import analyse_monica_outputs
import shutil
import time
import csv
from mpi4py import MPI
# MPI related initialisations
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # number of the processor
size = comm.Get_size() # number of all participating processes
name = MPI.Get_processor_name()
pathToCarbiocialData = "/media/san1_data1/data1/berg/carbiocial/macsur_scaling/"
pathToClimateData = "/media/archiv/md/berg/carbiocial/climate-data-out-0-2544/"
pathToClimateDataReorderingFile = "/media/archiv/md/berg/carbiocial/final_order_dates_l9_sres_a1b_2013-2040.dat"
sep = ","
remove_monica_files_after_simulation = False # True
startDate = "1981-01-01"
endDate = "2012-12-31"
asciiGridHeaders = []
noOfGridRows = 2545
noOfGridCols = 1928
noDataValue = -9999
noSoilValue = -8888
"""
main routine of the carbiocial cluster simulation
"""
def main():
ts = time.time()
output_path = pathToCarbiocialData + "runs/" + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M') + "/"
print "processor #", rank
input_path = pathToCarbiocialData + "input_data/"
#ini_file = "soybean.ini"
ini_file = "maize.ini"
splittedGridDataMap = None
if (rank == 0):
# only one processor reads in the meta information
splittedGridDataMap = splitAsciiGrid(pathToCarbiocialData + "input_data/solos-profile-ids_brazil_900.asc", size)
###################################################
# parallel part
##################################################
    # send each sublist of the split list to one processor
nodeSpecificDataMap = comm.scatter(splittedGridDataMap, root=0)
    # each processor receives a specific number of meta_info_objects
    # that it has to process
print rank, "Received data map with ", len(nodeSpecificDataMap), " elements"
monica_simulation_config = monica.CarbiocialConfiguration()
monica_simulation_config.setInputPath(input_path)
monica_simulation_config.setIniFile(ini_file)
#monica_simulation_config.pathToClimateDataReorderingFile = pathToClimateDataReorderingFile;
#monica_simulation_config.create2013To2040ClimateData = True
#monica_simulation_config.setCropName(crop)
#node_simulation_results = []
coord2year2yield = {}
index = 0
for coord, profileId in nodeSpecificDataMap.iteritems():
row, col = coord
#row, col = (86, 820)
monica_simulation_config.setStartDate(startDate)
monica_simulation_config.setEndDate(endDate)
monica_simulation_config.setClimateFile(pathToClimateData + "row-" + str(row) + "/col-" + str(col) + ".asc")
#monica_simulation_config.setClimateFile(pathToCarbiocialData+"input_data/row-0/col-0.asc")
monica_simulation_config.setRowId(row)
monica_simulation_config.setColId(col)
monica_simulation_config.setProfileId(profileId)
print rank, "###################################"
print rank, "coord: ", coord, " profileId: ", monica_simulation_config.getProfileId()
print rank, "startDate: ", startDate, " endDate: ", endDate
print rank, "climateFile: ", monica_simulation_config.getClimateFile()
path = output_path + "row-" + str(row) + "/col-" + str(col) + "/"
monica_simulation_config.setOutputPath(path)
#if not (os.path.exists(path)):
# print rank, "create_directory: ", path
# os.makedirs(path)
monica_simulation_config.setLatitude(-9.41)
monica_simulation_config.setElevation(300.0)
#monica.activateDebugOutput(True);
monica.activateDebugOutput(False);
#monica.activateDebugFileOutput(False);
#monica.setPathToDebugFile(output_path + "row-" + str(row) + "/col-" + str(col) + "-debug.out");
year2yield = monica.runCarbiocialSimulation(monica_simulation_config)
#print rank, "type(year2yield): ", type(year2yield)
#simResult = getYieldsFromSimulationOutput(path, row, col)
#coord2year2yield[simResult[0]] = simResult[1]
y2y = {}
if len(year2yield) > 0:
#outputFile = open(output_path + "row-" + str(row) + "/col-" + str(col) + "-yields.txt", "wb")
#outputFile.write("year yield\n")
for year, yield_ in year2yield.iteritems():
# outputFile.write(str(year) + " " + str(yield_) + "\n")
y2y[year] = yield_
#outputFile.close()
coord2year2yield[(row, col)] = y2y
# remove simulation result dir
#if remove_monica_files_after_simulation:
# shutil.rmtree(path, True)
print rank, "###################################"
#if index == 1:
# break
index = index + 1
###################################################
# end of parallel part
##################################################
resultList = comm.gather(coord2year2yield, root=0)
if rank == 0:
print "outputing results ..."
#sorted values for creation of yearly grids
row2col2year2yield = {}
#sorted values for creation of avg yield grid over all years
row2col2yields = {}
#print "resultList: ", resultList
years = resultList[0].items()[0][1].keys();
print "years: ", years
#collect data into nested maps to access them below
for c2y2y in resultList:
for (row, col), y2y in c2y2y.iteritems():
if not row in row2col2year2yield:
row2col2year2yield[row] = {}
row2col2yields[row] = {}
row2col2year2yield[row][col] = y2y
row2col2yields[row][col] = y2y.values()
if not (os.path.exists(output_path)):
print "creating output directory: ", output_path
os.makedirs(output_path)
outputGridFilename = "yields-year-"
outputAvgGridFile = open(output_path + "yields-avg.asc", "wb")
outputAvgGridFile.writelines(asciiGridHeaders)
currentColAvgYields = []
year2openFile = {}
year2currentColYields = {}
#open for every available year a file
for year in years:
year2openFile[year] = open(output_path + outputGridFilename + str(year) + ".asc", "wb")
year2openFile[year].writelines(asciiGridHeaders)
    #iterate over all rows and cols, avg years, and assemble an ascii grid line with the column values
for row in range(noOfGridRows):
for col in range(noOfGridCols):
if row in row2col2year2yield and col in row2col2year2yield[row]:
#collect column values for single years
for year, yield_ in row2col2year2yield[row][col].iteritems():
if not year in year2currentColYields:
year2currentColYields[year] = []
year2currentColYields[year].append(yield_)
else:
for year in years:
if not year in year2currentColYields:
year2currentColYields[year] = []
year2currentColYields[year].append(noDataValue)
#collect column values for the averaged years
if row in row2col2yields and col in row2col2yields[row]:
yields = row2col2yields[row][col]
if len(yields) > 0:
currentColAvgYields.append(sum(yields) / len(yields))
else:
currentColAvgYields.append(0)
else:
currentColAvgYields.append(noDataValue)
#write the yearly column values to the according file
      for year, f in year2openFile.iteritems():
line = " ".join([str(ys) for ys in year2currentColYields[year]]) + "\n"
f.write(line)
        year2currentColYields[year] = []
#write the averaged column values to the file
avgLine = " ".join([str(ys) for ys in currentColAvgYields]) + "\n"
outputAvgGridFile.write(avgLine)
currentColAvgYields = []
for year, f in year2openFile.iteritems():
f.close()
outputAvgGridFile.close()
def splitAsciiGrid(pathToFile, noOfNodes):
#pathToFile = "B:\development\cluster\macsur-scaling-code\solos-profile-ids_brazil_900.asc"
f = open(pathToFile)
lines = f.readlines();
f.close()
#store grid header for reuse when creating the output grid
[asciiGridHeaders.append(lines[i]) for i in range(6)]
#print "stored grid header: ", asciiGridHeader
# list that will store all
|
OA-DeepGreen/sphinx-doc
|
conf.py
|
Python
|
apache-2.0
| 4,810
| 0.000416
|
# -*- coding: utf-8 -*-
#
# DeepGreen documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 12 18:24:23 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DeepGreen'
copyright = u'2016, DeepGreen Konsortium'
author = u'green'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'de'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_logo = '_static/deepgreen_logo.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepGreendoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DeepGreen.tex', u'DeepGreen Documentation',
u'green', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deepgreen', u'DeepGreen Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DeepGreen', u'DeepGreen Documentation',
author, 'DeepGreen', 'One line description of project.',
'Miscellaneous'),
]
|
bnoordhuis/mongrel2
|
examples/configs/multi_conf.py
|
Python
|
bsd-3-clause
| 1,384
| 0.007225
|
from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost", routes={
r'/dev/null/(.*)': Dir(base='tests/', index_file='index.html',
default_ctype='text/plain')
})
]
)
sub = Server(
uuid="e3ce7982-086f-4374-adde-bee320d509e6",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="sub",
name="sub",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="sub", routes={
r'/dev/null/(.*)': Dir(base='tests/', index_file='index.html',
default_ctype='text/plain')
})
]
)
foo = Server(
uuid="2355e656-fac6-41c8-9cba-4977b937cb94",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
    default_host="foo",
name="sub",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="foo", routes={
r'/dev/null/(.*)': Dir(base='tests/', index_file='index.html',
default_ctype='text/plain')
})
]
)
commit([main, foo, sub])
|
zetaops/pyoko
|
tests/test_model_to_solr_schema.py
|
Python
|
gpl-3.0
| 1,268
| 0.002366
|
# -*- coding: utf-8 -*-
"""
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from pyoko.conf import settings
from pyoko.db.schema_update import SchemaUpdater
from tests.data.solr_schema import test_data_solr_fields_debug_zero, test_data_solr_fields_debug_not_zero,\
    test_data_solr_schema_debug_zero, test_data_solr_schema_debug_not_zero
from tests.models import Student
def test_collect_index_fields():
st = Student()
result = st._collect_index_fields()
sorted_result = sorted(result, key=lambda x: x[0])
if not settings.SOLR['store']:
sorted_data = sorted(test_data_solr_fields_debug_zero, key=lambda x: x[0])
assert sorted_result == sorted_data
else:
sorted_data = sorted(test_data_solr_fields_debug_not_zero, key=lambda x: x[0])
assert sorted_result == sorted_data
def test_create_solr_schema():
st = Student()
fields = st._collect_index_fields()
result = SchemaUpdater.get_schema_fields(fields)
if not settings.SOLR['store']:
assert sorted(result) == sorted(test_data_solr_schema_debug_zero)
else:
assert sorted(result) == sorted(test_data_solr_schema_debug_not_zero)
|
kowey/attelo
|
attelo/decoding/greedy.py
|
Python
|
gpl-3.0
| 5,290
| 0
|
'''
Implementation of the locally greedy approach similar to DuVerle & Prendinger
(2009, 2010) (but adapted for SDRT, where the notion of adjacency includes
embedded segments)
July 2012
@author: stergos
'''
from __future__ import print_function
import sys
from .interface import Decoder
from .util import (convert_prediction,
get_sorted_edus,
get_prob_map,
simple_candidates)
# pylint: disable=too-few-public-methods
def are_strictly_adjacent(one, two, edus):
""" returns True in the following cases ::
[one] [two]
[two] [one]
in the rest of the cases (when there is an edu between one and two) it
returns False
"""
for edu in edus:
if edu.id != one.id and edu.id != two.id:
if one.end <= edu.start and edu.start <= two.start:
return False
if one.end <= edu.end and edu.end <= two.start:
return False
if two.end <= edu.start and edu.start <= one.start:
return False
if two.end <= edu.end and edu.end <= one.start:
return False
return True
def is_embedded(one, two):
""" returns True when one is embedded in two, that is ::
[two ... [one] ... ]
returns False in all other cases
"""
return two.id != one.id and two.start <= one.start and one.end <= two.end
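# Illustrative check (hypothetical spans, not from the original module): for EDUs
# with one.start, one.end = 5, 8 and two.start, two.end = 3, 12,
# is_embedded(one, two) is True; two EDUs separated by a third EDU fail
# are_strictly_adjacent().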
def get_neighbours(edus):
'''
Return a mapping from each EDU to its neighbours
:type edus: [Edu]
:rtype: Dict Edu [Edu]
'''
neighbours = dict()
for one in edus:
one_neighbours = []
one_neighbours_ids = set()
for two in edus:
if
|
one.id != two.id:
|
if are_strictly_adjacent(one, two, edus):
if two.id not in one_neighbours_ids:
one_neighbours_ids.add(two.id)
one_neighbours.append(two)
if is_embedded(one, two) or is_embedded(two, one):
if two.id not in one_neighbours_ids:
one_neighbours_ids.add(two.id)
one_neighbours.append(two)
neighbours[one] = one_neighbours
return neighbours
class LocallyGreedyState(object):
'''
the mutable parts of the locally greedy algorithm
'''
def __init__(self, instances):
self._edus = get_sorted_edus(instances)
self._edu_ids = set(x.id for x in self._edus)
self._neighbours = get_neighbours(self._edus)
self._prob_dist = get_prob_map(instances)
def _remove_edu(self, original, target):
'''
Given a locally greedy state, an original EDU, and a target EDU
(that the original in meant to point to): remove the original
edu and merge its neighbourhood into that of the target
'''
self._edus.remove(original)
self._edu_ids.remove(original.id)
# PM : added to propagate locality to percolated span heads
tgt_neighbours = self._neighbours[target]
tgt_neighbours.extend(self._neighbours[original])
# print(neighbours[new_span], file=sys.stderr)
tgt_neighbours = [x for x in tgt_neighbours
if x.id in self._edu_ids and x.id != target.id]
def _attach_best(self):
'''
Single pass of the locally greedy algorithm: pick the
highest probability link between any two neighbours.
Remove the source EDU from future consideration.
:rtype: None
'''
highest = 0.0
to_remove = None
attachment = None
new_span = None
for source in self._edus:
for target in self._neighbours[source]:
if (source.id, target.id) in self._prob_dist:
label, prob = self._prob_dist[(source.id, target.id)]
if prob > highest:
highest = prob
to_remove = source
new_span = target
attachment = (source.id, target.id, label)
if to_remove is not None:
self._remove_edu(to_remove, new_span)
return attachment
else: # stop if nothing to attach, but this is wrong
# print("warning: no attachment found", file=sys.stderr)
# print(edus)
# print(edus_id)
# print([neighbours[x] for x in edus])
# sys.exit(0)
self._edus = []
return None
def decode(self):
'''
Run the decoder
:rtype [(EDU, EDU, string)]
'''
attachments = []
while len(self._edus) > 1:
print(len(self._edus), end=' ', file=sys.stderr)
attach = self._attach_best()
if attach is not None:
attachments.append(attach)
print("", file=sys.stderr)
return attachments
# pylint: disable=unused-argument
class LocallyGreedy(Decoder):
'''
The locally greedy decoder
'''
def decode(self, dpack):
cands = simple_candidates(dpack)
prediction = LocallyGreedyState(cands).decode()
return convert_prediction(dpack, prediction)
# pylint: enable=unused-argument
|
DiegoGuidaF/telegram-raspy
|
modules.py
|
Python
|
mit
| 358
| 0.011173
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 02:17:11 2017
@author: guida
"""
import json
import requests
def get_url(url):
response = requests.get(url)
    content = response.content.decode("utf8")
return content
#Json parser
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js
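# Usage sketch (illustrative only; the URL below is a placeholder, not a real
# token or endpoint from this project):
#
#     >>> updates = get_json_from_url("https://api.telegram.org/bot<TOKEN>/getUpdates")  # doctest: +SKIP
#     >>> updates["ok"]  # doctest: +SKIP
#     True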
|
whiskerlabs/mmringbuffer
|
mmringbuffer/constants.py
|
Python
|
mit
| 292
| 0.013699
|
# Constant indices within mmap buffers.
_POS_VALUE_LEN = 8
_READ_POS_IDX = 0
_WRITE_POS_IDX = _POS_VALUE_LEN
_HEADER_LEN = _POS_VALUE_LEN * 2
# Item size constants.
_ITEM_SIZE_LEN = 4
# struct.[un]pack format string for length fields
_POS_VALUE_FORMAT = "q"
_ITEM_SIZE_FORMAT = "i"
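# Consistency sketch (illustrative, not from the original source): the format
# strings above are expected to pack to the byte lengths declared earlier.
#
#     >>> import struct
#     >>> struct.calcsize(_POS_VALUE_FORMAT) == _POS_VALUE_LEN
#     True
#     >>> struct.calcsize(_ITEM_SIZE_FORMAT) == _ITEM_SIZE_LEN
#     True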
|
Moonshile/fast12306
|
src/core/settings.py
|
Python
|
apache-2.0
| 2,707
| 0.002277
|
#coding=utf-8
import os
# Basic settings
# requests settings
TIMEOUT = 5
VERIFY = False
# directories might be used
LOCATIONS = {
'log': 'log',
'data': 'data',
}
# stderr is redirected to this file
ERR_LOG_FILE = os.path.join(LOCATIONS['log'], 'err.log')
# log in this file
LOGGING_FILE = os.path.join(LOCATIONS['log'], 'requests.log')
STATION_NAME_FILE = os.path.join(LOCATIONS['data'], 'station_name.js')
CAPTCHA_FILE = os.path.join(LOCATIONS['data'], 'captcha.png')
CRYPTO_JS = os.path.join(LOCATIONS['data'], 'crypto.js')
CRYPTO_SCRIPT = os.path.join(LOCATIONS['data'], 'do_crypto.js')
# Query settings
QUERY_INTERVAL = 1
QUERY_ARGS_NS = 'leftTicketDTO'
TRAIN_DATA_JSON_KEY = 'queryLeftNewDTO'
LOGIN_NS = 'loginUserDTO'
USER_NS = 'userDTO'
PURPOSE_CODES = {'学生': '0X00', '普通': 'ADULT'}
PURPOSE_ID = {'0X00': 3, '学生': 3, 'ADULT': 1, '普通': 1}
SEAT_CODES = {
'商务座': 'swz',
'特等座': 'tz',
'一等座': 'zy',
'二等座': 'ze',
'高级软卧': 'gr',
'软卧': 'rw',
'硬卧': 'yw',
'软座': 'rz',
'硬座': 'yz',
'无座': 'wz',
'其他': 'qt',
}
SEAT_ID = {
'SWZ': '9',
'TZ': 'P',
'ZY': 'M',
'ZE': 'O',
'GR': '6',
'RW': '4',
'YW': '3',
'RZ': '2',
'YZ': '1',
'WZ': 'WZ',
'QT': '',
}
URL_BASE = 'https://kyfw.12306.cn/'
URLS = {
'entry': URL_BASE + 'otn/',
'station_name': URL_BASE + 'otn/resources/js/framework/station_name.js?station_version=1.8260',
'query': URL_BASE + 'otn/leftTicket/queryT',
'query_log': URL_BASE + 'otn/leftTicket/log',
'login_captcha': URL_BASE + 'otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand',
'order_captcha': URL_BASE + 'otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp',
'check_captcha': URL_BASE + 'otn/passcodeNew/checkRandCodeAnsyn',
'login_token': URL_BASE + 'otn/login/init',
'order_init_token': URL_BASE + 'otn/leftTicket/init',
'login': URL_BASE + 'otn/login/loginAysnSuggest',
'check_login': URL_BASE + 'otn/login/checkUser',
'passengers': URL_BASE + 'otn/confirmPassenger/getPassengerDTOs',
'order_init_submit': URL_BASE + 'otn/leftTicket/submitOrderRequest',
    'order_confirm': URL_BASE + 'otn/confirmPassenger/initDc',
    'order_check': URL_BASE + 'otn/confirmPassenger/checkOrderInfo',
}
# 3rd party tools settings
# Setup for settings
import socket
if socket.gethostname() in ['duankq-ThinkPad-X201', ]:
DEBUG = True
else:
DEBUG = False
import os
for loc in LOCATIONS.values():
if not os.path.isdir(loc):
os.mkdir(loc)
for (k, v) in SEAT_CODES.iteritems():
SEAT_ID[k] = SEAT_ID[v.upper()]
SEAT_ID[v] = SEAT_ID[v.upper()]
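# Effect of the loop above (illustrative): after start-up the Chinese seat name,
# the lower-case code and the upper-case code all resolve to the same wire value.
#
#     >>> SEAT_ID['商务座'] == SEAT_ID['swz'] == SEAT_ID['SWZ'] == '9'
#     True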
|
polyaxon/polyaxon
|
core/polyaxon/vendor/shell_pty.py
|
Python
|
apache-2.0
| 6,910
| 0.001013
|
# This code is based on logic from
# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
# Licensed under the MIT license:
# Copyright (c) 2011 Joshua D. Bartlett
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import errno
import fcntl
import json
import os
import pty
import select
import signal
import struct
import termios
import tty
from polyaxon.client.transport import ws_client
class PseudoTerminal:
"""Wraps the pseudo-TTY (PTY) allocated to a container.
The PTY is managed via the current process' TTY until it is closed.
"""
START_ALTERNATE_MODE = set("\x1b[?{0}h".format(i) for i in ("1049", "47", "1047"))
END_ALTERNATE_MODE = set("\x1b[?{0}l".format(i) for i in ("1049", "47", "1047"))
ALTERNATE_MODE_FLAGS = tuple(START_ALTERNATE_MODE) + tuple(END_ALTERNATE_MODE)
def __init__(self, client_shell=None):
self.client_shell = client_shell
self.master_fd = None
def start(self, argv=None):
"""
Create a spawned process.
Based on the code for pty.spawn().
"""
if not argv:
argv = [os.environ["SHELL"]]
pid, master_fd = pty.fork()
self.master_fd = master_fd
if pid == pty.CHILD:
os.execlp(argv[0], *argv)
old_handler = signal.signal(signal.SIGWINCH, self._signal_winch)
try:
mode = tty.tcgetattr(pty.STDIN_FILENO)
tty.setraw(pty.STDIN_FILENO)
restore = 1
except tty.error: # This is the same as termios.error
restore = 0
self._init_fd()
try:
self._loop()
except (IOError, OSError):
if restore:
tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode)
self.client_shell.close()
self.client_shell = None
if self.master_fd:
os.close(self.master_fd)
self.master_fd = None
signal.signal(signal.SIGWINCH, old_handler)
def _init_fd(self):
"""
Called once when the pty is first set up.
"""
self._set_pty_size()
def _signal_winch(self, signum, frame):
"""
        Signal handler for SIGWINCH - window size has changed.
"""
self._set_pty_size()
def _set_pty_size(self):
"""
Sets the window size of the child pty based on the window size of
our own controlling terminal.
"""
packed = fcntl.ioctl(
pty.STDOUT_FILENO, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)
)
rows, cols, h_pixels, v_pixels = struct.unpack("HHHH", packed)
self.client_shell.write_channel(
            ws_client.RESIZE_CHANNEL, json.dumps({"Height": rows, "Width": cols})
)
def _loop(self):
"""
Main select loop. Passes all data to self.master_read() or self.stdin_read().
"""
assert self.client_shell is not None
client_shell = self.client_shell
while 1:
try:
rfds, wfds, xfds = select.select(
[pty.STDIN_FILENO, client_shell.sock.sock], [], []
)
except select.error as e:
no = e.errno
if no == errno.EINTR:
continue
if pty.STDIN_FILENO in rfds:
data = os.read(pty.STDIN_FILENO, 1024)
self.stdin_read(data)
if client_shell.sock.sock in rfds:
# read from client_shell
if client_shell.peek_stdout():
self.master_read(client_shell.read_stdout())
if client_shell.peek_stderr():
self.master_read(client_shell.read_stderr())
# error occurs
if client_shell.peek_channel(ws_client.ERROR_CHANNEL):
break
def write_stdout(self, data):
"""
Writes to stdout as if the child process had written the data.
"""
os.write(pty.STDOUT_FILENO, data.encode())
def write_master(self, data):
"""
Writes to the child process from its controlling terminal.
"""
assert self.client_shell is not None
self.client_shell.write_stdin(data)
def master_read(self, data):
"""
Called when there is data to be sent from the child process back to the user.
"""
flag = self.findlast(data, self.ALTERNATE_MODE_FLAGS)
if flag is not None:
if flag in self.START_ALTERNATE_MODE:
# This code is executed when the child process switches the
# terminal into alternate mode. The line below
# assumes that the user has opened vim, and writes a
# message.
self.write_master("IEntering special mode.\x1b")
elif flag in self.END_ALTERNATE_MODE:
# This code is executed when the child process switches the
# terminal back out of alternate mode. The line below
# assumes that the user has returned to the command
# prompt.
self.write_master('echo "Leaving special mode."\r')
self.write_stdout(data)
def stdin_read(self, data):
"""
Called when there is data to be sent from the user/controlling
terminal down to the child process.
"""
self.write_master(data)
@staticmethod
def findlast(s, substrs):
"""
Finds whichever of the given substrings occurs last in the given string
and returns that substring, or returns None if no such strings occur.
"""
i = -1
result = None
for substr in substrs:
pos = s.rfind(substr)
if pos > i:
i = pos
result = substr
return result
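    # Illustrative example (not from the original source): the substring whose
    # *last* occurrence sits right-most in the string wins.
    #
    #     >>> PseudoTerminal.findlast("abcabc", ["a", "b"])
    #     'b'
    #     >>> PseudoTerminal.findlast("abc", ["x", "y"]) is None
    #     True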
|
Azure/azure-sdk-for-python
|
sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py
|
Python
|
mit
| 3,645
| 0.003841
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import TYPE_CHECKING
import msal
from .._internal import AadClient, AsyncContextManager
from .._internal.get_token_mixin import GetTokenMixin
from ..._credentials.certificate import get_client_credential
from ..._internal import AadClientCertificate, validate_tenant_id
from ..._persistent_cache import _load_persistent_cache
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import AccessToken
class CertificateCredential(AsyncContextManager, GetTokenMixin):
"""Authenticates as a service principal using a certificate.
The certificate must have an RSA private key, because this credential signs assertions using RS256. See
    `Azure Active Directory documentation
<https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#register-your-certificate-with-microsoft-identity-platform>`_
for more information on configuring certificate authentication.
:param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID.
    :param str client_id: the service principal's client ID
:param str certificate_path: path to a PEM-encoded certificate file including the private key. If not provided,
`certificate_data` is required.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
:keyword bytes certificate_data: the bytes of a certificate in PEM format, including the private key
:keyword password: The certificate's password. If a unicode string, it will be encoded as UTF-8. If the certificate
requires a different encoding, pass appropriately encoded bytes instead.
:paramtype password: str or bytes
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
"""
def __init__(self, tenant_id, client_id, certificate_path=None, **kwargs):
# type: (str, str, Optional[str], **Any) -> None
validate_tenant_id(tenant_id)
client_credential = get_client_credential(certificate_path, **kwargs)
self._certificate = AadClientCertificate(
client_credential["private_key"], password=client_credential.get("passphrase")
)
cache_options = kwargs.pop("cache_persistence_options", None)
if cache_options:
cache = _load_persistent_cache(cache_options)
else:
cache = msal.TokenCache()
self._client = AadClient(tenant_id, client_id, cache=cache, **kwargs)
self._client_id = client_id
super().__init__()
async def __aenter__(self):
await self._client.__aenter__()
return self
async def close(self):
"""Close the credential's transport session."""
await self._client.__aexit__()
async def _acquire_token_silently(self, *scopes: str, **kwargs: "Any") -> "Optional[AccessToken]":
return self._client.get_cached_access_token(scopes, **kwargs)
async def _request_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
return await self._client.obtain_token_by_client_certificate(scopes, self._certificate, **kwargs)
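    # Usage sketch (illustrative; the tenant ID, client ID, certificate path and
    # scope below are placeholders, not values from this repository), e.g. inside
    # an async function:
    #
    #     >>> credential = CertificateCredential(          # doctest: +SKIP
    #     ...     "my-tenant-id", "my-client-id", certificate_path="/path/to/cert.pem")
    #     >>> token = await credential.get_token("https://vault.azure.net/.default")  # doctest: +SKIP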
|
uw-it-aca/bridge-sis-provisioner
|
sis_provisioner/csv/writer.py
|
Python
|
apache-2.0
| 1,345
| 0
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from sis_provisioner.dao.uw_account import get_all_uw_accounts
from sis_provisioner.csv import get_filepath
from sis_provisioner.csv.user_writer import make_import_user_csv_files
from sis_provisioner.util.log import log_exception
from sis_provisioner.util.settings import get_csv_file_path_prefix
logger = logging.getLogger(__name__)
class CsvMaker:
"""
    For the given loader, create the corresponding CSV files.
"""
def __init__(self,):
"""
@param: loader an account_managers.loader subclass object
"""
self.file_wrote = False
self.filepath = get_filepath(path_prefix=get_csv_file_path_prefix())
def fetch_users(self):
return get_all_uw_accounts()
def load_files(self):
try:
            number_users_wrote = make_import_user_csv_files(
self.fetch_users(), self.filepath)
logger.info("Total {0:d} users wrote into {1}\n".format(
number_users_wrote, self.filepath))
return number_users_wrote
except Exception:
log_exception(
logger,
"Failed to make user csv file in {0}".format(self.filepath),
traceback.format_exc())
|
our-city-app/oca-backend
|
src/rogerthat/bizz/features.py
|
Python
|
apache-2.0
| 3,441
| 0.001453
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from functools import total_ordering
from google.appengine.ext import db
from mcfw.consts import MISSING
from mcfw.properties import long_property, typed_property
from mcfw.rpc import returns, arguments
from mcfw.utils import Enum
from rogerthat.dal.mobile import get_mobile_settings_cached
from rogerthat.rpc.models import Mobile
@total_ordering # @total_ordering uses __lt__ and __eq__ to create __gt__, __ge__, __le__ and __ne__
class Version(object):
major = long_property('1') # actually minor
minor = long_property('2') # actually patch
    def __init__(self, major=MISSING, minor=MISSING):
if major is not MISSING:
self.major = major
if minor is not MISSING:
self.minor = minor
def __eq__(self, other):
return (self.major, self.minor) == (other.major, other.minor)
def __lt__(self, other):
return (self.major, self.minor) < (other.major, other.minor)
def __str__(self):
return '%s.%s' % (self.major, self.minor)
__repr__ = __str__
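# Illustrative example (not from the original source): thanks to @total_ordering,
# Version objects compare as (major, minor) tuples, which is what the feature
# gates below rely on.
#
#     >>> Version(1, 2448) <= Version(1, 3608)
#     True
#     >>> Version(0, 2448) < Version(1, 0)
#     True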
class Feature(object):
ios = typed_property('1', Version)
android = typed_property('2', Version)
def __init__(self, ios=MISSING, android=MISSING):
if ios is not MISSING:
self.ios = ios
if android is not MISSING:
self.android = android
class Features(Enum):
FRIEND_SET = Feature(ios=Version(0, 162), android=Version(0, 1003))
ADVANCED_ORDER = Feature(ios=Version(0, 765), android=Version(0, 1626))
NEWS = Feature(ios=Version(0, 1334), android=Version(0, 2448))
ASSETS = Feature(ios=Version(0, 1334), android=Version(0, 2448))
SPLIT_USER_DATA = Feature(ios=Version(1, 2517), android=Version(1, 3608))
PAYMENTS = Feature(ios=Version(1, 2637), android=Version(1, 3874)) # TODO bump to newest version
EMBEDDED_APPS = Feature(ios=Version(1, 2637), android=Version(1, 3883))
ASK_TOS = Feature(ios=Version(1, 2729), android=Version(1, 3989))
FORMS = Feature(ios=Version(1, 3383), android=Version(1, 4890))
EMBEDDED_APPS_IN_SMI = Feature(ios=Version(1, 3793), android=Version(1, 5558))
@returns(bool)
@arguments(mobile=Mobile, feature=Feature)
def mobile_supports_feature(mobile, feature):
if mobile.is_ios:
version = feature.ios
elif mobile.is_android:
version = feature.android
else:
return True
@db.non_transactional
def get_mobile_settings():
return get_mobile_settings_cached(mobile)
mobile_settings = get_mobile_settings()
return Version(mobile_settings.majorVersion, mobile_settings.minorVersion) >= version
@returns(bool)
@arguments(mobiles=[Mobile], feature=Feature)
def all_mobiles_support_feature(mobiles, feature):
return all((mobile_supports_feature(mobile, feature) for mobile in mobiles))
|
serbinsh/UniSpec_Processing
|
src/BasicProcessing/Main.py
|
Python
|
gpl-2.0
| 2,919
| 0.015416
|
'''
Created on Aug 4, 2015
@author: amcmahon
'''
from BasicProcessing import UnispecProcessing
from BasicProcessing import consts
import os.path
import sys
def main():
"""
Main function for generating CSV files from a directory of Unispec data.
Input/Output paths, white plate identifier string and header size should be specified in "config.txt".
    Outputs one CSV file per tram run, where each row represents a stop and columns are wavelengths interpolated to 1nm.
"""
path = str(os.path.realpath('.'))
# Edited by SPS on 11/06/2015
#Spec = UnispecProcessing(path + r'\config.txt')
Spec = UnispecProcessing(os.path.join(path, "config.txt"))
run_count, WP_count, stop_count = Spec.GetFileLists()
    for run in range(0,run_count):
        if (WP_count[run] == 0) or (stop_count[run] == 0):
continue
WP_data = [[[None], [None]] for item in range(0,WP_count[run])]
Stop_data = [[[None], [None]] for item in range(0,stop_count[run])]
sat = [[None], [None]]
#When getting data from these, they are formatted as:
# var[file index][header/data][row index][CH_B_WL/CH_B/CH_A_WL/CH_A]
WP_data = Spec.ReadFiles(Spec.WPs[run], Spec.HeaderLines)
Stop_data = Spec.ReadFiles(Spec.Stops[run], Spec.HeaderLines)
#Formatted as:
# var[file index][CH_B/CH_A][WL]
# value of var is the WL saturation occurred at
sat_WP = Spec.CheckSaturation(WP_data)
sat_stops = Spec.CheckSaturation(Stop_data)
print("Saturated Measurement Count\n\t\tCh_B\tCh_A")
for idx, curfile in enumerate(sat_WP):
print("WP " + str(idx) + ":\t\t" + str(curfile[1]) + "\t" + str(curfile[2]))
for idx, curfile in enumerate(sat_stops):
print("Stop " + str(idx) + ":\t\t" + str(curfile[1]) + "\t" + str(curfile[2]))
print("\n" + str(len(sat_WP)) + " WPs and " + str(len(sat_stops)) + " stops saturated.")
#Spec.RemoveSaturated(WP_data, sat_WP)
#Spec.RemoveSaturated(Stop_data, sat_stops)
#Formatted as:
# var[file, WL/CH_B/CH_A] = [1 dim array of values]
intdata_WPs = Spec.Interp(WP_data)
intdata_Stops = Spec.Interp(Stop_data)
avg_WP = Spec.AvgWPs(intdata_WPs)
#Plot all WPs with average
#Spec.plot_Averaging(intdata_WPs, avg_WP)
R = Spec.Refl(intdata_Stops, avg_WP)
#Plot reflectance for a particular stop
# plot_R_A(Refl data, Stop #)
#Spec.plot_R(R,20)
dt = Spec.GetDateTime(WP_data[0])
Spec.WriteOutput(R, Spec.OutputPath, Spec.OutputPrefix + dt[consts.date] + "__" + dt[consts.time].replace(':','_') + ".csv")
if __name__ == "__main__":
main()
|
ktaneishi/deepchem
|
deepchem/feat/tests/test_mol_graphs.py
|
Python
|
mit
| 6,110
| 0.0018
|
"""
Tests for Molecular Graph data structures.
"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import unittest
import os
import sys
import numpy as np
from deepchem.feat.mol_graphs import ConvMol
from deepchem.feat.mol_graphs import MultiConvMol
class TestMolGraphs(unittest.TestCase):
"""
Test mol graphs.
"""
def test_construct_conv_mol(self):
"""Tests that ConvMols can be constructed without crash."""
N_feat = 4
# Artificial feature array.
atom_features = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
adj_list = [[1], [0, 2], [1]]
mol = ConvMol(atom_features, adj_list)
def test_conv_mol_deg_slice(self):
"""Tests that deg_slice works properly."""
atom_features = np.array([[20, 21, 22, 23], [24, 25, 26, 27],
[28, 29, 30, 31], [32, 33, 34, 35]])
adj_list = [[1, 2], [0, 3], [0, 3], [1, 2]]
mol = ConvMol(atom_features, adj_list)
assert np.array_equal(
mol.get_deg_slice(),
# 0 atoms of degree 0
# 0 atoms of degree 1
# 4 atoms of degree 2
# 0 atoms of degree 3
# 0 atoms of degree 4
# 0 atoms of degree 5
# 0 atoms of degree 6
# 0 atoms of degree 7
# 0 atoms of degree 8
# 0 atoms of degree 9
# 0 atoms of degree 10
np.array([[0, 0], [0, 0], [0, 4], [0, 0], [0, 0], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0], [0, 0]]))
def test_get_atom_features(self):
"""Test that the atom features are computed properly."""
atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54,
55], [56, 57, 58, 59]])
canon_adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]
mol = ConvMol(atom_features, canon_adj_list)
# atom 4 has 0 neighbors
# atom 0 has 2 neighbors
# atom 1 has 2 neighbors
# atom 2 has 2 neighbors
# atom 3 has 3 neighbors.
# Verify that atom features have been sorted by atom degree.
assert np.array_equal(
mol.get_atom_features(),
np.array([[56, 57, 58, 59], [40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54, 55]]))
def test_get_adjacency_list(self):
"""Tests that adj-list is canonicalized properly."""
atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54,
55], [56, 57, 58, 59]])
canon_adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]
mol = ConvMol(atom_features, canon_adj_list)
# Sorting is done by atom degree as before. So the ordering goes
# 4, 0, 1, 2, 3 now in terms of the original ordering. The mapping
# from new position to old position is
# {(4, 0), (0, 1), (1, 2), (2, 3), (3, 4)}. Check that adjacency
# list respects this reordering and returns correct adjacency list.
assert (mol.get_adjacency_list() == [[4], [2, 3], [1, 4], [1, 4], [2, 3,
0]])
def test_agglomerate_molecules(self):
"""Test AggrMol.agglomerate_mols."""
molecules = []
#### First example molecule
N_feat = 4
# Artificial feature array.
atom_features = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
adj_list = [[1], [0, 2], [1]]
molecules.append(ConvMol(atom_features, adj_list))
#### Second example molecule
atom_features = np.array([[20, 21, 22, 23], [24, 25, 26, 27],
[28, 29, 30, 31], [32, 33, 34, 35]])
adj_list = [[1, 2], [0, 3], [0, 3], [1, 2]]
molecules.append(ConvMol(atom_features, adj_list))
### Third example molecule
atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54,
55], [56, 57, 58, 59]])
adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]
molecules.append(ConvMol(atom_features, adj_list))
# Test agglomerate molecule method
concat_mol = ConvMol.agglomerate_mols(molecules)
assert concat_mol.get_num_atoms() == 12
assert concat_mol.get_num_molecules() == 3
atom_features = concat_mol.get_atom_features()
assert np.array_equal(atom_features[0, :], [1, 2, 3, 4])
assert np.array_equal(atom_features[2, :], [56, 57, 58, 59])
assert np.array_equal(atom_features[11, :], [52, 53, 54, 55])
assert np.array_equal(atom_features[4, :], [20, 21, 22, 23])
deg_adj_lists = concat_mol.get_deg_adjacency_lists()
# No atoms of degree 0
assert np.array_equal(deg_adj_lists[0], np.zeros([0, 0]))
# 3 atoms of degree 1
assert np.array_equal(deg_adj_lists[1], [[3], [3], [11]])
# 8 atoms of degree 2
assert np.array_equal(
deg_adj_lists[2],
[[0, 1], [5, 6], [4, 7], [4, 7], [5, 6], [9, 10], [8, 11], [8, 11]])
# 1 atom of degree 3
assert np.array_equal(deg_adj_lists[3], [[9, 10, 2]])
# 0 atoms of degree 4
    assert np.array_equal(deg_adj_lists[4], np.zeros([0, 4]))
# 0 atoms of degree 5
assert np.array_equal(deg_adj_lists[5], np.zeros([0, 5]))
def test_null_conv_mol(self):
"""Running Null AggrMol Test. Only works when max_deg=6 and min_deg=0"""
num_feat = 4
min_deg = 0
null_mol = ConvMol.get_null_mol(num_feat)
deg_adj_lists = null_mol.get_deg_adjacency_lists()
# Check that atoms are only connected to themselves.
assert np.array_equal(deg_adj_lists[10],
[[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]])
assert np.array_equal(deg_adj_lists[1], [[1]])
# Check that there's one atom of each degree.
assert np.array_equal(null_mol.get_deg_slice(),
[[0, 1], [1, 1], [2, 1], [3, 1], [4, 1], [5, 1],
[6, 1], [7, 1], [8, 1], [9, 1], [10, 1]])
|
andrzejgorski/whylog
|
whylog/constraints/verifier.py
|
Python
|
bsd-3-clause
| 8,521
| 0.003286
|
import itertools
from whylog.config.investigation_plan import Clue
from whylog.constraints.exceptions import TooManyConstraintsToNegate
from whylog.front.utils import FrontInput
class Verifier(object):
UNMATCHED = Clue(None, None, None, None)
@classmethod
def _create_investigation_result(cls, clues_combination, constraints, linkage):
"""
        based on a clues combination and constraints,
returns appropriate InvestigationResult object
which collects information about lines
(FrontInput objects) instead of Clues
"""
return InvestigationResult(
[FrontInput.from_clue(clue) for clue in clues_combination], constraints, linkage
)
@classmethod
def _verify_constraint(cls, combination, effect, index, constraint, constraint_manager):
"""
checks if specified clues (which represents parsers: 1,2,.. for some rule) and
effect (which represents parser 0 from this rule) satisfy one given constraint.
returns True if so, or False otherwise
"""
constraint_verifier = constraint_manager.get_constraint_object(index, constraint)
groups = []
for group_info in constraint['clues_groups']:
parser_num, group_num = group_info
if parser_num == 0:
groups.append(effect.regex_parameters[group_num - 1])
else:
if combination[parser_num - 1] == Verifier.UNMATCHED:
return False
groups.append(combination[parser_num - 1].regex_parameters[group_num - 1])
return constraint_verifier.verify(groups, constraint['params'])
@classmethod
def _clues_combinations(cls, clues_tuples, collected_subset=[]):
"""
recursive generator that returns all permutations according to schema:
from first pair (list, number) of clues_tuples,
produces permutations with size 'number' from 'list's elements
and concatenates it with _clues_combinations invoked on the rest of clues_tuples.
example:
>>> xs = [([1, 2], 1), ('abc', 2)]
>>> for l in Verifier._clues_combinations(xs):
>>> print l
[1, 'a', 'b']
[1, 'a', 'c']
[1, 'b', 'a']
[1, 'b', 'c']
[1, 'c', 'a']
[1, 'c', 'b']
[2, 'a', 'b']
[2, 'a', 'c']
[2, 'b', 'a']
[2, 'b', 'c']
[2, 'c', 'a']
[2, 'c', 'b']
it always should be called with empty accumulator,
that is collected_subset=[]
"""
if len(clues_tuples) != 0:
first_list, repetitions_number = clues_tuples[0]
for clues in itertools.permutations(first_list, repetitions_number):
for subset in cls._clues_combinations(
clues_tuples[1:], collected_subset + list(clues)
):
yield subset
else:
yield collected_subset
@classmethod
def _construct_proper_clues_lists(cls, original_clues_lists):
clues_lists = []
for clues, occurrences in original_clues_lists:
if clues:
clues_lists.append((clues, occurrences))
else:
clues_lists.append(([Verifier.UNMATCHED], occurrences))
return clues_lists
@classmethod
def _pack_results_for_constraint_or(cls, combination, constraints):
return cls._create_investigation_result(
(clue for clue in combination if not clue == Verifier.UNMATCHED), constraints,
InvestigationResult.OR
)
@classmethod
def constraints_and(cls, clues_lists, effect, constraints, constraint_manager):
"""
for each combination of clues (they are generated by _clues_combinations)
checks if for all given constraints their requirements are satisfied
and for each such combination produces InvestigationResult object.
returns list of all produced InvestigationResults
"""
        clues_lists = cls._construct_proper_clues_lists(clues_lists)
causes = []
for combination in cls._clues_combinations(clues_lists):
if all(
                cls._verify_constraint(combination, effect, idx, constraint, constraint_manager)
for idx, constraint in enumerate(constraints)
):
causes.append(
cls._create_investigation_result(
combination, constraints, InvestigationResult.AND
)
)
return causes
@classmethod
def constraints_or(cls, clues_lists, effect, constraints, constraint_manager):
"""
for each combination of clues (they are generated by _clues_combinations)
checks if for any of given constraints their requirements are satisfied
and for each such combination produces InvestigationResult object.
returns list of all produced InvestigationResults
"""
if not constraints:
# when there is lack of constraints, but there are existing clues combinations,
# each of them should be returned
return [
cls._pack_results_for_constraint_or(combination, constraints)
for combination in cls._clues_combinations(clues_lists)
]
causes = []
clues_lists = cls._construct_proper_clues_lists(clues_lists)
for combination in cls._clues_combinations(clues_lists):
verified_constraints = [
constraint
for idx, constraint in enumerate(constraints)
if cls._verify_constraint(combination, effect, idx, constraint, constraint_manager)
] # yapf: disable
if verified_constraints:
causes.append(
cls._pack_results_for_constraint_or(combination, verified_constraints)
)
return causes
@classmethod
def constraints_not(cls, clues_lists, effect, constraints, constraint_manager):
"""
provide investigation if there is zero or one constraint,
        because NOT linkage only makes sense in those cases
"""
if len(constraints) > 1:
raise TooManyConstraintsToNegate()
if constraints:
if clues_lists:
return cls.single_constraint_not(
clues_lists, effect, constraints[0], constraint_manager
)
else:
if clues_lists:
# if all parsers found their matched logs, the NOT requirement isn't satisfied
return []
return [cls._create_investigation_result([], [], InvestigationResult.NOT)]
@classmethod
def single_constraint_not(cls, clues_lists, effect, constraint, constraint_manager):
"""
for each combination of clues (they are generated by _clues_combinations)
checks for given constraint if its requirements are not satisfied
and if they are not, it produces InvestigationResult object.
returns list of all produced InvestigationResults
"""
clues_lists = cls._construct_proper_clues_lists(clues_lists)
for combination in cls._clues_combinations(clues_lists):
if cls._verify_constraint(combination, effect, 0, constraint, constraint_manager):
# called with constraint index = 0, because this function assumes that there is one constraint
return []
return [cls._create_investigation_result([], [constraint], InvestigationResult.NOT)]
class InvestigationResult(object):
AND = "AND"
OR = "OR"
NOT = "NOT"
def __init__(self, lines, constraints, cons_linkage):
self.lines = lines
self.constraints = constraints
self.constraints_linkage = cons_linkage
def __repr__(self):
if self.constraints_linkage in [self.AND, self.OR]:
return "\n(\n result lines: %s;\n due to '%s' constraints: %s\n)" % (
self.lines, self.constraints_linkage, self.constraints
)
else:
return "\n(\n
|
xray/xray
|
xarray/backends/api.py
|
Python
|
apache-2.0
| 50,534
| 0.000772
|
import os.path
import warnings
from glob import glob
from io import BytesIO
from numbers import Number
from pathlib import Path
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Hashable,
Iterable,
Mapping,
Tuple,
Union,
)
import numpy as np
from .. import backends, coding, conventions
from ..core import indexing
from ..core.combine import (
_infer_concat_order_from_positions,
_nested_combine,
combine_by_coords,
)
from ..core.dataarray import DataArray
from ..core.dataset import Dataset
from ..core.utils import close_on_error, is_grib_path, is_remote_uri
from .common import AbstractDataStore, ArrayWriter
from .locks import _get_scheduler
if TYPE_CHECKING:
try:
from dask.delayed import Delayed
except ImportError:
Delayed = None
DATAARRAY_NAME = "__xarray_dataarray_name__"
DATAARRAY_VARIABLE = "__xarray_dataarray_variable__"
def _get_default_engine_remote_uri():
try:
import netCDF4 # noqa: F401
engine = "netcdf4"
except ImportError: # pragma: no cover
try:
import pydap # noqa: F401
engine = "pydap"
except ImportError:
raise ValueError(
"netCDF4 or pydap is required for accessing "
"remote datasets via OPeNDAP"
)
return engine
def _get_default_engine_grib():
msgs = []
try:
import Nio # noqa: F401
msgs += ["set engine='pynio' to access GRIB files with PyNIO"]
except ImportError: # pragma: no cover
pass
try:
import cfgrib # noqa: F401
msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"]
except ImportError: # pragma: no cover
pass
if msgs:
raise ValueError(" or\n".join(msgs))
else:
raise ValueError("PyNIO or cfgrib is required for accessing " "GRIB files")
def _get_default_engine_gz():
try:
import scipy # noqa: F401
engine = "scipy"
except ImportError: # pragma: no cover
raise ValueError("scipy is required for accessing .gz files")
return engine
def _get_default_engine_netcdf():
try:
import netCDF4 # noqa: F401
engine = "netcdf4"
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf # noqa: F401
engine = "scipy"
except ImportError:
raise ValueError(
"cannot read or write netCDF files without "
"netCDF4-python or scipy installed"
)
return engine
def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
    else:
if filename_or_obj.tell() != 0:
raise ValueError(
"file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager"
)
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b"CDF"):
engine = "scipy"
elif magic_number.startswith(b"\211HDF\r\n\032\n"):
engine = "h5netcdf"
if isinstance(filename_or_obj, bytes):
raise ValueError(
"can't open netCDF4/HDF5 as bytes "
"try passing a path or file-like object"
)
else:
if isinstance(filename_or_obj, bytes) and len(filename_or_obj) > 80:
filename_or_obj = filename_or_obj[:80] + b"..."
raise ValueError(
"{} is not a valid netCDF file "
"did you mean to pass a string for a path instead?".format(filename_or_obj)
)
return engine
def _get_default_engine(path, allow_remote=False):
if allow_remote and is_remote_uri(path):
engine = _get_default_engine_remote_uri()
elif is_grib_path(path):
engine = _get_default_engine_grib()
elif path.endswith(".gz"):
engine = _get_default_engine_gz()
else:
engine = _get_default_engine_netcdf()
return engine
def _normalize_path(path):
if is_remote_uri(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def _validate_dataset_names(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_name(name):
if isinstance(name, str):
if not name:
raise ValueError(
"Invalid name for DataArray or Dataset key: "
"string must be length 1 or greater for "
"serialization to netCDF files"
)
elif name is not None:
raise TypeError(
"DataArray.name or Dataset key must be either a "
"string or None for serialization to netCDF files"
)
for k in dataset.variables:
check_name(k)
def _validate_attrs(dataset):
"""`attrs` must have a string key and a value which is either: a number,
a string, an ndarray or a list/tuple of numbers/strings.
"""
def check_attr(name, value):
if isinstance(name, str):
if not name:
raise ValueError(
"Invalid name for attr: string must be "
"length 1 or greater for serialization to "
"netCDF files"
)
else:
raise TypeError(
"Invalid name for attr: {} must be a string for "
"serialization to netCDF files".format(name)
)
if not isinstance(value, (str, Number, np.ndarray, np.number, list, tuple)):
raise TypeError(
"Invalid value for attr: {} must be a number, "
"a string, an ndarray or a list/tuple of "
"numbers/strings for serialization to netCDF "
"files".format(value)
)
# Check attrs on the dataset itself
for k, v in dataset.attrs.items():
check_attr(k, v)
# Check attrs on each variable within the dataset
for variable in dataset.variables.values():
for k, v in variable.attrs.items():
check_attr(k, v)
def _protect_dataset_variables_inplace(dataset, cache):
for name, variable in dataset.variables.items():
if name not in variable.dims:
# no need to protect IndexVariable objects
data = indexing.CopyOnWriteArray(variable._data)
if cache:
data = indexing.MemoryCachedArray(data)
variable.data = data
def _finalize_store(write, store):
""" Finalize this store by explicitly syncing and closing"""
del write # ensure writing is done first
store.close()
def load_dataset(filename_or_obj, **kwargs):
"""Open, load into memory, and close a Dataset from a file or file-like
object.
This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs
from `open_dataset` in that it loads the Dataset into memory, closes the
file, and returns the Dataset. In contrast, `open_dataset` keeps the file
handle open and lazy loads its contents. All parameters are passed directly
to `open_dataset`. See that documentation for further details.
Returns
-------
dataset : Dataset
The newly created Dataset.
See Also
--------
open_dataset
"""
if "cache" in kwargs:
raise TypeError("cache has no effect in this context")
with open_dataset(filename_or_obj, **kwargs) as ds:
return ds.load()
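# Usage sketch (illustrative; "example.nc" is a placeholder path): this helper is
# roughly equivalent to opening lazily and forcing a load before the handle closes.
#
#     >>> ds = load_dataset("example.nc")             # doctest: +SKIP
#     >>> with open_dataset("example.nc") as handle:  # doctest: +SKIP
#     ...     ds = handle.load()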
def load_dataarray(filename_or_obj, **kwargs):
"""Open, load into memory, and close a DataArray from a file or file-like
object containing a single data variable.
This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs
from `open_dataarray` in that it loads the Dataset into memory, closes the
file, and returns the Dataset. In contrast, `open_dataarray` keeps the file
handle open and lazy loads its contents. All parameters are passed directly
to `open_dataarray`. See that documentation
|
bcoca/ansible
|
test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py
|
Python
|
gpl-3.0
| 8,386
| 0.002146
|
"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
from __future__ import annotations
import ast
import json
import os
import re
import sys
import typing as t
import yaml
from yaml.resolver import Resolver
from yaml.constructor import SafeConstructor
from yaml.error import MarkedYAMLError
from _yaml import CParser # pylint: disable=no-name-in-module
from yamllint import linter
from yamllint.config import YamlLintConfig
def main():
"""Main program body."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
checker = YamlChecker()
checker.check(paths)
checker.report()
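# Invocation sketch (illustrative; the paths are placeholders): targets can be
# passed as arguments or piped in on stdin, one path per line.
#
#     python yamllinter.py roles/example/tasks/main.yml plugins/modules/example.py
#     git ls-files '*.yml' | python yamllinter.py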
class TestConstructor(SafeConstructor):
"""Yaml Safe Constructor that knows about Ansible tags."""
def construct_yaml_unsafe(self, node):
"""Construct an unsafe tag."""
try:
constructor = getattr(node, 'id', 'object')
if constructor is not None:
constructor = getattr(self, 'construct_%s' % constructor)
except AttributeError:
constructor = self.construct_object
value = constructor(node)
return value
TestConstructor.add_constructor(
u'!unsafe',
TestConstructor.construct_yaml_unsafe)
TestConstructor.add_constructor(
u'!vault',
TestConstructor.construct_yaml_str)
TestConstructor.add_constructor(
u'!vault-encrypted',
TestConstructor.construct_yaml_str)
class TestLoader(CParser, TestConstructor, Resolver):
"""Custom YAML loader that recognizes custom Ansible tags."""
def __init__(self, stream):
CParser.__init__(self, stream)
TestConstructor.__init__(self)
Resolver.__init__(self)
class YamlChecker:
"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
def __init__(self):
self.messages = []
def report(self):
"""Print yamllint report to stdout."""
report = dict(
messages=self.messages,
)
print(json.dumps(report, indent=4, sort_keys=True))
def check(self, paths): # type: (t.List[str]) -> None
"""Check the specified paths."""
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config')
yaml_conf = YamlLintConfig(file=os.path.join(config_path, 'default.yml'))
module_conf = YamlLintConfig(file=os.path.join(config_path, 'modules.yml'))
plugin_conf = YamlLintConfig(file=os.path.join(config_path, 'plugins.yml'))
for path in paths:
extension = os.path.splitext(path)[1]
with open(path) as file:
contents = file.read()
if extension in ('.yml', '.yaml'):
self.check_yaml(yaml_conf, path, contents)
elif extension == '.py':
if path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/'):
conf = module_conf
else:
conf = plugin_conf
self.check_module(conf, path, contents)
else:
raise Exception('unsupported extension: %s' % extension)
def check_yaml(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
"""Check the given YAML."""
self.check_parsable(path, contents)
self.messages += [self.result_to_message(r, path) for r in linter.run(contents, conf, path)]
def check_module(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
"""Check the given module."""
docs = self.get_module_docs(path, contents)
for key, value in docs.items():
yaml_data = value['yaml']
lineno = value['lineno']
fmt = value['fmt']
if fmt != 'yaml':
continue
if yaml_data.startswith('\n'):
yaml_data = yaml_data[1:]
lineno += 1
self.check_parsable(path, yaml_data, lineno)
messages = list(linter.run(yaml_data, conf, path))
self.messages += [self.result_to_message(r, path, lineno - 1, key) for r in messages]
def check_parsable(self, path, contents, lineno=1): # type: (str, str, int) -> None
"""Check the given contents to verify they can be parsed as YAML."""
try:
yaml.load(contents, Loader=TestLoader)
except MarkedYAMLError as ex:
self.messages += [{'code': 'unparsable-with-libyaml',
'message': '%s - %s' % (ex.args[0], ex.args[2]),
'path': path,
'line': ex.problem_mark.line + lineno,
'column': ex.problem_mark.column + 1,
'level': 'error',
}]
@staticmethod
def result_to_message(result, path, line_offset=0, prefix=''): # type: (t.Any, str, int, str) -> t.Dict[str, t.Any]
"""Convert the given result to a dictionary and return it."""
if prefix:
prefix = '%s: ' % prefix
return dict(
code=result.rule or result.level,
message=prefix + result.desc,
path=path,
line=result.line + line_offset,
column=result.column,
level=result.level,
)
def get_module_docs(self, path, contents): # type: (str, str) -> t.Dict[str, t.Any]
"""Return the module documentation for the given module contents."""
module_doc_types = [
'DOCUMENTATION',
'EXAMPLES',
'RETURN',
]
docs = {}
fmt_re = re.compile(r'^# fmt:\s+(\S+)')
def check_assignment(statement, doc_types=None):
"""Check the given statement for a documentation assignment."""
for target in statement.targets:
if not isinstance(target, ast.Name):
continue
if doc_types and target.id not in doc_types:
continue
fmt_match = fmt_re.match(statement.value.s.lstrip())
fmt = 'yaml'
if fmt_match:
fmt = fmt_match.group(1)
docs[target.id] = dict(
yaml=statement.value.s,
lineno=statement.lineno,
end_lineno=statement.lineno + len(statement.value.s.splitlines()),
fmt=fmt.lower(),
)
module_ast = self.parse_module(path, contents)
if not module_ast:
return {}
is_plugin = path.startswith('lib/ansible/modules/') or path.startswith('lib/ansible/plugins/') or path.startswith('plugins/')
        is_doc_fragment = path.startswith('lib/ansible/plugins/doc_fragments/') or path.startswith('plugins/doc_fragments/')
if is_plugin and not is_doc_fragment:
for body_statement in module_ast.body:
if isinstance(body_statement, ast.Assign):
check_assignment(body_statement, module_doc_types)
elif is_doc_fragment:
for body_statement in module_ast.body:
if isinstance(body_statement, ast.ClassDef):
for class_statement in body_statement.body:
if isinstance(class_statement, ast.Assign):
check_assignment(class_statement)
else:
raise Exception('unsupported path: %s' % path)
return docs
def parse_module(self, path, contents): # type: (str, str) -> t.Optional[ast.Module]
"""Parse the given contents and return a module if successful, otherwise return None."""
try:
return ast.parse(contents)
except SyntaxError as ex:
self.messages.append(dict(
code='python-syntax-error',
message=str(ex),
path=path,
line=ex.lineno,
column=ex.offset,
level='error',
))
except Exception as ex: # pylint: disable=broad-except
self.messages.append(dict(
code='python-parse-error',
|
edouard-lopez/django-learner
|
django_learner/settings/local.py
|
Python
|
bsd-3-clause
| 279
| 0
|
from .base import * # noqa
DEBUG = True
# Databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'django_learner',
'USER': 'django_learner',
'PASSWORD': 'admin',
'HOST': 'localhost'
}
}
|
borni-dhifi/vatnumber
|
vatnumber/__init__.py
|
Python
|
gpl-3.0
| 8,864
| 0.000451
|
#This file is part of vatnumber. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'''
Check the VAT number depending on the country, based on formulas from
http://sima-pc.com/nif.php (dead link, was http://web.archive.org/web/20120118023804/http://sima-pc.com/nif.php)
http://en.wikipedia.org/wiki/Vat_number
'''
__version__ = '1.3'
def _posint(x):
value = int(x)
if value < 0:
raise ValueError('not a positive integer')
return value
def countries():
'''
Return the list of country's codes that have check function
'''
res = [x.replace('check_vat_', '').upper() for x in globals()
if x.startswith('check_vat_')]
res.sort()
return res
def mult_add(i, j):
'''
Sum each digits of the multiplication of i and j.
'''
mult = i * j
res = 0
for i in range(len(str(mult))):
res += int(str(mult)[i])
return res
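# Worked example (illustrative): mult_add(3, 7) multiplies to 21 and then sums
# the digits of the product, 2 + 1.
#
#     >>> mult_add(3, 7)
#     3
#     >>> mult_add(9, 9)   # 81 -> 8 + 1
#     9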
def mod1110(value):
'''
Compute ISO 7064, Mod 11,10
'''
t = 10
for i in value:
c = int(i)
t = (2 * ((t + c) % 10 or 10)) % 11
return (11 - t) % 10
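# Worked example (illustrative, from a manual trace of the loop above; worth
# re-checking against the ISO 7064 Mod 11,10 specification):
#
#     >>> mod1110('794623')
#     9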
def check_vat_at(vat):
'''
Check Austria VAT number.
'''
import stdnum.at.uid
return stdnum.at.uid.is_valid(vat)
def check_vat_al(vat):
'''
Check Albania VAT number.
'''
if len(vat) != 10:
return False
if vat[0] not in ('J', 'K'):
return False
try:
_posint(vat[1:9])
except ValueError:
return False
if ord(vat[9]) < 65 or ord(vat[9]) > 90:
return False
return True
def check_vat_ar(vat):
'''
Check Argentina VAT number.
'''
if len(vat) != 11:
return False
base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
aux = 0
for i in xrange(10):
aux += int(vat[i]) * base[i]
aux = 11 - (aux - (int(aux / 11) * 11))
if aux == 11:
aux = 0
if aux == 10:
aux = 9
return aux == int(vat[10])
def check_vat_be(vat):
'''
Check Belgium VAT number.
'''
import stdnum.be.vat
return stdnum.be.vat.is_valid(vat)
def check_vat_bg(vat):
'''
Check Bulgaria VAT number.
'''
import stdnum.bg.vat
return stdnum.bg.vat.is_valid(vat)
def check_vat_cl(rut):
'''
Check Chile RUT number.
'''
try:
_posint(rut[:-1])
except ValueError:
return False
sum = 0
for i in range(len(rut) - 2, -1, -1):
sum += int(rut[i]) * (((len(rut) - 2 - i) % 6) + 2)
check = 11 - (sum % 11)
if check == 11:
return rut[-1] == '0'
elif check == 10:
return rut[-1].upper() == 'K'
else:
return check == int(rut[-1])
def check_vat_co(rut):
'''
Check Colombian RUT number.
'''
if len(rut) != 10:
return False
try:
_posint(rut)
except ValueError:
return False
nums = [3, 7, 13, 17, 19, 23, 29, 37, 41, 43, 47, 53, 59, 67, 71]
sum = 0
for i in range(len(rut) - 2, -1, -1):
sum += int(rut[i]) * nums[len(rut) - 2 - i]
if sum % 11 > 1:
return int(rut[-1]) == 11 - (sum % 11)
else:
return int(rut[-1]) == sum % 11
def check_vat_cy(vat):
'''
Check Cyprus VAT number.
'''
import stdnum.cy.vat
return stdnum.cy.vat.is_valid(vat)
def check_vat_cz(vat):
'''
Check Czech Republic VAT number.
'''
import stdnum.cz.dic
return stdnum.cz.dic.is_valid(vat)
def check_vat_de(vat):
'''
Check Germany VAT number.
'''
    import stdnum.de.vat
return stdnum.de.vat.is_valid(vat)
def check_vat_dk(vat):
'''
Check Denmark VAT number.
'''
import stdnum.dk.cvr
return stdnum.dk.cvr.is_valid(vat)
def check_vat_ee(vat):
'''
Check Estonia VAT number.
'''
import stdnum.ee.kmkr
return stdnum.ee.kmkr.is_valid(vat)
def check_vat_es(vat):
'''
Check Spain VAT number.
'''
import stdnum.es.nif
return stdnum.es.nif.is_valid(vat)
def check_vat_fi(vat):
'''
Check Finland VAT number.
'''
import stdnum.fi.alv
return stdnum.fi.alv.is_valid(vat)
def check_vat_fr(vat):
'''
Check France VAT number.
'''
import stdnum.fr.tva
return stdnum.fr.tva.is_valid(vat)
def check_vat_gb(vat):
'''
Check United Kingdom VAT number.
'''
import stdnum.gb.vat
return stdnum.gb.vat.is_valid(vat)
def check_vat_gr(vat):
'''
Check Greece VAT number.
'''
import stdnum.gr.vat
return stdnum.gr.vat.is_valid(vat)
def check_vat_el(vat):
'''
Check Greece VAT number.
'''
return check_vat_gr(vat)
def check_vat_hr(vat):
'''
Check Croatia VAT number.
'''
import stdnum.hr.oib
return stdnum.hr.oib.is_valid(vat)
def check_vat_hu(vat):
'''
Check Hungary VAT number.
'''
import stdnum.hu.anum
return stdnum.hu.anum.is_valid(vat)
def check_vat_ie(vat):
'''
Check Ireland VAT number.
'''
import stdnum.ie.vat
return stdnum.ie.vat.is_valid(vat)
def check_vat_it(vat):
'''
Check Italy VAT number.
'''
import stdnum.it.iva
return stdnum.it.iva.is_valid(vat)
def check_vat_lt(vat):
'''
Check Lithuania VAT number.
'''
import stdnum.lt.pvm
return stdnum.lt.pvm.is_valid(vat)
def check_vat_lu(vat):
'''
Check Luxembourg VAT number.
'''
import stdnum.lu.tva
return stdnum.lu.tva.is_valid(vat)
def check_vat_lv(vat):
'''
Check Latvia VAT number.
'''
import stdnum.lv.pvn
return stdnum.lv.pvn.is_valid(vat)
def check_vat_mt(vat):
'''
Check Malta VAT number.
'''
import stdnum.mt.vat
return stdnum.mt.vat.is_valid(vat)
def check_vat_nl(vat):
'''
Check Netherlands VAT number.
'''
import stdnum.nl.btw
return stdnum.nl.btw.is_valid(vat)
def check_vat_pl(vat):
'''
Check Poland VAT number.
'''
import stdnum.pl.nip
return stdnum.pl.nip.is_valid(vat)
def check_vat_pt(vat):
'''
Check Portugal VAT number.
'''
import stdnum.pt.nif
return stdnum.pt.nif.is_valid(vat)
def check_vat_ro(vat):
'''
Check Romania VAT number.
'''
import stdnum.ro.cf
return stdnum.ro.cf.is_valid(vat)
def check_vat_se(vat):
'''
Check Sweden VAT number.
'''
import stdnum.se.vat
return stdnum.se.vat.is_valid(vat)
def check_vat_si(vat):
'''
Check Slovenia VAT number.
'''
import stdnum.si.ddv
return stdnum.si.ddv.is_valid(vat)
def check_vat_sk(vat):
'''
Check Slovakia VAT number.
'''
import stdnum.sk.dph
return stdnum.sk.dph.is_valid(vat)
def check_vat_sm(vat):
'''
Check San Marino VAT number.
'''
if len(vat) != 5:
return False
try:
_posint(vat)
except ValueError:
return False
return True
def check_vat_ua(vat):
'''
Check Ukraine VAT number.
'''
if len(vat) != 8:
return False
try:
_posint(vat)
except ValueError:
return False
return True
def check_vat_uk(vat):
'''
Check United Kingdom VAT number.
'''
return check_vat_gb(vat)
def check_vat_ru(vat):
'''
Check Russia VAT number.
'''
if len(vat) != 10 and len(vat) != 12:
return False
try:
_posint(vat)
except ValueError:
return False
if len(vat) == 10:
check_sum = 2 * int(vat[0]) + 4 * int(vat[1]) + 10 * int(vat[2]) + \
3 * int(vat[3]) + 5 * int(vat[4]) + 9 * int(vat[5]) + \
4 * int(vat[6]) + 6 * int(vat[7]) + 8 * int(vat[8])
check = check_sum % 11
if check % 10 != int(vat[9]):
return False
else:
check_sum1 = 7 * int(vat[0]) + 2 * int(vat[1]) + 4 * int(vat[2]) + \
10 * int(vat[3]) + 3 * int(vat[4]) + 5 * int(vat[5]) + \
9 * int(vat[6]) + 4 * int(vat[7]) + 6 * int(vat[8]) + \
8 * int(vat[9])
check = check_sum1 % 11
if check != int(vat[10]):
return False
check_sum2 = 3 * int(vat[0]) + 7 * int(vat[1]) + 2 * int(vat[2]) + \
4 * int(vat[3])
|
akrherz/iem
|
htdocs/plotting/auto/scripts200/p215.py
|
Python
|
mit
| 7,271
| 0
|
"""KDE of Temps."""
import calendar
from datetime import date, datetime
import pandas as pd
from pyiem.plot.util import fitbox
from pyiem.plot import figure
from pyiem.util import get_autoplot_context, get_sqlalchemy_conn
from pyiem.exceptions import NoDataFound
from matplotlib.ticker import MaxNLocator
from scipy.stats import gaussian_kde
import numpy as np
from sqlalchemy import text
PDICT = {
"high": "High Temperature [F]",
"low": "Low Temperature [F]",
"avgt": "Average Temperature [F]",
}
MDICT = dict(
[
("all", "No Month/Time Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
"""Return a dict d
|
escribing how to call this plotter"""
desc = {}
desc["cache"] = 3600
desc["data"] = True
desc[
"description"
] = """This autoplot generates some metrics on the distribution of temps
over a given period of years. The plotted distribution in the upper panel
    uses a Gaussian kernel density estimate.
"""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IATDSM",
label="Select station:",
network="IACLIMATE",
),
dict(
type="select",
options=PDICT,
name="v",
default="high",
label="Daily Variable to Plot:",
),
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
dict(
type="year",
min=1880,
name="sy1",
default=1981,
label="Inclusive Start Year for First Period of Years:",
),
dict(
type="year",
min=1880,
name="ey1",
default=2010,
label="Inclusive End Year for First Period of Years:",
),
dict(
type="year",
min=1880,
name="sy2",
default=1991,
label="Inclusive Start Year for Second Period of Years:",
),
dict(
type="year",
min=1880,
name="ey2",
default=2020,
label="Inclusive End Year for Second Period of Years:",
),
]
return desc
def get_df(ctx, period):
"""Get our data."""
table = "alldata_%s" % (ctx["station"][:2])
month = ctx["month"]
ctx["mlabel"] = f"{month.capitalize()} Season"
if month == "all":
months = range(1, 13)
ctx["mlabel"] = "All Year"
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
months = [ts.month]
ctx["mlabel"] = calendar.month_name[ts.month]
with get_sqlalchemy_conn("coop") as conn:
df = pd.read_sql(
text(
f"SELECT high, low, (high+low)/2. as avgt from {table} WHERE "
"day >= :d1 and day <= :d2 and station = :station "
"and high is not null "
"and low is not null and month in :months"
),
conn,
params={
"d1": date(ctx[f"sy{period}"], 1, 1),
"d2": date(ctx[f"ey{period}"], 12, 31),
"station": ctx["station"],
"months": tuple(months),
},
)
return df
def f2s(value):
"""HAAAAAAAAAAAAACK."""
return ("%.5f" % value).rstrip("0").rstrip(".")
def plotter(fdict):
"""Go"""
ctx = get_autoplot_context(fdict, get_description())
df1 = get_df(ctx, "1")
df2 = get_df(ctx, "2")
if df1.empty or df2.empty:
raise NoDataFound("Failed to find data for query!")
kern1 = gaussian_kde(df1[ctx["v"]])
kern2 = gaussian_kde(df2[ctx["v"]])
xpos = np.arange(
min([df1[ctx["v"]].min(), df2[ctx["v"]].min()]),
max([df1[ctx["v"]].max(), df2[ctx["v"]].max()]) + 1,
dtype="i",
)
period1 = "%s-%s" % (ctx["sy1"], ctx["ey1"])
period2 = "%s-%s" % (ctx["sy2"], ctx["ey2"])
label1 = "%s-%s %s" % (ctx["sy1"], ctx["ey1"], ctx["v"])
label2 = "%s-%s %s" % (ctx["sy2"], ctx["ey2"], ctx["v"])
df = pd.DataFrame({label1: kern1(xpos), label2: kern2(xpos)}, index=xpos)
fig = figure(apctx=ctx)
title = "[%s] %s %s Distribution\n%s vs %s over %s" % (
ctx["station"],
ctx["_nt"].sts[ctx["station"]]["name"],
PDICT[ctx["v"]],
period2,
period1,
ctx["mlabel"],
)
fitbox(fig, title, 0.12, 0.9, 0.91, 0.99)
ax = fig.add_axes([0.12, 0.38, 0.75, 0.52])
C1 = "blue"
C2 = "red"
alpha = 0.4
ax.plot(
df.index.values,
df[label1],
lw=2,
c=C1,
label=rf"{label1} - $\mu$={df1[ctx['v']].mean():.2f}",
zorder=4,
)
ax.fill_between(xpos, 0, df[label1], color=C1, alpha=alpha, zorder=3)
ax.axvline(df1[ctx["v"]].mean(), color=C1)
ax.plot(
df.index.values,
df[label2],
lw=2,
c=C2,
label=rf"{label2} - $\mu$={df2[ctx['v']].mean():.2f}",
zorder=4,
)
ax.fill_between(xpos, 0, df[label2], color=C2, alpha=alpha, zorder=3)
ax.axvline(df2[ctx["v"]].mean(), color=C2)
ax.set_ylabel("Guassian Kernel Density Estimate")
ax.legend(loc="best")
ax.grid(True)
ax.xaxis.set_major_locator(MaxNLocator(20))
# Sub ax
ax2 = fig.add_axes([0.12, 0.1, 0.75, 0.22])
delta = df[label2] - df[label1]
ax2.plot(df.index.values, delta)
dam = delta.abs().max() * 1.1
ax2.set_ylim(0 - dam, dam)
ax2.set_xlabel(PDICT[ctx["v"]])
ax2.set_ylabel("%s minus\n%s" % (period2, period1))
ax2.grid(True)
ax2.fill_between(xpos, 0, delta, where=delta > 0, color=C2, alpha=alpha)
ax2.fill_between(xpos, 0, delta, where=delta < 0, color=C1, alpha=alpha)
ax2.axhline(0, ls="--", lw=2, color="k")
ax2.xaxis.set_major_locator(MaxNLocator(20))
# Percentiles
levels = [0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 0.75]
levels.extend([0.9, 0.95, 0.99, 0.995, 0.999])
p1 = df1[ctx["v"]].describe(percentiles=levels)
p2 = df2[ctx["v"]].describe(percentiles=levels)
y = 0.8
fig.text(0.88, y, "Percentile", rotation=70)
fig.text(0.91, y, period1, rotation=70)
fig.text(0.945, y, period2, rotation=70)
for ptile in levels:
y -= 0.03
val = f2s(ptile * 100.0)
fig.text(0.88, y, val)
fig.text(0.91, y, "%.1f" % (p1[f"{val}%"],))
fig.text(0.95, y, "%.1f" % (p2[f"{val}%"],))
return fig, df
if __name__ == "__main__":
plotter(dict())
|
peterseymour/django-storages-1.1.8
|
storages/tests/s3boto.py
|
Python
|
bsd-3-clause
| 8,692
| 0.001266
|
import os
import mock
from uuid import uuid4
from urllib.request import urlopen
import datetime
from django.test import TestCase
from django.core.files.base import ContentFile
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from boto.s3.key import Key
from storages.backends import s3boto
__all__ = (
'ParseTsExtendedCase',
'SafeJoinTest',
'S3BotoStorageTests',
#'S3BotoStorageFileTests',
)
class ParseTsExtendedCase(TestCase):
def test_normal(self):
value = s3boto.parse_ts_extended("Wed, 13 Mar 2013 12:45:49 GMT")
self.assertEquals(value, datetime.datetime(2013, 3, 13, 12, 45, 49))
class S3BotoTestCase(TestCase):
@mock.patch('storages.backends.s3boto.S3Connection')
def setUp(self, S3Connection):
self.storage = s3boto.S3BotoStorage()
self.storage._bucket = mock.MagicMock()
class SafeJoinTest(TestCase):
def test_normal(self):
path = s3boto.safe_join("", "path/to/somewhere", "other", "path/to/somewhere")
self.assertEquals(path, "path/to/somewhere/other/path/to/somewhere")
def test_with_dot(self):
path = s3boto.safe_join("", "path/./somewhere/../other", "..",
".", "to/./somewhere")
self.assertEquals(path, "path/to/somewhere")
def test_base_url(self):
path = s3boto.safe_join("base_url", "path/to/somewhere")
self.assertEquals(path, "base_url/path/to/somewhere")
def test_base_url_with_slash(self):
path = s3boto.safe_join("base_url/", "path/to/somewhere")
self.assertEquals(path, "base_url/path/to/somewhere")
def test_suspicious_operation(self):
self.assertRaises(ValueError,
s3boto.safe_join, "base", "../../../../../../../etc/passwd")
class S3BotoStorageTests(S3BotoTestCase):
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.get_key.assert_called_once_with(name)
key = self.storage.bucket.get_key.return_value
key.set_metadata.assert_called_with('Content-Type', 'text/plain')
key.set_contents_from_file.assert_called_with(
content,
headers={'Content-Type': 'text/plain'},
policy=self.storage.default_acl,
reduced_redundancy=self.storage.reduced_redundancy,
rewind=True
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
if not s3boto.S3BotoStorage.gzip: # Gzip not available.
return
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
key = self.storage.bucket.get_key.return_value
key.set_metadata.assert_called_with('Content-Type', 'text/css')
key.set_contents_from_file.assert_called_with(
content,
headers={'Content-Type': 'text/css', 'Content-Encoding': 'gzip'},
policy=self.storage.default_acl,
reduced_redundancy=self.storage.reduced_redundancy,
rewind=True,
)
def test_compress_content_len(self):
"""
        Test that the file returned by _compress_content() is readable.
"""
if not s3boto.S3BotoStorage.gzip: # Gzip not available.
return
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writing.txt'
content = 'new content'
# Set the ACL header used when creating/writing data.
self.storage.bucket.connection.provider.acl_header = 'x-amz-acl'
# Set the mocked key's bucket
self.storage.bucket.get_key.return_value.bucket = self.storage.bucket
# Set the name of the mock object
self.storage.bucket.get_key.return_value.name = name
file = self.storage.open(name, 'w')
self.storage.bucket.get_key.assert_called_with(name)
file.write(content)
self.storage.bucket.initiate_multipart_upload.assert_called_with(
name,
headers={'x-amz-acl': 'public-read'},
reduced_redundancy=self.storage.reduced_redundancy,
)
# Save the internal file before closing
_file = file.file
file.close()
file._multipart.upload_part_from_file.assert_called_with(
_file, 1, headers=self.storage.headers,
)
file._multipart.complete_upload.assert_called_once()
#def test_storage_exists_and_delete(self):
# # show file does not exist
# name = self.prefix_path('test_exists.txt')
# self.assertFalse(self.storage.exists(name))
#
# # create the file
# content = 'new content'
# file = self.storage.open(name, 'w')
# file.write(content)
# file.close()
#
# # show file exists
# self.assertTrue(self.storage.exists(name))
#
# # delete the file
# self.storage.delete(name)
#
# # show file does not exist
# self.assertFalse(self.storage.exists(name))
def test_storage_listdir_base(self):
file_names = ["some/path/1.txt", "2.txt", "other/path/3.txt", "4.txt"]
self.storage.bucket.list.return_value = []
for p in file_names:
key = mock.MagicMock(spec=Key)
key.name = p
self.storage.bucket.list.return_value.append(key)
dirs, files = self.storage.listdir("")
self.assertEqual(len(dirs), 2)
for directory in ["some", "other"]:
self.assertTrue(directory in dirs,
""" "%s" not in directory list "%s".""" % (
directory, dirs))
self.assertEqual(len(files), 2)
for filename in ["2.txt", "4.txt"]:
            self.assertTrue(filename in files,
                            """ "%s" not in file list "%s".""" % (
                            filename, files))
def test_storage_listdir_subdir(self):
file_names = ["some/path/1.txt", "some/2.txt"]
self.storage.bucket.list.return_value = []
        for p in file_names:
            key = mock.MagicMock(spec=Key)
key.name = p
self.storage.bucket.list.return_value.append(key)
dirs, files = self.storage.listdir("some/")
self.assertEqual(len(dirs), 1)
self.assertTrue('path' in dirs,
""" "path" not in directory list "%s".""" % (dirs,))
self.assertEqual(len(files), 1)
self.assertTrue('2.txt' in files,
""" "2.txt" not in files list "%s".""" % (files,))
#def test_storage_size(self):
# name = self.prefix_path('test_storage_size.txt')
# content = 'new content'
# f = ContentFile(content)
# self.storage.save(name, f)
# self.assertEqual(self.storage.size(name), f.size)
#
#def test_storage_url(self):
# name = self.prefix_path('test_storage_size.txt')
# content = 'new content'
# f = ContentFile(content)
# self.storage.save(name, f)
# self.assertEqual(content, urlopen(self.storage.url(name)).read())
#class S3BotoStorageFileTests(S3BotoTestCase):
# def test_multipart_upload(self):
# nparts = 2
# name = self.prefix_path("test_multipart_upload.txt")
# mode = 'w'
# f = s3boto.S3BotoStorageFile(name, mode, self.storage)
# content_length = 1024 * 1024# 1 MB
# content = 'a' * content_length
#
# bytes = 0
# target = f._write_buffer_size * nparts
# while bytes < target:
# f.
|
shimpe/pyvectortween
|
examples/example_pointandcoloranimation.py
|
Python
|
mit
| 3,258
| 0.003069
|
if __name__ == "__main__":
import gizeh
import moviepy.editor as mpy
from vectortween.TimeConversion import TimeConversion
from vectortween.PointAnimation import PointAnimation
from vectortween.ColorAnimation import ColorAnimation
W, H = 250, 250 # width, height, in pixels
duration = 10 # duration of the clip, in seconds
fps = 25
tc = TimeConversion()
def draw_line(startpoint, endpoint, radius, linewidth, startpointfill, linefill, surface):
if None not in startpoint and None not in endpoint and linefill is not None \
and startpointfill is not None and radius is not None and linewidth is not None:
circle = gizeh.circle(radius, xy=(startpoint[0], startpoint[1]), fill=startpointfill)
circle2 = gizeh.circle(radius, xy=(endpoint[0], endpoint[1]), fill=startpointfill)
line = gizeh.polyline([startpoint, endpoint], False, stroke=linefill, stroke_width=linewidth)
circle.draw(surface)
circle2.draw(surface)
line.draw(surface)
def make_frame(t):
p = PointAnimation((0 + 75, 0 + 75),
(100 + 75, 0 + 75),
tween=['easeOutElastic', 0.1, 0.1])
        p2 = PointAnimation((100 + 75, 0 + 75),
                            (0 + 75, 100 + 75),
                            tween=['easeOutElastic', 0.1, 0.5])
p3 = PointAnimation((100 + 75 + 10, 0 + 75 + 10),
(0 + 75 + 10, 100 + 75 + 10),
tween=['easeOutCubic'])
        c = ColorAnimation((1, 0, 0),
                           (0.3, 0.6, 0.2),
                           tween=['easeOutElastic', 0.1, 0.1])
surface = gizeh.Surface(W, H)
f = p.make_frame(frame=tc.sec2frame(t, fps),
birthframe=None,
startframe=tc.sec2frame(0.2, fps),
stopframe=tc.sec2frame(9.8, fps),
deathframe=None)
f2 = p2.make_frame(frame=tc.sec2frame(t, fps),
birthframe=None,
startframe=tc.sec2frame(0.2, fps),
stopframe=tc.sec2frame(9.8, fps),
deathframe=None)
f3 = p3.make_frame(frame=tc.sec2frame(t, fps),
birthframe=None,
startframe=tc.sec2frame(0.2, fps),
stopframe=tc.sec2frame(9.8, fps),
deathframe=None)
coloranim = c.make_frame(frame=tc.sec2frame(t, fps),
birthframe=tc.sec2frame(0.2, fps),
startframe=tc.sec2frame(2, fps),
stopframe=tc.sec2frame(8, fps),
deathframe=tc.sec2frame(9.8, fps))
red = (1, 0, 0)
green = (0, 1, 0)
blue = (0, 0, 1)
draw_line(f, f2, 10, 3, red, green, surface)
draw_line(f, f3, 10, 3, blue, coloranim, surface)
return surface.get_npimage()
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_gif("example_pointandcoloranimation.gif", fps=fps, opt="OptimizePlus", fuzz=10)
|
mpreisler/openscap
|
release_tools/query-milestones.py
|
Python
|
lgpl-2.1
| 1,404
| 0
|
#!/usr/bin/python3
import argparse as ap
import shared
ACTIONS = dict()
def action(key):
def wrapper(function):
ACTIONS[key] = function
return function
return wrapper
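# A minimal illustration (the "example" key is hypothetical): the decorator
# stores the wrapped function in ACTIONS, so the key becomes a legal value for
# the "what" argument parsed in create_parser() below.
#
#   @action("example")
#   def print_example(repo, milestone):
#       print(milestone.title)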
def get_closed_issues(repo, milestone):
issues_and_prs = repo.get_issues(milestone=milestone, state="closed")
issues_only = [i for i in issues_and_prs if i.pull_request is None]
return issues_only
def get_closed_prs(repo, milestone):
issues_and_prs = repo.get_issues(milestone=milestone, state="closed")
prs_only = [i for i in issues_and_prs if i.pull_request is not None]
return prs_only
@action("issues-closed")
def print_closed_issues(repo, milestone):
for issue in get_closed_issues(repo, milestone):
print(issue.title)
@action("prs-merged")
def print_closed_prs(repo, milestone):
for pr in get_closed_prs(repo, milestone):
print(pr.title)
def create_parser():
parser = ap.ArgumentParser()
parser.add_argument("version", type=shared.version_type)
parser.add_argument("what", choices=(ACTIONS.keys()))
shared.update_parser_with_common_stuff(parser)
return parser
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
gh = shared.get_github(args)
repo = shared.get_repo(gh, "OpenSCAP")
milestone = shared.get_milestone(repo, args.version)
ACTIONS[args.what](repo, milestone)
|
msbeta/apollo
|
scripts/record_map_data.py
|
Python
|
apache-2.0
| 6,330
| 0.001106
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Start apollo data recorder.
It lists all available disks mounted under /media and prioritizes them in this order:
- Disk#1. Largest NVME disk
- Disk#2. Smaller NVME disk
- ...
- Disk#x. Largest Non-NVME disk
- Disk#y. Smaller Non-NVME disk
- ...
Run with '--help' to see more options.
"""
import argparse
import datetime
import os
import subprocess
import sys
import psutil
MAP_COLLECTION_DATA_TOPICS = [
'/apollo/monitor/system_status',
'/apollo/sensor/gnss/best_pose',
'/apollo/sensor/gnss/gnss_status',
'/apollo/sensor/gnss/imu',
'/apollo/sensor/gnss/ins_stat',
'/apollo/sensor/gnss/odometry',
'/apollo/sensor/gnss/raw_data',
'/tf',
'/tf_static',
'/apollo/sensor/camera/front_12mm/image/compressed',
'/apollo/sensor/camera/front_6mm/image/compressed',
'/apollo/sensor/lidar16/front/up/Scan',
'/apollo/sensor/lidar16/front/up/PointCloud2',
'/apollo/sensor/lidar16/front/up/compensator/PointCloud2',
'/apollo/sensor/lidar128/PointCloud2',
'/apollo/sensor/lidar128/compensator/PointCloud2',
]
def shell_cmd(cmd, alert_on_failure=True):
"""Execute shell command and return (ret-code, stdout, stderr)."""
print('SHELL > {}'.format(cmd))
proc = subprocess.Popen(cmd, shell=True, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.wait()
stdout = proc.stdout.read().decode('utf-8') if proc.stdout else None
stderr = proc.stderr.read().decode('utf-8') if proc.stderr else None
if alert_on_failure and stderr and ret != 0:
sys.stderr.write('{}\n'.format(stderr))
return (ret, stdout, stderr)
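# A minimal illustration (the command is hypothetical):
#   ret, out, err = shell_cmd('echo hello')   # -> (0, 'hello\n', '')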
class ArgManager(object):
"""Arguments manager."""
def __init__(self):
self.parser = argparse.ArgumentParser(
description="Manage apollo data recording.")
        self.parser.add_argument('--start', default=False, action="store_true",
help='Start recorder. It is the default '
'action if no other actions are triggered. In '
'that case, the False value is ignored.')
        self.parser.add_argument('--stop', default=False, action="store_true",
help='Stop recorder.')
self.parser.add_argument('--split_duration', default="1m",
help='Duration to split bags, will be applied '
'as parameter to "rosbag record --duration".')
self._args = None
def args(self):
"""Get parsed args."""
if self._args is None:
self._args = self.parser.parse_args()
return self._args
class DiskManager(object):
"""Disk manager."""
def __init__(self):
"""Manage disks."""
disks = []
for disk in psutil.disk_partitions():
if not disk.mountpoint.startswith('/media/'):
continue
disks.append({
'mountpoint': disk.mountpoint,
'available_size': DiskManager.disk_avail_size(disk.mountpoint),
'is_nvme': disk.mountpoint.startswith('/media/apollo/internal_nvme'),
})
# Prefer NVME disks and then larger disks.
self.disks = sorted(
disks, reverse=True,
key=lambda disk: (disk['is_nvme'], disk['available_size']))
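        # A sketch of the resulting priority (mountpoints and sizes are
        # hypothetical): NVME disks come first, each group sorted largest-first,
        # since the key is the tuple (is_nvme, available_size) with reverse=True:
        #   nvme 1TB  ->  nvme 500GB  ->  non-NVME 4TB  ->  non-NVME 2TB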
@staticmethod
def disk_avail_size(disk_path):
"""Get disk available size."""
statvfs = os.statvfs(disk_path)
return statvfs.f_frsize * statvfs.f_bavail
class Recorder(object):
"""Data recorder."""
def __init__(self, args):
self.args = args
self.disk_manager = DiskManager()
def start(self):
"""Start recording."""
if Recorder.is_running():
print('Another data recorder is running, skip.')
return
disks = self.disk_manager.disks
        # Use the best disk, or fall back to '/apollo' if none is available.
disk_to_use = disks[0]['mountpoint'] if len(disks) > 0 else '/apollo'
topics = list(MAP_COLLECTION_DATA_TOPICS)
self.record_task(disk_to_use, topics)
def stop(self):
"""Stop recording."""
shell_cmd('pkill -f "cyber_recorder record"')
def record_task(self, disk, topics):
"""Record tasks into the <disk>/data/bag/<task_id> directory."""
task_id = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
task_dir = os.path.join(disk, 'data/bag', task_id)
print('Recording bag to {}'.format(task_dir))
log_file = '/apollo/data/log/apollo_record.out'
topics_str = ' -c '.join(topics)
os.makedirs(task_dir)
cmd = '''
cd "{}"
source /apollo/scripts/apollo_base.sh
source /apollo/framework/install/setup.bash
nohup cyber_recorder record -c {} >{} 2>&1 &
'''.format(task_dir, topics_str, log_file)
shell_cmd(cmd)
@staticmethod
    def is_running():
        """Test whether a recorder process is already running."""
        _, stdout, _ = shell_cmd('pgrep -c -f "cyber_recorder record"', False)
        # The shell wrapper running pgrep matches the pattern itself, so a
        # count of '1' means no real recorder process is running.
return stdout.strip() != '1' if stdout else False
def main():
"""Main entry."""
arg_manager = ArgManager()
args = arg_manager.args()
recorder = Recorder(args)
if args.stop:
recorder.stop()
else:
recorder.start()
if __name__ == '__main__':
main()
|
mibanescu/pulp
|
server/test/unit/server/db/test_model.py
|
Python
|
gpl-2.0
| 22,379
| 0.00219
|
# -*- coding: utf-8 -*-
"""
Tests for the pulp.server.db.model module.
"""
from mock import patch, Mock
from mongoengine import ValidationError, DateTimeField, DictField, Document, IntField, StringField
from pulp.common import error_codes, dateutils
from pulp.common.compat import unittest
from pulp.server.exceptions import PulpCodedException
from pulp.server.db import model
from pulp.server.db.fields import ISO8601StringField
from pulp.server.db.querysets import CriteriaQuerySet
@patch('pulp.server.db.model.UnsafeRetry')
class TestAutoRetryDocument(unittest.TestCase):
"""
Test base class for pulp docs.
"""
def test_decorate_on_init(self, m_retry):
"""
        Ensure that subclasses of AutoRetryDocument are decorated on init.
"""
class MockDoc(model.AutoRetryDocument):
pass
doc = MockDoc()
m_retry.decorate_instance.assert_called_once_with(instance=doc, full_name=type(doc))
    def test_abstract(self, m_retry):
"""
Ensure that AutoRetryDocument is an abstract document.
"""
self.assertDictEqual(model.AutoRetryDocument._meta, {'abstract': True})
class TestContentUnit(unittest.TestCase):
"""
Test ContentUnit model
"""
def test_model_fields(self):
self.assertTrue(isinstance(model.ContentUnit.id, StringField))
self.assertTrue(model.ContentUnit.id.primary_key)
self.assertTrue(isinstance(model.ContentUnit.last_updated, IntField))
self.assertTrue(model.ContentUnit.last_updated.required)
self.assertEquals(model.ContentUnit.last_updated.db_field, '_last_updated')
self.assertTrue(isinstance(model.ContentUnit.user_metadata, DictField))
self.assertEquals(model.ContentUnit.user_metadata.db_field, 'pulp_user_metadata')
self.assertTrue(isinstance(model.ContentUnit.storage_path, StringField))
self.assertEquals(model.ContentUnit.storage_path.db_field, '_storage_path')
self.assertTrue(isinstance(model.ContentUnit._ns, StringField))
self.assertTrue(model.ContentUnit._ns)
self.assertTrue(isinstance(model.ContentUnit.unit_type_id, StringField))
self.assertTrue(model.ContentUnit.unit_type_id)
def test_meta_abstract(self):
self.assertEquals(model.ContentUnit._meta['abstract'], True)
@patch('pulp.server.db.model.signals')
def test_attach_signals(self, mock_signals):
class ContentUnitHelper(model.ContentUnit):
unit_type_id = StringField(default='foo')
unit_key_fields = ['apple', 'pear']
ContentUnitHelper.attach_signals()
mock_signals.pre_save.connect.assert_called_once_with(ContentUnitHelper.pre_save_signal,
sender=ContentUnitHelper)
self.assertEquals('foo', ContentUnitHelper.NAMED_TUPLE.__name__)
self.assertEquals(('apple', 'pear'), ContentUnitHelper.NAMED_TUPLE._fields)
def test_attach_signals_without_unit_key_fields_defined(self):
class ContentUnitHelper(model.ContentUnit):
unit_type_id = StringField(default='foo')
try:
ContentUnitHelper.attach_signals()
self.fail("Previous call should have raised a PulpCodedException")
        except PulpCodedException as raised_error:
self.assertEquals(raised_error.error_code, error_codes.PLP0035)
self.assertEqual(raised_error.error_data, {'class_name': 'ContentUnitHelper'})
@patch('pulp.server.db.model.dateutils.now_utc_timestamp')
def test_pre_save_signal(self, mock_now_utc):
"""
Test the pre_save signal handler
"""
class ContentUnitHelper(model.ContentUnit):
id = None
last_updated = None
mock_now_utc.return_value = 'foo'
helper = ContentUnitHelper()
helper.last_updated = 50
model.ContentUnit.pre_save_signal({}, helper)
self.assertIsNotNone(helper.id)
# make sure the last updated time has been updated
self.assertEquals(helper.last_updated, 'foo')
def test_pre_save_signal_leaves_existing_id(self):
"""
Test the pre_save signal handler leaves an existing id on an object in place
"""
class ContentUnitHelper(model.ContentUnit):
id = None
last_updated = None
helper = ContentUnitHelper()
helper.id = "foo"
model.ContentUnit.pre_save_signal({}, helper)
# make sure the id wasn't replaced
self.assertEquals(helper.id, 'foo')
@patch('pulp.server.db.model.Repository.objects')
@patch('pulp.server.db.model.RepositoryContentUnit.objects')
def test_get_repositories(self, mock_rcu_query, mock_repository_query):
class ContentUnitHelper(model.ContentUnit):
pass
c1 = model.RepositoryContentUnit(repo_id='repo1')
c2 = model.RepositoryContentUnit(repo_id='repo2')
mock_rcu_query.return_value = [c1, c2]
mock_repository_query.return_value = ['apples']
unit = ContentUnitHelper(id='foo_id')
self.assertEquals(unit.get_repositories(), ['apples'])
mock_rcu_query.assert_called_once_with(unit_id='foo_id')
mock_repository_query.assert_called_once_with(repo_id__in=['repo1', 'repo2'])
@patch('pulp.server.db.model.signals')
def test_as_named_tuple(self, m_signal):
class ContentUnitHelper(model.ContentUnit):
apple = StringField()
pear = StringField()
unit_key_fields = ('apple', 'pear')
unit_type_id = StringField(default='bar')
# create the named tuple
ContentUnitHelper.attach_signals()
helper = ContentUnitHelper(apple='foo', pear='bar')
n_tuple = helper.unit_key_as_named_tuple
self.assertEquals(n_tuple, ContentUnitHelper.NAMED_TUPLE(apple='foo', pear='bar'))
def test_id_to_dict(self):
        class ContentUnitHelper(model.ContentUnit):
apple = StringField()
pear = StringField()
unit_key_fields = ('apple', 'pear')
unit_type_id = StringField(default='bar')
my_unit = ContentUnitHelper(apple='apple', pear='pear')
        ret = my_unit.to_id_dict()
expected_dict = {'unit_key': {'pear': u'pear', 'apple': u'apple'}, 'type_id': 'bar'}
self.assertEqual(ret, expected_dict)
def test_type_id(self):
class ContentUnitHelper(model.ContentUnit):
unit_type_id = StringField()
my_unit = ContentUnitHelper(unit_type_id='apple')
self.assertEqual(my_unit.type_id, 'apple')
class TestFileContentUnit(unittest.TestCase):
class TestUnit(model.FileContentUnit):
pass
def test_init(self):
unit = TestFileContentUnit.TestUnit()
self.assertEqual(unit._source_location, None)
@patch('os.path.exists')
def test_set_content(self, exists):
path = '1234'
unit = TestFileContentUnit.TestUnit()
exists.return_value = True
unit.set_content(path)
exists.assert_called_once_with(path)
self.assertEquals(unit._source_location, path)
@patch('os.path.exists')
def test_set_content_bad_source_location(self, exists):
"""
Test that the appropriate exception is raised when set_content
        is called with a non-existent source_location.
"""
exists.return_value = False
unit = TestFileContentUnit.TestUnit()
try:
unit.set_content('1234')
self.fail("Previous call should have raised a PulpCodedException")
except PulpCodedException as raised_error:
self.assertEquals(raised_error.error_code, error_codes.PLP0036)
@patch('pulp.server.db.model.FileStorage.put')
@patch('pulp.server.db.model.FileStorage.open')
@patch('pulp.server.db.model.FileStorage.close')
def test_pre_save_signal(self, close, _open, put):
sender = Mock()
kwargs = {'a': 1, 'b': 2}
# test
unit = TestFileContentUnit.TestUnit()
unit._source_location = '1234'
with patch('pulp.server.db.model.C
|
pmisik/buildbot
|
master/buildbot/test/util/querylog.py
|
Python
|
gpl-2.0
| 3,257
| 0.000614
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import contextlib
import logging
from twisted.python import log
# These routines provides a way to dump SQLAlchemy SQL commands and their
# results into Twisted's log.
# Logging wrappers are not re-entrant.
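# A minimal usage sketch (the SQL statement shown is hypothetical); the
# log_queries() context manager defined below wraps the same start/stop pair:
#
#   handler = start_log_queries()
#   try:
#       connection.execute("SELECT 1")   # echoed via twisted.python.log
#   finally:
#       stop_log_queries(handler)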
class _QueryToTwistedHandler(logging.Handler):
def __init__(self, log_query_result=False, record_mode=False):
super().__init__()
self._log_query_result = log_query_result
self.recordMode = record_mode
self.records = []
def emit(self, record):
if self.recordMode:
self.records.append(record.getMessage())
return
if record.levelno == logging.DEBUG:
if self._log_query_result:
                log.msg(f"{record.name}:{record.threadName}:result: {record.getMessage()}")
else:
log.msg(f"{record.name}:{record.threadName}:query: {record.getMessage()}")
def start_log_queries(log_query_result=False, record_mode=False):
handler = _QueryToTwistedHandler(
log_query_result=log_query_result, record_mode=record_mode)
# In 'sqlalchemy.engine' logging namespace SQLAlchemy outputs SQL queries
# on INFO level, and SQL queries results on DEBUG level.
    logger = logging.getLogger('sqlalchemy.engine')
    # TODO: this is not a documented field of the logger, so it's probably private.
handler.prev_level = logger.level
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
# Do not propagate SQL echoing into ancestor handlers
handler.prev_propagate = logger.propagate
logger.propagate = False
# Return previous values of settings, so they can be carefully restored
# later.
return handler
def stop_log_queries(handler):
assert isinstance(handler, _QueryToTwistedHandler)
logger = logging.getLogger('sqlalchemy.engine')
logger.removeHandler(handler)
# Restore logger settings or set them to reasonable defaults.
logger.propagate = handler.prev_propagate
logger.setLevel(handler.prev_level)
@contextlib.contextmanager
def log_queries():
handler = start_log_queries()
try:
yield
finally:
stop_log_queries(handler)
class SqliteMaxVariableMixin:
@contextlib.contextmanager
def assertNoMaxVariables(self):
handler = start_log_queries(record_mode=True)
try:
yield
finally:
stop_log_queries(handler)
for line in handler.records:
            self.assertFalse(line.count("?") > 999,
                             "too many variables in " + line)
|