| repo_name (string, length 5-100) | path (string, length 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, length 0-8.16k) | middle (string, length 3-512) | suffix (string, length 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
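Each row below follows this schema. As a rough orientation only (a sketch, not part of the dataset), a single row can be read as the following Python record; the values are copied from the first row and the long text fields are elided:

row = {
    "repo_name": "yxdong/ybk",
    "path": "zg/models.py",
    "language": "Python",
    "license": "mit",
    "size": 3701,
    "score": 0.000277,
    "prefix": "...",   # leading slice of the file (0-8.16k chars)
    "middle": "...",   # short span of the file (3-512 chars)
    "suffix": "...",   # trailing slice of the file (0-8.17k chars)
}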
yxdong/ybk
|
zg/models.py
|
Python
|
mit
| 3,701
| 0.000277
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
# from datetime import timedelta, datetime
from yamo import (
Connection,
EmbeddedDocument, Document, IDFormatter, Index,
StringField,
IntField,
FloatField,
BooleanField,
ListField,
EmbeddedField,
DateTimeField,
)
conn = Connection('mongodb://localhost/zg')
log = logging.getLogger('zg')
class User(Document):
""" 跌零用户 """
class Meta:
idf = IDFormatter('{mobile}')
idx1 = Index('mobile', unique=True)
idx2 = Index('username')
mobile = StringField(required=True)
username = StringField(required=True)
password = StringField(required=True)
paid = FloatField(default=0)
total_money = FloatField(default=0)
total_capital = FloatField(default=0)
total_profit = FloatField(default=0)
_is_admin = BooleanField(default=False)
def get_id(self):
return self._id
def is_active(self):
return True
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def is_admin(self):
return self._is_admin
class Account(Document):
""" 跌零用户的账号 """
|
class Meta:
idf = IDFormatter('{user_id}_{login_name}')
user_id = StringField(required=True)
login_name = StringField(required=True)
login_password = StringField(required=True)
class MyPosition(EmbeddedDocument):
""" 持仓汇总 """
name = StringField(required=True)
symbol = StringField(required=True)
average_price = FloatField(required=True)
|
quantity = IntField(required=True)
price = FloatField(required=True)
sellable = IntField()
profit = FloatField()
@property
def increase(self):
if self.price > 0:
return '{:4.2f}%'.format(
(self.price / self.average_price - 1) * 100)
else:
return '0%'
class MyOrder(EmbeddedDocument):
""" 成交订单汇总 """
type_ = StringField(required=True)
name = StringField(required=True)
symbol = StringField(required=True)
price = FloatField(required=True)
current_price = FloatField(required=True)
quantity = IntField(required=True)
commision = FloatField(required=True)
profit = FloatField(required=True)
class MyStatus(EmbeddedDocument):
""" 挂单情况 """
order = StringField(required=True)
order_at = StringField(required=True)
type_ = StringField(required=True)
name = StringField(required=True)
symbol = StringField(required=True)
price = FloatField(required=True)
quantity = IntField(required=True)
pending_quantity = IntField(required=True)
status = StringField(required=True)
class Position(Document):
""" 当日持仓汇总 """
class Meta:
idf = IDFormatter('{user_id}_{date}')
idx1 = Index(['user_id', 'date'], unique=True)
user_id = StringField(required=True)
date = DateTimeField(required=True)
position_list = ListField(EmbeddedField(MyPosition))
class Order(Document):
""" 当日订单汇总 """
class Meta:
idf = IDFormatter('{user_id}_{date}')
idx1 = Index(['user_id', 'date'], unique=True)
user_id = StringField(required=True)
date = DateTimeField(required=True)
order_list = ListField(EmbeddedField(MyOrder))
class Status(Document):
""" 当日挂单汇总 """
class Meta:
idf = IDFormatter('{user_id}_{date}')
idx1 = Index(['user_id', 'date'], unique=True)
user_id = StringField(required=True)
date = DateTimeField(required=True)
status_list = ListField(EmbeddedField(MyStatus))
conn.register_all()
|
derrickorama/image_optim
|
image_optim/__init__.py
|
Python
|
mit
| 52
| 0.019231
|
|
|
from .core import ImageOptim, NoImagesOptimizedError
|
nanolearning/edx-platform
|
lms/djangoapps/shoppingcart/views.py
|
Python
|
agpl-3.0
| 9,715
| 0.0035
|
import logging
import datetime
import pytz
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import (HttpResponse, HttpResponseRedirect, HttpResponseNotFound,
HttpResponseBadRequest, HttpResponseForbidden, Http404)
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from edxmako.shortcuts import render_to_response
|
from xmodule.modulestore.locations import SlashSeparatedCourseKey
|
from shoppingcart.reports import RefundReport, ItemizedPurchaseReport, UniversityRevenueShareReport, CertificateStatusReport
from student.models import CourseEnrollment
from .exceptions import ItemAlreadyInCartException, AlreadyEnrolledInCourseException, CourseDoesNotExistException, ReportTypeDoesNotExistException
from .models import Order, PaidCourseRegistration, OrderItem
from .processors import process_postpay_callback, render_purchase_form_html
log = logging.getLogger("shoppingcart")
EVENT_NAME_USER_UPGRADED = 'edx.course.enrollment.upgrade.succeeded'
REPORT_TYPES = [
("refund_report", RefundReport),
("itemized_purchase_report", ItemizedPurchaseReport),
("university_revenue_share", UniversityRevenueShareReport),
("certificate_status", CertificateStatusReport),
]
def initialize_report(report_type, start_date, end_date, start_letter=None, end_letter=None):
"""
Creates the appropriate type of Report object based on the string report_type.
"""
for item in REPORT_TYPES:
if report_type in item:
return item[1](start_date, end_date, start_letter, end_letter)
raise ReportTypeDoesNotExistException
@require_POST
def add_course_to_cart(request, course_id):
"""
Adds course specified by course_id to the cart. The model function add_to_order does all the
heavy lifting (logging, error checking, etc)
"""
assert isinstance(course_id, basestring)
if not request.user.is_authenticated():
log.info("Anon user trying to add course {} to cart".format(course_id))
return HttpResponseForbidden(_('You must be logged-in to add to a shopping cart'))
cart = Order.get_cart_for_user(request.user)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# All logging from here handled by the model
try:
PaidCourseRegistration.add_to_order(cart, course_key)
except CourseDoesNotExistException:
return HttpResponseNotFound(_('The course you requested does not exist.'))
except ItemAlreadyInCartException:
return HttpResponseBadRequest(_('The course {0} is already in your cart.'.format(course_id)))
except AlreadyEnrolledInCourseException:
return HttpResponseBadRequest(_('You are already registered in course {0}.'.format(course_id)))
return HttpResponse(_("Course added to cart."))
@login_required
def show_cart(request):
cart = Order.get_cart_for_user(request.user)
total_cost = cart.total_cost
cart_items = cart.orderitem_set.all()
form_html = render_purchase_form_html(cart)
return render_to_response("shoppingcart/list.html",
{'shoppingcart_items': cart_items,
'amount': total_cost,
'form_html': form_html,
})
@login_required
def clear_cart(request):
cart = Order.get_cart_for_user(request.user)
cart.clear()
return HttpResponse('Cleared')
@login_required
def remove_item(request):
item_id = request.REQUEST.get('id', '-1')
try:
item = OrderItem.objects.get(id=item_id, status='cart')
if item.user == request.user:
item.delete()
except OrderItem.DoesNotExist:
log.exception('Cannot remove cart OrderItem id={0}. DoesNotExist or item is already purchased'.format(item_id))
return HttpResponse('OK')
@csrf_exempt
@require_POST
def postpay_callback(request):
"""
Receives the POST-back from processor.
Mainly this calls the processor-specific code to check if the payment was accepted, and to record the order
if it was, and to generate an error page.
If successful this function should have the side effect of changing the "cart" into a full "order" in the DB.
The cart can then render a success page which links to receipt pages.
If unsuccessful the order will be left untouched and HTML messages giving more detailed error info will be
returned.
"""
params = request.POST.dict()
result = process_postpay_callback(params)
if result['success']:
return HttpResponseRedirect(reverse('shoppingcart.views.show_receipt', args=[result['order'].id]))
else:
return render_to_response('shoppingcart/error.html', {'order': result['order'],
'error_html': result['error_html']})
@login_required
def show_receipt(request, ordernum):
"""
Displays a receipt for a particular order.
404 if order is not yet purchased or request.user != order.user
"""
try:
order = Order.objects.get(id=ordernum)
except Order.DoesNotExist:
raise Http404('Order not found!')
if order.user != request.user or order.status != 'purchased':
raise Http404('Order not found!')
order_items = OrderItem.objects.filter(order=order).select_subclasses()
any_refunds = any(i.status == "refunded" for i in order_items)
receipt_template = 'shoppingcart/receipt.html'
__, instructions = order.generate_receipt_instructions()
# we want to have the ability to override the default receipt page when
# there is only one item in the order
context = {
'order': order,
'order_items': order_items,
'any_refunds': any_refunds,
'instructions': instructions,
}
if order_items.count() == 1:
receipt_template = order_items[0].single_item_receipt_template
context.update(order_items[0].single_item_receipt_context)
# Only orders where order_items.count() == 1 might be attempting to upgrade
attempting_upgrade = request.session.get('attempting_upgrade', False)
if attempting_upgrade:
course_enrollment = CourseEnrollment.get_or_create_enrollment(request.user, order_items[0].course_id)
course_enrollment.emit_event(EVENT_NAME_USER_UPGRADED)
request.session['attempting_upgrade'] = False
return render_to_response(receipt_template, context)
def _can_download_report(user):
"""
Tests if the user can download the payments report, based on membership in a group whose name is determined
in settings. If the group does not exist, denies all access
"""
try:
access_group = Group.objects.get(name=settings.PAYMENT_REPORT_GENERATOR_GROUP)
except Group.DoesNotExist:
return False
return access_group in user.groups.all()
def _get_date_from_str(date_input):
"""
Gets date from the date input string. Lets the ValueError raised by invalid strings be processed by the caller
"""
return datetime.datetime.strptime(date_input.strip(), "%Y-%m-%d").replace(tzinfo=pytz.UTC)
def _render_report_form(start_str, end_str, start_letter, end_letter, report_type, total_count_error=False, date_fmt_error=False):
"""
Helper function that renders the purchase form. Reduces repetition
"""
context = {
'total_count_error': total_count_error,
'date_fmt_error': date_fmt_error,
'start_date': start_str,
'end_date': end_str,
'start_letter': start_letter,
'end_letter': end_letter,
'requested_report': report_type,
}
return render_to_response('shoppingcart/download_report.html', context)
@login_required
def csv_report(request):
"""
Downloads csv reporting of orderitems
"""
if not _can_download_report(request.user):
return HttpResponseForbidden(_('You do not have permission to view this page.'))
if request.method == 'POST':
start
|
zen/openstack-dashboard
|
django-openstack/django_openstack/tests/view_tests/dash/container_tests.py
|
Python
|
apache-2.0
| 4,367
| 0.001145
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cloudfiles.errors import ContainerNotEmpty
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from django_openstack import api
from django_openstack.tests.view_tests import base
from mox import IgnoreArg, IsA
class ContainerViewTests(base.BaseViewTests):
def setUp(self):
super(ContainerViewTests, self).setUp()
self.container = self.mox.CreateMock(api.Container)
self.container.name = 'containerName'
def test_index(self):
self.mox.StubOutWithMock(api, 'swift_get_containers')
api.swift_get_containers(
IsA(http.HttpRequest)).AndReturn([self.container])
self.mox.ReplayAll()
res = self.client.get(reverse('dash_containers', args=['tenant']))
self.assertTemplateUsed(res,
'django_openstack/dash/containers/index.html')
self.assertIn('containers', res.context)
containers = res.context['containers']
self.assertEqual(len(containers), 1)
|
self.assertEqual(containers[0].name, 'containerName')
self.mox.VerifyAll()
def test_delete_container(self):
|
formData = {'container_name': 'containerName',
'method': 'DeleteContainer'}
self.mox.StubOutWithMock(api, 'swift_delete_container')
api.swift_delete_container(IsA(http.HttpRequest),
'containerName')
self.mox.ReplayAll()
res = self.client.post(reverse('dash_containers', args=['tenant']),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_containers',
args=['tenant']))
self.mox.VerifyAll()
def test_delete_container_nonempty(self):
formData = {'container_name': 'containerName',
'method': 'DeleteContainer'}
exception = ContainerNotEmpty('containerNotEmpty')
self.mox.StubOutWithMock(api, 'swift_delete_container')
api.swift_delete_container(
IsA(http.HttpRequest),
'containerName').AndRaise(exception)
self.mox.StubOutWithMock(messages, 'error')
messages.error(IgnoreArg(), IsA(unicode))
self.mox.ReplayAll()
res = self.client.post(reverse('dash_containers', args=['tenant']),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_containers',
args=['tenant']))
self.mox.VerifyAll()
def test_create_container_get(self):
res = self.client.get(reverse('dash_containers_create',
args=['tenant']))
self.assertTemplateUsed(res,
'django_openstack/dash/containers/create.html')
def test_create_container_post(self):
formData = {'name': 'containerName',
'method': 'CreateContainer'}
self.mox.StubOutWithMock(api, 'swift_create_container')
api.swift_create_container(
IsA(http.HttpRequest), 'CreateContainer')
self.mox.StubOutWithMock(messages, 'success')
messages.success(IgnoreArg(), IsA(str))
res = self.client.post(reverse('dash_containers_create',
args=[self.request.user.tenant]),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_containers',
args=[self.request.user.tenant]))
|
mir-group/flare
|
tests/test_mgp.py
|
Python
|
mit
| 12,643
| 0.000791
|
import numpy as np
import os
import pickle
import pytest
import re
import time
import shutil
from copy import deepcopy
from numpy import allclose, isclose
from flare import struc, env, gp
from flare.parameters import Parameters
from flare.mgp import MappedGaussianProcess
from flare.lammps import lammps_calculator
from flare.utils.element_coder import _Z_to_mass, _Z_to_element, _element_to_Z
from flare.ase.calculator import FLARE_Calculator
from flare.ase.atoms import FLARE_Atoms
from ase.calculators.lammpsrun import LAMMPS
from .fake_gp import get_gp, get_random_structure
from .mgp_test import clean, compare_triplet, predict_atom_diag_var
body_list = ["2", "3"]
multi_list = [True, False]
force_block_only = False
curr_path = os.getcwd()
@pytest.mark.skipif(
not os.environ.get("lmp", False),
reason=(
"lmp not found "
"in environment: Please install LAMMPS "
"and set the $lmp env. "
"variable to point to the executatble."
),
)
@pytest.fixture(scope="module")
def all_gp():
allgp_dict = {}
np.random.seed(123)
for bodies in body_list:
for multihyps in multi_list:
gp_model = get_gp(
bodies,
"mc",
multihyps,
cellabc=[1.5, 1, 2],
|
force_only=force_block_only,
noa=5,
)
gp_model.parallel = True
gp_model.n_cpus = 2
allgp_dict[f"{bodies}{multihyps}"] = gp_model
yield allgp_dict
|
del allgp_dict
@pytest.fixture(scope="module")
def all_mgp():
allmgp_dict = {}
for bodies in ["2", "3", "2+3"]:
for multihyps in [False, True]:
allmgp_dict[f"{bodies}{multihyps}"] = None
yield allmgp_dict
del allmgp_dict
@pytest.fixture(scope="module")
def all_lmp():
all_lmp_dict = {}
species = ["H", "He"]
specie_symbol_list = " ".join(species)
masses = [
f"{i} {_Z_to_mass[_element_to_Z[species[i]]]}" for i in range(len(species))
]
parameters = {
"command": os.environ.get("lmp"), # set up executable for ASE
"newton": "off",
"pair_style": "mgp",
"mass": masses,
}
# set up input params
for bodies in body_list:
for multihyps in multi_list:
# create ASE calc
label = f"{bodies}{multihyps}"
files = [f"{label}.mgp"]
by = "yes" if bodies == "2" else "no"
ty = "yes" if bodies == "3" else "no"
parameters["pair_coeff"] = [
f"* * {label}.mgp {specie_symbol_list} {by} {ty}"
]
lmp_calc = LAMMPS(
label=label,
keep_tmp_files=True,
tmp_dir="./tmp/",
parameters=parameters,
files=files,
specorder=species,
)
all_lmp_dict[f"{bodies}{multihyps}"] = lmp_calc
yield all_lmp_dict
del all_lmp_dict
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_init(bodies, multihyps, all_mgp, all_gp):
"""
test the init function
"""
clean()
gp_model = all_gp[f"{bodies}{multihyps}"]
# grid parameters
grid_params = {}
if "2" in bodies:
grid_params["twobody"] = {"grid_num": [160], "lower_bound": [0.02]}
if "3" in bodies:
grid_params["threebody"] = {"grid_num": [31, 32, 33], "lower_bound": [0.02] * 3}
lammps_location = f"{bodies}{multihyps}"
data = gp_model.training_statistics
try:
mgp_model = MappedGaussianProcess(
grid_params=grid_params,
unique_species=data["species"],
n_cpus=1,
lmp_file_name=lammps_location,
var_map="simple",
)
except:
mgp_model = MappedGaussianProcess(
grid_params=grid_params,
unique_species=data["species"],
n_cpus=1,
lmp_file_name=lammps_location,
var_map=None,
)
all_mgp[f"{bodies}{multihyps}"] = mgp_model
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_build_map(all_gp, all_mgp, bodies, multihyps):
"""
test the mapping for mc_simple kernel
"""
gp_model = all_gp[f"{bodies}{multihyps}"]
mgp_model = all_mgp[f"{bodies}{multihyps}"]
mgp_model.build_map(gp_model)
# with open(f'grid_{bodies}_{multihyps}.pickle', 'wb') as f:
# pickle.dump(mgp_model, f)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_write_model(all_mgp, bodies, multihyps):
"""
test the mapping for mc_simple kernel
"""
mgp_model = all_mgp[f"{bodies}{multihyps}"]
mgp_model.write_model(f"my_mgp_{bodies}_{multihyps}")
mgp_model.write_model(f"my_mgp_{bodies}_{multihyps}", format="pickle")
# Ensure that user is warned when a non-mean_only
# model is serialized into a Dictionary
with pytest.warns(Warning):
mgp_model.var_map = "pca"
mgp_model.as_dict()
mgp_model.var_map = "simple"
mgp_model.as_dict()
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_load_model(all_mgp, bodies, multihyps):
"""
test the mapping for mc_simple kernel
"""
name = f"my_mgp_{bodies}_{multihyps}.json"
all_mgp[f"{bodies}{multihyps}"] = MappedGaussianProcess.from_file(name)
os.remove(name)
name = f"my_mgp_{bodies}_{multihyps}.pickle"
all_mgp[f"{bodies}{multihyps}"] = MappedGaussianProcess.from_file(name)
os.remove(name)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_cubic_spline(all_gp, all_mgp, bodies, multihyps):
"""
test the predict for mc_simple kernel
"""
mgp_model = all_mgp[f"{bodies}{multihyps}"]
delta = 1e-4
if "3" in bodies:
body_name = "threebody"
elif "2" in bodies:
body_name = "twobody"
nmap = len(mgp_model.maps[body_name].maps)
print("nmap", nmap)
for i in range(nmap):
maxvalue = np.max(np.abs(mgp_model.maps[body_name].maps[i].mean.__coeffs__))
if maxvalue > 0:
comp_code = mgp_model.maps[body_name].maps[i].species_code
if "3" in bodies:
c_pt = np.array([[0.3, 0.4, 0.5]])
c, cderv = (
mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
)
cderv = cderv.reshape([-1])
for j in range(3):
a_pt = deepcopy(c_pt)
b_pt = deepcopy(c_pt)
a_pt[0][j] += delta
b_pt[0][j] -= delta
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
num_derv = (a - b) / (2 * delta)
print("spline", comp_code, num_derv, cderv[j])
assert np.isclose(num_derv, cderv[j], rtol=1e-2)
elif "2" in bodies:
center = np.sum(mgp_model.maps[body_name].maps[i].bounds) / 2.0
a_pt = np.array([[center + delta]])
b_pt = np.array([[center - delta]])
c_pt = np.array([[center]])
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
c, cderv = (
mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
)
cderv = cderv.reshape([-1])[0]
num_derv = (a - b) / (2 * delta)
print("spline", num_derv, cderv)
assert np.isclose(num_derv, cderv, rtol=1e-2)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_predict(all_gp, all_mgp, bodies, multihyps):
"""
test the predict for mc_simple kernel
"""
gp_model = all_gp[f"{bodies}{multihyps}"]
mgp_model = all_mgp[f"{bodies}{multihyps}"]
|
Kami/libcloud
|
contrib/generate_provider_logos_collage_image.py
|
Python
|
apache-2.0
| 4,224
| 0
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Script which generates a collage of provider logos from multiple provider
# logo files.
#
# It works in two steps:
#
# 1. Resize all the provider logo files (reduce the dimensions)
# 2. Assemble a final image from the resized images
import os
import sys
import argparse
import subprocess
import random
from os.path import join as pjoin
DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>)
GEOMETRY = '+4+4' # How to arrange images (+<rows>+<columns>)
TO_CREATE_DIRS = ['resized/', 'final/']
def setup(output_path):
"""
Create missing directories.
"""
for directory in TO_CREATE_DIRS:
final_path = pjoin(output_path, directory)
if not os.path.exists(final_path):
os.makedirs(final_path)
def get_logo_files(input_path):
logo_files = os.listdir(input_path)
logo_files = [name for name in logo_files if
'resized' not in name and name.endswith('png')]
logo_files = [pjoin(input_path, name) for name in logo_files]
return logo_files
def resize_images(logo_files, output_path):
resized_images = []
for logo_file in logo_files:
name, ext = os.path.splitext(os.path.basename(logo_file))
new_name = '%s%s' % (name, ext)
out_name = pjoin(output_path, 'resized/', new_name)
print('Resizing image: %(name)s' % {'name': logo_file})
values = {'name': logo_file, 'out_name': out_name,
'dimensions': DIMENSIONS}
cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s'
cmd = cmd % values
subprocess.call(cmd, shell=True)
resized_images.append(out_name)
return resized_images
def assemble_final_image(resized_images, output_path):
final_name = pjoin(output_path, 'final/logos.png')
random.shuffle(resized_images)
values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY,
'out_name': final_name}
cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s'
cmd = cmd % values
print('Generating final image: %(name)s' % {'name': final_name})
subprocess.call(cmd, shell=True)
def main(input_path, output_path):
if not os.path.exists(input_path):
print('Path doesn\'t exist: %s' % (input_path))
sys.exit(2)
if not os.path.exists(output_path):
print('Path doesn\'t exist: %s' % (output_path))
sys.exit(2)
logo_files = get_logo_files(input_path=input_path)
setup(output_path=output_path)
resized_images = resize_images(logo_files=logo_files,
output_path=output_path)
assemble_final_image(resized_images=resized_images,
output_path=output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Assemble provider logos '
' in a single image')
parser.add_argument('--input-path', action='store',
help='Path to directory which contains provider '
|
'logo files')
parser.add_argument('--output-path', action='store',
help='Path where the new files will be written')
args = parser.parse_args()
input_path = os.path.abspath(args.input_path)
output_path = os.path.abspath(args.output_path)
|
main(input_path=input_path, output_path=output_path)
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/operations/_virtual_machine_scale_set_vms_operations.py
|
Python
|
mit
| 56,685
| 0.004869
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_reimage_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage')
path_format_arguments = {
"resourceGrou
|
pName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"instanceId": _SERIALIZER.url("instance_id", instance_id, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscr
|
iption_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_deallocate_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"instanceId": _SERIALIZER.url("instance_id", instance_id, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"instanceId": _SERIALIZER.url("instance_id", instance_id, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"instanceId": _SERIALIZER.url("instance_id", instance_id, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_instance_view_request(
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"instanceId": _SERIALIZER.url("instance_id", instance_id, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters =
|
ankanch/tieba-zhuaqu
|
DSV-user-application-plugin-dev-kit/lib/result_functions_file.py
|
Python
|
gpl-3.0
| 3,240
| 0.01997
|
import os
import datetime
import pymysql
import lib.maglib as MSG
# This is a library for preliminary processing of the crawl results
# It is used to separate the crawled content, author, and post time
# The crawl results should be stored in the [client root directory] and named "result"
# In testing, the crawl result file is results.txt
# Important global variables
PATH_SUFFIX = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
print(PATH_SUFFIX)
PATH_SUFFIX = PATH_SUFFIX[::-1]
PATH_SUFFIX = PATH_SUFFIX[PATH_SUFFIX.find('\\'):]
PATH_SUFFIX = PATH_SUFFIX[::-1]
print(PATH_SUFFIX)
PATH_RESULT_FILE = PATH_SUFFIX + "\\datasource.ini"
DBSETTINGS = {'H':'', #HOST
'U':'', #USER
'P':'', #PASSWORD
'D':''} #DATABASE_NAME
# This function reads the data source information
# Return value: True on success, otherwise False
def loadDataSource():
print("加载数据源配置:",PATH_RESULT_FILE)
f = open(PATH_RESULT_FILE,'rb')
data = f.read()
f.close()
data = data.decode('gbk', 'ignore')
|
dbl = data.split("\r\n")
for db in dbl:
DBSETTINGS[db[0]] = db[db.find('=')+1:].replace('\'','').replace(' ','')
return data
loadDataSource()
DBCONN = pymysql.connect(host=DBSETTINGS['H'], port=3306,user=DBSETTINGS['U'],passwd=DBSETTINGS['P'],db=DBSETTINGS['D'],charset='UTF8')
|
DBCUR = DBCONN.cursor()
# Query the database for all records whose content contains the given word
# Return value: list of records containing the given word
def queryWordContainPostListbyKeyword(word):
SEL = "select CONTENT from `postdata` where CONTENT like('%" + word +"%')"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist
# Query the database for all post information of the given author
# Return value: all reply information of the given author
# [ [thread URL, forum name, author, post content, post time, replied-to user, page], [......], ..... ]
def queryPostdataListbyAuthor(author):
SEL = "select * from `postdata` where AUTHOR=\"" + author +"\""
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist
# Query the database for the maximum date
# Return value: the maximum date
def queryDatasourceLatestTime():
SEL = "select MAX(DATE) from `postdata`"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist[0][0]
# Query the database for the minimum date
# Return value: the minimum date
def queryDatasourceEarlyTime():
SEL = "select MIN(DATE) from `postdata`"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist[0][0]
# Query the database for the given author's data between the specified dates
# Return value: list of records between the specified dates
# [ [thread URL, forum name, author, post content, post time, replied-to user, page], [......], ..... ]
def queryPostdataListAfterTime(author,earlydatestr):
SEL = "select * from `postdata` where AUTHOR=\"" + author + "\" and DATE>'" + earlydatestr + "'"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
print(len(datalist))
return datalist
|
mfcloud/python-zvm-sdk
|
zvmsdk/tests/unit/sdkwsgi/handlers/test_version.py
|
Python
|
apache-2.0
| 1,674
| 0
|
# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import unittest
from zvmsdk.sdkwsgi.handlers import version
from zvmsdk import version as sdk_version
class HandlersRootTest(unittest.TestCase):
def setUp(self):
pass
|
def test_version(self):
req = mock.Mock()
ver_str = {"rc": 0,
"overallRC": 0,
"errmsg": "",
"modID": None,
"output":
{"api_version": version.APIVERSION,
"min_version": version.APIVERSION,
"version": sdk_version.__version__,
"max_version": version.APIVERSION,
},
|
"rs": 0}
res = version.version(req)
self.assertEqual('application/json', req.response.content_type)
# version_json = json.dumps(ver_res)
# version_str = utils.to_utf8(version_json)
ver_res = json.loads(req.response.body.decode('utf-8'))
self.assertEqual(ver_str, ver_res)
self.assertEqual('application/json', res.content_type)
|
hillst/RnaMaker
|
bin/daemons/testclient.py
|
Python
|
mit
| 789
| 0.010139
|
#!/usr/bin/python
import socket
import sys
HOST, PORT = "24.21.106.140", 8080
# Create a socket (SOCK_STREAM means a TCP socket)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
try:
# Connect to server and send data
print "Connected to ", HOST, ":", PORT, "\n","Awaiting input\n"
data = sys.stdin.readline()
sock.connect((HOST, PORT))
print "Connected to ", HOST, ":", PORT, "\n","Awaiting input\
|
n"
exit = False
while exit != True:
sock.sendall(data + "\n")
if data.strip() == 'bye':
exit = True
received = sock.recv(1024)
print "Sent: " , data
print "Received: " , received
data = sys.stdin.readline()
# Receive data from the server and shut down
finally:
sock.close()
|
Pierre-Sassoulas/django-survey
|
survey/admin.py
|
Python
|
agpl-3.0
| 1,693
| 0.001772
|
from django.contrib import admin
|
from survey.actions import make_published
from survey.exporter.csv import Survey2Csv
from survey.exporter.tex import Survey2Tex
from survey.models import Answer, Category, Question, Response, Survey
class QuestionInline(admin.StackedInline):
model = Question
ordering = ("order", "category")
extra = 1
def get_formset(self, request, survey_obj, *args, **kwargs):
|
formset = super().get_formset(request, survey_obj, *args, **kwargs)
if survey_obj:
formset.form.base_fields["category"].queryset = survey_obj.categories.all()
return formset
class CategoryInline(admin.TabularInline):
model = Category
extra = 0
class SurveyAdmin(admin.ModelAdmin):
list_display = ("name", "is_published", "need_logged_user", "template")
list_filter = ("is_published", "need_logged_user")
inlines = [CategoryInline, QuestionInline]
actions = [make_published, Survey2Csv.export_as_csv, Survey2Tex.export_as_tex]
class AnswerBaseInline(admin.StackedInline):
fields = ("question", "body")
readonly_fields = ("question",)
extra = 0
model = Answer
class ResponseAdmin(admin.ModelAdmin):
list_display = ("interview_uuid", "survey", "created", "user")
list_filter = ("survey", "created")
date_hierarchy = "created"
inlines = [AnswerBaseInline]
# specifies the order as well as which fields to act on
readonly_fields = ("survey", "created", "updated", "interview_uuid", "user")
# admin.site.register(Question, QuestionInline)
# admin.site.register(Category, CategoryInline)
admin.site.register(Survey, SurveyAdmin)
admin.site.register(Response, ResponseAdmin)
|
jorgegarciadev/estacaidooque
|
run.py
|
Python
|
mit
| 166
| 0.012048
|
# -*- coding: utf-8 -*-
from app import app
import os
|
if __name__ == "__main__":
|
port = int(os.environ.get('PORT', 33507))
app.run(port=port, debug = True)
|
candlepin/subscription-manager
|
test/zypper_test/test_serviceplugin.py
|
Python
|
gpl-2.0
| 2,745
| 0.0051
|
import configparser
from unittest import TestCase
import os
import subprocess
import tempfile
from test import subman_marker_functional, subman_marker_needs_envvars, subman_marker_zypper
@subman_marker_functional
@subman_marker_zypper
@subman_marker_needs_envvars('RHSM_USER', 'RHSM_PASSWORD', 'RHSM_URL', 'RHSM_POOL', 'RHSM_TEST_REPO', 'RHSM_TEST_PACKAGE')
class TestServicePlugin(TestCase):
SUB_MAN = "PYTHONPATH=./src python -m subscription_manager.scripts.subscription_manager"
def setUp(self):
# start in a non-registered state
subprocess.call('{sub_man} unregister'.format(sub_man=self.SUB_MAN), shell=True)
def has_subman_repos(self):
repos = configparser.ConfigParser()
with tempfile.NamedTemporaryFile(suffix='.repo') as repofile:
subprocess.call('zypper lr -e {0}'.format(repofile.name), shell=True)
repos.read(repofile.name)
for repo in repos.sections():
repo_info = dict(repos.items(repo))
service = repo_info.get('service', None)
if service == 'rhsm':
return True
return False
def test_provides_no_subman_repos_if_unregistered(self):
self.assertFalse(self.has_subman_repos())
def test_provides_subman_repos_if_registered_and_subscribed(self):
|
subprocess.call('{sub_man} register --username={RHSM_USER} --password={RHSM_PASSWORD} --serverurl={RHSM_URL}'.format(sub_man=self.SUB_MAN, **os.environ), shell=True)
subprocess.call('{sub_man} attach --pool={RHSM_POOL}'.format(sub_man=self.SUB_MAN, **os.environ), shell=True)
self.assertTrue(self.has_subman_repos())
def test_can_download_rpm(self):
subprocess.check_call('{sub_man} register --username={RHSM_USER} --password={RHSM_PASSWORD} --serverurl={RHSM_URL}'.format(sub_man=self.SUB_MAN, **os.environ), shell=True)
|
subprocess.check_call('{sub_man} attach --pool={RHSM_POOL}'.format(sub_man=self.SUB_MAN, **os.environ), shell=True)
subprocess.check_call('{sub_man} repos --enable={RHSM_TEST_REPO}'.format(sub_man=self.SUB_MAN, **os.environ), shell=True)
# remove cached subman packages
subprocess.call('rm -rf /var/cache/zypp/packages/subscription-manager*', shell=True)
# remove test package if installed
subprocess.call('PYTHONPATH=./src zypper --non-interactive rm {RHSM_TEST_PACKAGE}'.format(**os.environ), shell=True)
subprocess.call('PYTHONPATH=./src zypper --non-interactive --no-gpg-checks in --download-only {RHSM_TEST_PACKAGE}'.format(**os.environ), shell=True)
subprocess.check_call('test "$(find /var/cache/zypp/packages/ -name \'{RHSM_TEST_PACKAGE}*.rpm\' | wc -l)" -gt 0'.format(**os.environ), shell=True)
|
kingsdigitallab/tvof-django
|
tvof/text_search/es_indexes.py
|
Python
|
mit
| 12,832
| 0.000935
|
from collections import deque, OrderedDict
from django.conf import settings
from elasticsearch_dsl import Document, Date, Integer, Keyword, Text, Search, Index, Boolean, Completion, \
SearchAsYouType, normalizer, analyzer
from elasticsearch_dsl.connections import connections
from tqdm import tqdm
from elasticsearch.helpers import bulk, parallel_bulk, BulkIndexError
from .utils import normalise_lemma, normalise_form, get_ascii_from_unicode
'''
http://localhost:9200/_cat/indices
# TODO: print -> log or sys.out
'''
# Define a default Elasticsearch client
from . import utils
c = connections.configure(**settings.ELASTICSEARCH_DSL)
# https://github.com/elastic/elasticsearch-dsl-py/issues/669
# https://sunscrapers.com/blog/elasticsearch-with-python-7-tips-and-best-practices/
normalizer_insensitive = normalizer(
'insensitive_normalizer',
filter=['lowercase', 'asciifolding']
)
analyzer_insensitive = analyzer(
'insensitive_analyzer',
tokenizer='standard',
filter=['lowercase', 'asciifolding']
)
class KeywordInsensitive(Keyword):
'''An ES Keyword field with a .insensitive subfield
which is case-insensitive'''
def __init__(self, *args, **kwargs):
kwargs['fields'] = {
'insensitive': Keyword(normalizer=normalizer_insensitive)
}
super().__init__(*args, **kwargs)
class AnnotatedToken(Document):
'''An ElasticSearch document for an annotated token in the text.
The tokens and annotations come from the kwic file.
Constraints:
Text():
searchable (any token), case-insensitive
but can't be sorted
(unless we use fielddata=True which is not recommended)
accent-sensitive by default
Keyword():
exact search only ('julius cesar' won't match 'cesar')
accent-sensitive & case-sensitive search
'''
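# Note on the mappings below (standard Elasticsearch behaviour): a Keyword()
# field only matches the exact stored string, so a term query for "cesar" will
# not hit "julius cesar"; the Text() field "searchable" is analysed with
# lowercase + asciifolding, so queries like "Cesar" or "césar" also match.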
# string
token = Keyword()
form = KeywordInsensitive()
lemma = Keyword()
# searchable
searchable = Text(analyzer=analyzer_insensitive)
pos = Keyword()
lemmapos = Keyword()
speech_cat = Keyword()
verse_cat = Keyword()
manuscript_number = Integer()
section_number = Keyword()
is_rubric = Boolean()
preceding = Text()
following = Text()
para_number = Integer()
seg_number = Integer()
# n
token_number = Integer()
previous_word = KeywordInsensitive()
next_word = KeywordInsensitive()
# the seq order of appearance in the text
# for efficient sorting.
seq_order = Integer()
class Index:
name = 'tokens'
def set_derived_fields(self):
self.form = self.token
if self.pos != 'nom propre':
# capital in first letter may be due to:
# . proper name (form should preserve it)
# . capital at beginning of sentence (we lowercase it)
self.form = self.form.lower()
self.searchable = '{} {}'.format(
self.form,
self.lemma
)
@classmethod
def new_from_token_element(cls, token_element, parsing_context):
attrib = utils.get_data_from_kwik_item(None, token_element)
ret = cls()
# print(attrib)
for k, v in attrib.items():
field = None
if k == 'string':
field = 'token'
v = (v or '').strip()
elif k == 'n':
field = 'token_number'
elif k == 'type':
field = 'is_rubric'
v = v == 'rubric_item'
elif k == 'location':
parts = v.split('_')
ret.manuscript_number = int('edRoyal20D1' in v)
ret.seg_number = parts[2] if len(parts) > 2 else 0
field = 'para_number'
v = int(parts[1])
elif k == 'following':
field = k
ret.next_word = v.split(' ')[0]
elif k == 'preceding':
field = k
ret.previous_word = v.split(' ')[-1]
if field is None and hasattr(ret, k):
field = k
if field:
setattr(ret, field, v)
else:
# print('WARNING: no field mapped to kwic attribute: {}'.format(k))
# TODO: sp
pass
ret.meta.id = '{}.{:03d}'.format(attrib['location'], int(attrib['n']))
ret.set_derived_fields()
if not str(getattr(ret, 'lemma', '') or '').strip():
# we don't index unlemmatised tokens (asked by partners, 01/2021)
return []
return [ret]
class LemmaDocument(Document):
'''Indexing model for a lemma'''
# for as-is
lemma = Keyword(
fields={
# for search
'searchable': Text(analyzer=analyzer_insensitive),
# for sorting
'insensitive': Keyword(normalizer=normalizer_insensitive)
}
)
# TODO?
forms = Keyword()
pos = Keyword()
name_type = Keyword()
class Index:
name = 'lemmata'
@classmethod
def new_from_token_element(cls, token_element, parsing_context):
tokenised_names = parsing_context.get('tokenised_names', None)
if tokenised_names is None:
tokenised_names = utils.read_tokenised_name_types()
parsing_context['tokenised_names'] = tokenised_names
ret = []
lemma = normalise_lemma(token_element.attrib.get('lemma', ''))
if lemma:
location_full = '__'.join([
token_element.attrib.get('location', ''),
token_element.attrib.get('n', '')
])
doc = cls(
lemma=lemma,
# lemma_sort=lemma.split(',')[0].strip().lower(),
pos=token_element.attrib.get('pos', 'Other').strip(),
name_type=tokenised_names.get(
location_full,
'Other'
)
)
if 0 and lemma == 'Ester':
print(location_full, doc.name_type)
# ES won't produce duplicates thanks to that id
doc.meta.id = lemma
ret.append(doc)
# doc.set_derived_fields()
return ret
class AutocompleteDocument(Document):
'''Indexing model for a form or lemma
TODO: check if there's a more standard or efficient way
of doing such suggestions.
'''
# autocomplete = Completion()
# For searching only, not displayed
# Basically we want a field with multiple tokens that can be searched
# by prefix (e.g. par*) and we can sort by the first token.
autocomplete = SearchAsYouType()
# I don't think SearchAsYouType can be sorted or accepts sub-fields
# so we define a sortable version separately just for sorting purpose
autocomplete_sortable = Keyword()
# for display
form = Keyword()
|
lemma = Keyword()
class Index:
name = 'autocomplete'
@classmethod
def new_from_token_element(cls, token_element, parsing_context):
ret = []
|
lemma = normalise_lemma(token_element.attrib.get('lemma', ''))
if lemma:
form = normalise_form(token_element)
for i in [0, 2]:
autocomplete = '{} {}'.format(form or lemma, lemma)
doc = cls(
lemma=lemma,
form=form,
autocomplete=get_ascii_from_unicode(autocomplete).lower(),
autocomplete_sortable=get_ascii_from_unicode(autocomplete).lower(),
)
doc.meta.id = autocomplete + ' ' + str(i)
ret.append(doc)
form = ''
return ret
class Indexer:
'''
Manages the search indexes.
The index_names argument used in the methods
is a list of index names the action should work on.
If empty or None the action will apply to all available indexes.
'''
# available indexes
indexes = OrderedDict([
['tokens', {
'document_class': AnnotatedToken,
}],
['lemmata', {
'document_class': LemmaDocument,
}],
['auto
|
drslump/pyshould
|
tests/expect.py
|
Python
|
mit
| 856
| 0
|
import unittest
from pyshould import *
from pyshould.expect import expect, expect_all, expect_any, expect_none
class ExpectTestCase(unittest.TestCase):
""" Simple tests for the expect based api """
def test_expect(self):
expect(1).to_equal(1)
expect(1).to_not_equal(0)
|
def test_expect_all(self):
expect_all([1, 2]).to_be_integer()
expect_all(1, 2).to_be_integer()
def test_expect_any(self):
expect_any([1, 2]).to_equal(2)
expect_any(1, 2).to_equal(2)
def test_expect_none(self):
expect_none([1, 2]).to_equal(0)
expect_none(1, 2).to_equal(0)
|
def test_expect_quantifiers(self):
expect(all_of(1, 2)).to_be_integer()
expect(any_of([1, 2])).to_eq(1)
def test_ignore_keywords(self):
it(1).should.be_an_int()
expect(1).to.equal(1)
|
IsabellKonrad/Shugou
|
combineFiles.py
|
Python
|
mit
| 1,675
| 0.001194
|
import os
import sys
import re
def read_text_from_include(line):
match = re.search("^#:include *(.*.kv)", line)
filename = match.group(1)
return open(filename, 'r').read()
def main(combine):
if combine:
if not os.path.isfile('shugou_original.kv'):
os.rename('shugou.kv', 'shugou_original.kv')
infile = open('shugou_original.kv', 'r')
outfile = open('shugou.kv', 'w')
for line in infile:
if '#:include' in line:
text_from_include = read_text_from_include(line)
outfile.write(text_from_include + '\n')
else:
outfile.write(line)
infile.close()
outfile.close()
print("Files successfully concatenated.")
else:
try:
os.rename('shugou_original.kv', 'shugou.kv')
print("Original file restore
|
d.")
except:
print("No backup file.\
Maybe the original file has been restored already?")
if __name__ == '__main__':
if len(sys.argv) == 1:
print("Missing necessary argument. Use \n\
'combine' to concatenate the include files into shugou.kv \n\
'clean' to restore the original shugou.kv file")
else:
if (sys.argv[1] == 'Combine' or
sys.argv[1] == 'combine' or
|
sys.argv[1] == 'True'):
main(True)
elif (sys.argv[1] == 'Clean' or
sys.argv[1] == 'clean' or sys.argv[1] == 'False'):
main(False)
else:
print("Can not understand the argument.\
Call this file again with no arguments to see possible arguments.")
|
waheedahmed/edx-platform
|
lms/djangoapps/email_marketing/tests/test_signals.py
|
Python
|
agpl-3.0
| 8,373
| 0.001672
|
"""Tests of email marketing signal handlers."""
import logging
import ddt
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from util.json_request import JsonResponse
from email_marketing.signals import handle_unenroll_done, \
email_marketing_register_user, \
email_marketing_user_field_changed, \
add_email_marketing_cookies
from email_marketing.tasks import update_user, update_user_email
from email_marketing.models import EmailMarketingConfiguration
from django.test.client import RequestFactory
from student.tests.factories import UserFactory, UserProfileFactory
from sailthru.sailthru_client import SailthruClient
from sailthru.sailthru_response import SailthruResponse
from sailthru.sailthru_error import SailthruClientError
log = logging.getLogger(__name__)
TEST_EMAIL = "test@edx.org"
def update_email_marketing_config(enabled=False, key='badkey', secret='badsecret', new_user_list='new list',
template='Activation'):
"""
Enable / Disable Sailthru integration
"""
EmailMarketingConfiguration.objects.create(
enabled=enabled,
sailthru_key=key,
sailthru_secret=secret,
sailthru_new_user_list=new_user_list,
sailthru_activation_template=template
)
@ddt.ddt
class EmailMarketingTests(TestCase):
"""
Tests for the EmailMarketing signals and tasks classes.
"""
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create(username='test', email=TEST_EMAIL)
self.profile = self.user.profile
self.request = self.request_factory.get("foo")
update_email_marketing_config(enabled=True)
super(EmailMarketingTests, self).setUp()
@patch('email_marketing.signals.SailthruClient.api_post')
def test_drop_cookie(self, mock_sailthru):
"""
Test add_email_marketing_cookies
"""
response = JsonResponse({
"success": True,
"redirect_url": 'test.com/test',
})
mock_sailthru.return_value = SailthruResponse(JsonResponse({'keys': {'cookie': 'test_cookie'}}))
add_email_marketing_cookies(None, response=response, user=self.user)
self.assertTrue('sailthru_hid' in response.cookies)
self.assertEquals(mock_sailthru.call_args[0][0], "user")
userparms = mock_sailthru.call_args[0][1]
self.assertEquals(userparms['fields']['keys'], 1)
self.assertEquals(userparms['id'], TEST_EMAIL)
self.assertEquals(response.cookies['sailthru_hid'].value, "test_cookie")
@patch('email_marketing.signals.SailthruClient.api_post')
def test_drop_cookie_error_path(self, mock_sailthru):
"""
test that error paths return no cookie
"""
response = JsonResponse({
"success": True,
"redirect_url": 'test.com/test',
})
mock_sailthru.return_value = SailthruResponse(JsonResponse({'keys': {'cookiexx': 'test_cookie'}}))
add_email_marketing_cookies(None, response=response, user=self.user)
self.assertFalse('sailthru_hid' in response.cookies)
mock_sailthru.return_value = SailthruResponse(JsonResponse({'error': "error", "errormsg": "errormsg"}))
add_email_marketing_cookies(None, response=response, user=self.user)
self.assertFalse('sailthru_hid' in response.cookies)
mock_sailthru.side_effect = SailthruClientError
add_email_marketing_cookies(None, response=response, user=self.user)
self.assertFalse('sailthru_hid' in response.cookies)
@patch('email_marketing.tasks.log.error')
@patch('email_marketing.tasks.SailthruClient.api_post')
def test_add_user(self, mock_sailthru, mock_log_error):
"""
test async method in tasks that actually updates Sailthru
"""
mock_sailthru.return_value = SailthruResponse(JsonResponse({'ok': True}))
update_user.delay(self.user.username, new_user=True)
self.assertFalse(mock_log_error.called)
self.assertEquals(mock_sailthru.call_args[0][0], "user")
userparms = mock_sailthru.call_args[0][1]
self.assertEquals(userparms['key'], "email")
self.assertEquals(userparms['id'], TEST_EMAIL)
self.assertEquals(userparms['vars']['gender'], "m")
self.assertEquals(userparms['vars']['username'], "test")
self.assertEquals(userparms['vars']['activated'], 1)
self.assertEquals(userparms['lists']['new list'], 1)
@patch('email_marketing.tasks.SailthruClient.api_post')
def test_activation(self, mock_sailthru):
"""
test send of activation template
"""
|
mock_sailthru.return_value = SailthruResponse(JsonResponse({'ok': True}))
update_user.delay(self.user.username, new_user=True, activation=True)
# look for call args for 2nd call
self.assertEquals(mock_sailthru.call_args[0][0], "send")
userparms = mock_sailthru.call_args[0][1]
|
self.assertEquals(userparms['email'], TEST_EMAIL)
self.assertEquals(userparms['template'], "Activation")
@patch('email_marketing.tasks.log.error')
@patch('email_marketing.tasks.SailthruClient.api_post')
def test_error_logging(self, mock_sailthru, mock_log_error):
"""
Ensure that error returned from Sailthru api is logged
"""
mock_sailthru.return_value = SailthruResponse(JsonResponse({'error': 100, 'errormsg': 'Got an error'}))
update_user.delay(self.user.username)
self.assertTrue(mock_log_error.called)
@patch('email_marketing.tasks.log.error')
@patch('email_marketing.tasks.SailthruClient.api_post')
def test_just_return(self, mock_sailthru, mock_log_error):
"""
Ensure that disabling Sailthru just returns
"""
update_email_marketing_config(enabled=False)
update_user.delay(self.user.username)
self.assertFalse(mock_log_error.called)
self.assertFalse(mock_sailthru.called)
update_user_email.delay(self.user.username, "newemail2@test.com")
self.assertFalse(mock_log_error.called)
self.assertFalse(mock_sailthru.called)
update_email_marketing_config(enabled=True)
@patch('email_marketing.tasks.SailthruClient.api_post')
def test_change_email(self, mock_sailthru):
"""
test async method in task that changes email in Sailthru
"""
mock_sailthru.return_value = SailthruResponse(JsonResponse({'ok': True}))
#self.user.email = "newemail@test.com"
update_user_email.delay(self.user.username, "old@edx.org")
self.assertEquals(mock_sailthru.call_args[0][0], "user")
userparms = mock_sailthru.call_args[0][1]
self.assertEquals(userparms['key'], "email")
self.assertEquals(userparms['id'], "old@edx.org")
self.assertEquals(userparms['keys']['email'], TEST_EMAIL)
@patch('email_marketing.tasks.log.error')
@patch('email_marketing.tasks.SailthruClient.api_post')
def test_error_logging1(self, mock_sailthru, mock_log_error):
"""
Ensure that error returned from Sailthru api is logged
"""
mock_sailthru.return_value = SailthruResponse(JsonResponse({'error': 100, 'errormsg': 'Got an error'}))
update_user_email.delay(self.user.username, "newemail2@test.com")
self.assertTrue(mock_log_error.called)
@patch('lms.djangoapps.email_marketing.tasks.update_user.delay')
def test_register_user(self, mock_update_user):
"""
make sure register user call invokes update_user
"""
email_marketing_register_user(None, user=self.user, profile=self.profile)
self.assertTrue(mock_update_user.called)
@patch('lms.djangoapps.email_marketing.tasks.update_user.delay')
@ddt.data(('auth_userprofile', 'gender', 'f', True),
('auth_user', 'is_active', 1, True),
('auth_userprofile', 'shoe_size', 1, False))
@ddt.unpack
def test_modify_field(self, table, setting, value, result, mock_update_user):
"""
Test that correct fields c
|
pli3/e2-openwbif
|
plugin/controllers/views/web/getservices.py
|
Python
|
gpl-2.0
| 5,478
| 0.011683
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.264256
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/getservices.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getservices(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(getservices, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_18987506 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2servicelist>
''')
for service in VFFSL(SL,"services",True): # generated from line 4, col 2
write(u'''\t<e2service>
\t\t<e2servicereference>''')
_v = VFFSL(SL,"service.servicereference",True) # u'$service.servicereference' on line 6, col 23
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicereference')) # from line 6, col 23.
write(u'''</e2servicereference>
\t\t<e2servicename>''')
_v = VFFSL(SL,"service.servicename",True) # u'$service.servicename' on line 7, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicename')) # from line 7, col 18.
write(u'''</e2servicename>
\t</e2service>
''')
write(u'''</e2servicelist>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_18987506
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_getservices= 'respond'
## END CLASS DEFINITION
if not hasattr(getservices, '_initCheetahAttributes'):
templateAPIClass = getattr(getservices, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(getservices)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=getservices()).run()
|
skyostil/tracy
|
src/analyzer/Charting.py
|
Python
|
mit
| 2,058
| 0.014091
|
# Copyright (c) 2011 Nokia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import Common
from common import Log
# Try to import matplotlib for charting
try:
import matplotlib
matplotlib.use("Agg")
import pylab
except ImportError, e:
matplotlib = None
pylab = None
    Log.warn("Matplotlib or one of its dependencies not found (%s). Charts will not be generated." % e)
def slicePlot(x, y, sliceLength = 100, style = "line", *args, **kwargs):
assert len(x) == len(y)
if style == "line":
plotFunc = pylab.plot
elif style == "bar":
plotFunc = pylab.bar
else:
raise RuntimeError("Unknown plotting style: %s" % style)
if len(x) < sliceLength:
plotFunc(x, y, *args, **kwargs)
return
slices = int(len(x) / sliceLength)
pylab.figure(figsize = (8, slices * 1))
for i in range(slices):
pylab.subplot(slices, 1, i + 1)
plotFunc(x[i * sliceLength: (i + 1) * sliceLength], y[i * sliceLength: (i + 1) * sliceLength], *args, **kwargs)
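A minimal, hedged usage sketch for slicePlot, not part of the original file: it assumes matplotlib/pylab was imported successfully above, and the output filename is made up for illustration.
import numpy
x = numpy.arange(250)
slicePlot(x, numpy.sin(0.1 * x), sliceLength=100, style="line")  # renders two stacked 100-sample panels
pylab.savefig("slices.png")  # persist the sliced figure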
|
rhyolight/nupic.core
|
bindings/py/src/nupic/bindings/regions/TestNode.py
|
Python
|
agpl-3.0
| 10,011
| 0.005894
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from nupic.bindings.regions.PyRegion import PyRegion
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto.TestNodeProto_capnp import TestNodeProto
class TestNode(PyRegion):
@classmethod
def getSpec(cls):
if hasattr(TestNode, '_failIngetSpec'):
assert False, 'Failing in TestNode.getSpec() as requested'
result = dict(
description='The node spec of the NuPIC 2 Python TestNode',
singleNodeOnly=False,
inputs=dict(
bottomUpIn=dict(
description='Primary input for the node',
dataType='Real64',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False
)
),
outputs=dict(
bottomUpOut=dict(
description='Primary output for the node',
dataType='Real64',
count=0,
regionLevel=False,
isDefaultOutput=True
)
),
parameters=dict(
int32Param=dict(
description='Int32 scalar parameter',
dataType='Int32',
count=1,
constraints='',
defaultValue='32',
accessMode='ReadWrite'
),
uint32Param=dict(
description='UInt32 scalar parameter',
dataType='UInt32',
count=1,
constraints='',
defaultValue='33',
accessMode='ReadWrite'
),
int64Param=dict(
description='Int64 scalar parameter',
dataType='Int64',
count=1,
constraints='',
defaultValue='64',
accessMode='ReadWrite'
),
uint64Param=dict(
description='UInt64 scalar parameter',
dataType='UInt64',
count=1,
constraints='',
defaultValue='65',
accessMode='ReadWrite'
),
real32Param=dict(
description='Real32 scalar parameter',
dataType='Real32',
count=1,
constraints='',
defaultValue='32.1',
accessMode='ReadWrite'
),
real64Param=dict(
description='Real64 scalar parameter',
dataType='Real64',
count=1,
constraints='',
defaultValue='64.1',
accessMode='ReadWrite'
),
boolParam=dict(
description='bool parameter',
dataType='Bool',
count=1,
constraints='',
defaultValue='false',
accessMode='ReadWrite'
),
real32arrayParam=dict(
description='Real32 array parameter',
dataType='Real32',
count=0, # array
constraints='',
defaultValue='',
accessMode='ReadWrite'
),
int64arrayParam=dict(
description='Int64 array parameter',
dataType='Int64',
count=0, # array
constraints='',
defaultValue='',
accessMode='ReadWrite'
),
boolArrayParam=dict(
description='bool array parameter',
dataType='Bool',
count=0, # array
constraints='',
defaultValue='',
accessMode='ReadWrite'
),
stringParam=dict(
description='String parameter',
dataType='Byte',
count=0, # string is conventionally Byte/0
constraints='',
defaultValue='nodespec value',
accessMode='ReadWrite'
),
failInInit=dict(
description='For testing failure in __init__()',
dataType='Int32',
count=1,
constraints='',
defaultValue='0',
accessMode='ReadWrite'
),
failInCompute=dict(
description='For testing failure in compute()',
dataType='Int32',
count=1,
constraints='',
defaultValue='0',
accessMode='ReadWrite'
),
),
commands=dict()
)
print result
return result
def __init__(self, *args, **kwargs):
""" """
# Facilitate failing in __init__ to test error handling
if 'failInInit' in kwargs:
assert False, 'TestNode.__init__() Failing on purpose as requested'
# Check if should fail in compute to test error handling
self._failInCompute = kwargs.pop('failInCompute', False)
# set these to a bunch of incorrect values, just to make
# sure they are set correctly by the nodespec.
self.parameters = dict(
int32Param=32,
uint32Param=33,
int64Param=64,
uint64Param=65,
real32Param=32.1,
real64Param=64.1,
boolParam=False,
real32ArrayParam=numpy.arange(10).astype('float32'),
real64ArrayParam=numpy.arange(10).astype('float64'),
# Construct int64 array in the same way as in C++
int64ArrayParam=numpy.arange(4).astype('int64'),
boolArrayParam=numpy.array([False]*4),
stringParam="nodespec value")
for key in kwargs:
if not key in self.parameters:
raise Exception("TestNode found keyword %s but there is no parameter with that name" % key)
self.parameters[key] = kwargs[key]
self.outputElementCount = 2 # used for computation
self._delta = 1
self._iter = 0
for i in xrange(0,4):
self.parameters["int64ArrayParam"][i] = i*64
def getParameter(self, name, index):
assert name in self.parameters
return self.parameters[name]
def setParameter(self, name, index, value):
assert name in self.parameters
self.parameters[name] = value
def initialize(self):
print 'TestNode.initialize() here.'
def compute(self, inputs, outputs):
if self._failInCompute:
assert False, 'TestNode.compute() Failing on purpose as requested'
def getOutputElementCount(self, name):
assert name == 'bottomUpOut'
return self.outputElementCount
def getParameterArrayCount(self, name, index):
assert name.endswith('ArrayParam')
print 'len(self.parameters[%s]) = %d' % (name, len(self.parameters[name]))
return len(self.parameters[name])
def getParameterArray(self, name, index, array):
assert name.endswith('ArrayParam')
assert name in self.parameters
v = self.parameters[name]
assert len(array) == len(v)
assert array.dtype == v.dtype
array[:] = v
def setParameterArray(self, name, index, array):
assert name.endswith('ArrayParam')
assert name in self.parameters
assert array.dtype == self.parameters[name].dtype
self.parameters[name] = numpy.array(array)
def getSchema():
return None
def writeArray(self, regionImpl, name, dtype, castFn):
count = self.getParameterArrayCount(name, 0)
param = numpy.zeros(count, dtype=dtype)
self.getParameterArray(name, 0, param)
field = regionImpl.init(name, count)
for i in range(count):
field[i] = castFn(param[i])
def write(self, proto):
regionImpl = proto.regionImpl.as_struct(TestNodeProto)
regionImpl.int32Param = self.getParameter("int32Param", 0)
regionImpl.uint32Param = self.getParameter("uint32Param", 0);
regionImpl.int64Param = self.getParameter("int64Param", 0);
re
|
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_12/test_artificial_1024_Logit_PolyTrend_7_12_20.py
|
Python
|
bsd-3-clause
| 262
| 0.087786
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12);
|
vhaupert/mitmproxy
|
examples/addons/events-websocket-specific.py
|
Python
|
mit
| 1,300
| 0
|
"""WebSocket-specific events."""
import mitmproxy.http
import mitmproxy.websocket
class Events:
# Websocket lifecycle
def websocket_handshake(self, flow: mitmproxy.http.HTTPFlow):
"""
Called when a client wants to establish a WebSocket connection. The
WebSocket-specific headers can be manipulated to alter the
handshake. The flow object is guaranteed to have a non-None request
attribute.
"""
def websocket_start(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A websocket connection has commenced.
"""
def websocket_message(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
Called when a WebSocket message is received from the client or
server. The most recent message will be flow.messages[-1]. The
        message is user-modifiable. Currently there are two types of
messages, corresponding to the BINARY and TEXT frame types.
"""
def websocket_error(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
        A websocket connection has had an error.
"""
def websocket_end(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A websocket connection has ended.
"""
|
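A hedged wiring note on the WebSocket example above (not part of the original file): mitmproxy addon scripts are conventionally registered through a module-level addons list, e.g.
addons = [Events()]  # assumption: the standard mitmproxy addon-loading convention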
jvandijk/pla
|
pla/version.py
|
Python
|
mit
| 23
| 0
|
__version__ = "master"
| |
yetty/django-embed-video
|
example_project/example_project/wsgi.py
|
Python
|
mit
| 1,447
| 0.000691
|
"""
WSGI config for example_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "example_project.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
rmarkello/pyls
|
pyls/matlab/io.py
|
Python
|
gpl-2.0
| 6,754
| 0
|
# -*- coding: utf-8 -*-
from collections.abc import MutableMapping
import numpy as np
import scipy.io as sio
from ..structures import PLSResults
_result_mapping = (
('u', 'x_weights'),
('s', 'singvals'),
('v', 'y_weights'),
('usc', 'x_scores'),
('vsc', 'y_scores'),
('lvcorrs', 'y_loadings'),
# permres
('perm_result_sprob', 'pvals'),
('perm_result_permsamp', 'permsamples'),
# bootres
('boot_result_compare_u', 'x_weights_normed'),
('boot_result_u_se', 'x_weights_stderr'),
('boot_result_bootsamp', 'bootsamples'),
# splitres
('perm_splithalf_orig_ucorr', 'ucorr'),
('perm_splithalf_orig_vcorr', 'vcorr'),
('perm_splithalf_ucorr_prob', 'ucorr_pvals'),
('perm_splithalf_vcorr_prob', 'vcorr_pvals'),
('perm_splithalf_ucorr_ul', 'ucorr_uplim'),
('perm_splithalf_vcorr_ul', 'vcorr_lolim'),
('perm_splithalf_ucorr_ll', 'ucorr_uplim'),
('perm_splithalf_vcorr_ll', 'vcorr_lolim'),
# inputs
('inputs_X', 'X'),
('stacked_behavdata', 'Y'),
('num_subj_lst', 'groups'),
('num_conditions', 'n_cond'),
('perm_result_num_perm', 'n_perm'),
('boot_result_num_boot', 'n_boot'),
('perm_splithalf_num_split', 'n_split'),
('boot_result_clim', 'ci'),
('other_input_meancentering_type', 'mean_centering'),
('method', 'method')
)
_mean_centered_mapping = (
('boot_result_orig_usc', 'contrast'),
('boot_result_distrib', 'contrast_boot'),
('boot_result_ulusc', 'contrast_ci_up'),
('boot_result_llusc', 'contrast_ci_lo'),
)
_behavioral_mapping = (
('boot_result_orig_corr', 'y_loadings'),
('boot_result_distrib', 'y_loadings_boot'),
('boot_result_ulcorr', 'y_loadings_ci_up'),
('boot_result_llcorr', 'y_loadings_ci_lo'),
)
def _coerce_void(value):
"""
Converts `value` to `value.dtype`
Parameters
----------
value : array_like
Returns
-------
value : dtype
`Value` coerced to `dtype`
"""
if np.squeeze(value).ndim == 0:
return value.dtype.type(value.squeeze())
else:
return np.squeeze(value)
def _flatten(d, parent_key='', sep='_'):
"""
Flattens nested dictionary `d` into single dictionary with new keyset
Parameters
----------
d : dict
Dictionary to be flattened
parent_key : str, optional
Key of parent dictionary of `d`. Default: ''
sep : str, optional
How to join keys of `d` with `parent_key`, if provided. Default: '_'
Returns
-------
flat : dict
Flattened input dictionary `d`
Notes
-----
Taken directly from https://stackoverflow.com/a/6027615
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, MutableMapping):
items.extend(_flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def _rename_keys(d, mapping):
"""
Renames keys in dictionary `d` based on tuples in `mapping`
Parameters
----------
d : dict
        Dictionary with keys to be renamed
mapping : list of tuples
List of (oldkey, newkey) pairs to rename entries in `d`
Returns
-------
renamed : dict
Input dictionary `d` with keys renamed
"""
new_dict = d.copy()
for oldkey, newkey in mapping:
try:
new_dict[newkey] = new_dict.pop(oldkey)
except KeyError:
pass
return new_dict
def import_matlab_result(fname, datamat='datamat_lst'):
"""
Imports `fname` PLS result from Matlab
Parameters
----------
fname : str
Filepath to output mat file obtained from Matlab PLS toolbox. Should
contain at least a result struct object.
datamat : str, optional
        Variable name of datamat ('X' array) provided to the original PLS if it
        exists in `fname`. By default the datamat is not stored in the PLS results
        structure, but if it was saved in `fname` it can be loaded and
cached in the returned results object. Default: 'datamat_lst'
Returns
-------
results : :obj:`~.structures.PLSResults`
Matlab results in a Python-friendly format
"""
def get_labels(fields):
labels = [k for k, v in sorted(fields.items(),
key=lambda x: x[-1][-1])]
return labels
# load mat file using scipy.io
matfile = sio.loadmat(fname)
# if 'result' key is missing then consider this a malformed PLS result mat
try:
result = matfile.get('result')[0, 0]
except (IndexError, TypeError):
raise ValueError('Cannot get result struct from provided mat file')
# convert result structure to a dictionary using dtypes as keys
labels = get_labels(result.dtype.fields)
result = {labels[n]: value for n, value in enumerate(result)}
# convert sub-structures to dictionaries using dtypes as keys
struct = ['boot_result', 'perm_result', 'perm_splithalf', 'other_input']
for attr in struct:
if result.get(attr) is not None:
labels = get_labels(result[attr].dtype.fields)
result[attr] = {labels[n]: _coerce_void(value) for n, value
in enumerate(result[attr][0, 0])}
# get input data from results file, if it exists
X = matfile.get(datamat)
result['inputs'] = dict(X=np.vstack(X[:, 0])) if X is not None else dict()
# squeeze all the values so they're a bit more interpretable
for key, val in result.items():
if isinstance(val, np.ndarray):
result[key] = _coerce_void(val)
# flatten the dictionary and rename the keys according to our mapping
result = _rename_keys(_flatten(result), _result_mapping)
if result['method'] == 3:
result = _rename_keys(result, _behavioral_mapping)
if 'y_loadings_ci_up' in result:
result['y_loadings_ci'] = np.stack([
result['y_loadings_ci_lo'], result['y_loadings_ci_up']
], axis=-1)
else:
result = _rename_keys(result, _mean_centered_mapping)
if 'contrast_ci_up' in result:
result['contrast_ci'] = np.stack([
result['contrast_ci_lo'], result['contrast_ci_up']
], axis=-1)
# index arrays - 1 to account for Matlab vs Python 1- vs 0-indexing
for key in ['bootsamples', 'permsamples']:
try:
result[key] -= 1
except KeyError:
continue
if result.get('n_split', None) is None:
result['n_split'] = None
# pack it into a `PLSResults` class instance for easy attribute access
results = PLSResults(**result)
return results
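A minimal, hedged usage sketch for the loader above; the .mat filename is hypothetical and the import path assumes this module is reachable as pyls.matlab.io.
from pyls.matlab.io import import_matlab_result
# Convert a Matlab PLS toolbox result file into a Python-friendly PLSResults object.
results = import_matlab_result('pls_result.mat', datamat='datamat_lst')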
|
jprk/sirid
|
aapi/AAPI.py
|
Python
|
gpl-3.0
| 131,251
| 0.012107
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.36
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _AAPI
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'PySwigObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class intp(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, intp, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, intp, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _AAPI.new_intp(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _AAPI.delete_intp
__del__ = lambda self : None;
def assign(*args): return _AAPI.intp_assign(*args)
def value(*args): return _AAPI.intp_value(*args)
def cast(*args): return _AAPI.intp_cast(*args)
__swig_getmethods__["frompointer"] = lambda x: _AAPI.intp_frompointer
if _newclass:frompointer = staticmethod(_AAPI.intp_frompointer)
intp_swigregister = _AAPI.intp_swigregister
intp_swigregister(intp)
intp_frompointer = _AAPI.intp_frompointer
class floatp(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, floatp, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, floatp, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _AAPI.new_floatp(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _AAPI.delete_floatp
__del__ = lambda self : None;
def assign(*args): return _AAPI.floatp_assign(*args)
def value(*args): return _AAPI.floatp_value(*args)
def cast(*args): return _AAPI.floatp_cast(*args)
__swig_getmethods__["frompointer"] = lambda x: _AAPI.floatp_frompointer
if _newclass:frompointer = staticmethod(_AAPI.floatp_frompointer)
floatp_swigregister = _AAPI.floatp_swigregister
floatp_swigregister(floatp)
floatp_frompointer = _AAPI.floatp_frompointer
class doublep(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, doublep, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, doublep, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _AAPI.new_doublep(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _AAPI.delete_doublep
__del__ = lambda self : None;
def assign(*args): return _AAPI.doublep_assign(*args)
def value(*args): return _AAPI.doublep_value(*args)
def cast(*args): return _AAPI.doublep_cast(*args)
__swig_getmethods__["frompointer"] = lambda x: _AAPI.doublep_frompointer
if _newclass:frompointer = staticmethod(_AAPI.doublep_frompointer)
doublep_swigregister = _AAPI.doublep_swigregister
doublep_swigregister(doublep)
doublep_frompointer = _AAPI.doublep_frompointer
class boolp(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, boolp, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, boolp, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _AAPI.new_boolp(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _AAPI.delete_boolp
__del__ = lambda self : None;
def assign(*args): return _AAPI.boolp_assign(*args)
def value(*args): return _AAPI.boolp_value(*args)
def cast(*args): return _AAPI.boolp_cast(*args)
__swig_getmethods__["frompointer"] = lambda x: _AAPI.boolp_frompointer
if _newclass:frompointer = staticmethod(_AAPI.boolp_frompointer)
boolp_swigregister = _AAPI.boolp_swigregister
boolp_swigregister(boolp)
boolp_frompointer = _AAPI.boolp_frompointer
AKIActionAddSpeedAction = _AAPI.AKIActionAddSpeedAction
AKIActionAddDetailedSpeedAction = _AAPI.AKIActionAddDetailedSpeedAction
AKIActionCloseLaneAction = _AAPI.AKIActionCloseLaneAction
AKIActionCloseLaneActionBySegment = _AAPI.AKIActionCloseLaneActionBySegment
AKIActionCloseLaneDetailedAction = _AAPI.AKIActionCloseLaneDetailedAction
AKIActionAddNextTurningODAction = _AAPI.AKIActionAddNextTurningODAction
AKIActionAddNextTurningResultAction = _AAPI.AKIActionAddNextTurningResultAction
AKIActionAddChangeDestAction = _AAPI.AKIActionAddChangeDestAction
AKIActionChangeTurningProbAction = _AAPI.AKIActionChangeTurningProbAction
AKIActionDisableReservedLaneAction = _AAPI.AKIActionDisableReservedLaneAction
AKIActionCongestionPricingODAction = _AAPI.AKIActionCongestionPricingODAction
AKIActionRemoveAction = _AAPI.AKIActionRemoveAction
AKIActionReset = _AAPI.AKIActionReset
AKIActionAddNextSubPathODAction = _AAPI.AKIActionAddNextSubPathODAction
AKIActionAddNextSubPathResultAction = _AAPI.AKIActionAddNextSubPathResultAction
AKIActionAddNextSubPathPTAction = _AAPI.AKIActionAddNextSubPathPTAction
AKIActionModifyNextTurningODAction = _AAPI.AKIActionModifyNextTurningODAction
AKIActionModifyNextTurningResultAction = _AAPI.AKIActionModifyNextTurningResultAction
AKIActionModifyChangeDestAction = _AAPI.AKIActionModifyChangeDestAction
AKIActionModifyNextSubPathResultAction = _AAPI.AKIActionModifyNextSubPathResultAction
AKIActionModifyNextSubPathODAction = _AAPI.AKIActionModifyNextSubPathODAction
AKIActionModifyCloseTurningODAction = _AAPI.AKIActionModifyCloseTurningODAction
class InfVeh(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, InfVeh, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, InfVeh, name)
__repr__ = _swig_repr
__swig_setmethods__["report"] = _AAPI.InfVeh_report_set
__swig_getmethods__["report"] = _AAPI.InfVeh_report_get
if _newclass:report = _swig_property(_AAPI.InfVeh_report_get, _AAPI.InfVeh_report_set)
__swig_setmethods__["idVeh"] = _AAPI.InfVeh_idVeh_set
__swig_getmethods__["idVeh"] = _AAPI.InfVeh_idVeh_get
if _newclass:idVeh = _swig_property(_AAPI.InfVeh_idVeh_get, _AAPI.InfVeh_idVeh_set)
__swig_setmethods__["type"] = _AAPI.InfVeh_type_set
__swig_getmethods__["type"] = _AAPI.InfVeh_type_get
if _newclass:type = _swig_property(_AAPI.InfVeh_type_get, _AAPI.InfVeh_type_set)
__swig_setmethods__["idSection"] = _AAPI.InfVeh_idSection_set
__swig_getmethods__["idSection"] = _AAPI.InfVeh_idSection_get
if _newclass:idSection = _swig_property(_AAPI.InfVeh_idSection_get, _AAPI.InfVeh_idSection_set)
__swig_setmethods__["segment"] = _AAPI.InfVeh_segment_set
__swig_getmethods__["segment"] = _AAPI.InfVeh_segment_get
if _newclass:segment = _swig_property(_AAPI.InfVeh_segment_get, _AAPI.InfVeh_segment_set)
__swig_setmethods__["numberLane"] = _AAPI.InfVeh_numberLane_set
__swig_g
|
lavish205/olympia
|
src/olympia/devhub/tests/test_utils.py
|
Python
|
bsd-3-clause
| 10,323
| 0
|
import os.path
from django.conf import settings
from django.test.utils import override_settings
import mock
from celery.result import AsyncResult
from olympia import amo
from olympia.amo.tests import TestCase, addon_factory, version_factory
from olympia.devhub import tasks, utils
from olympia.files.models import FileUpload
class TestValidatorBase(TestCase):
def setUp(self):
# Create File objects for version 1.0 and 1.1.
self.addon = addon_factory(
guid='test-desktop@nowhere', slug='test-amo-addon',
version_kw={'version': '1.0'})
self.version = self.addon.current_version
self.file = self.version.files.get()
self.version_1_1 = version_factory(addon=self.addon, version='1.1')
self.file_1_1 = self.version_1_1.files.get()
# Creating the files and versions above resets this.
self.addon.update(status=amo.STATUS_PUBLIC)
# Create a FileUpload object for an XPI containing version 1.1.
path = os.path.join(settings.ROOT,
'src/olympia/devhub/tests/addons/desktop.xpi')
self.file_upload = FileUpload.objects.create(path=path)
self.xpi_version = '1.1'
# Patch validation tasks that we expect the validator to call.
self.patchers = []
self.save_file = self.patch(
'olympia.devhub.tasks.handle_file_validation_result').subtask
self.save_upload = self.patch(
'olympia.devhub.tasks.handle_upload_validation_result').subtask
self.validate_file = self.patch(
'olympia.devhub.tasks.validate_file').subtask
self.validate_upload = self.patch(
'olympia.devhub.tasks.validate_file_path').subtask
def patch(self, thing):
"""Patch the given "thing", and revert the patch on test teardown."""
patcher = mock.patch(thing)
self.addCleanup(patcher.stop)
return patcher.start()
def check_upload(self, file_upload, listed=True):
"""Check that the given new file upload is validated properly."""
# Run validator.
utils.Validator(file_upload, listed=listed)
# We shouldn't be attempting to validate an existing file.
assert not self.validate_file.called
# Make sure we run the correct validation task for the upload.
self.validate_upload.assert_called_once_with(
[file_upload.path],
{'hash_': file_upload.hash, 'listed': listed,
'is_webextension': False})
# Make sure we run the correct save validation task, with a
# fallback error handler.
channel = (amo.RELEASE_CHANNEL_LISTED if listed
else amo.RELEASE_CHANNEL_UNLISTED)
self.save_upload.assert_has_calls([
mock.call([mock.ANY, file_upload.pk, channel, False],
immutable=True),
mock.call([file_upload.pk, channel, False], link_error=mock.ANY)])
def check_file(self, file_):
"""Check that the given file is validated properly."""
# Run validator.
utils.Validator(file_)
# We shouldn't be attempting to validate a bare upload.
assert not self.validate_upload.called
# Make sure we run the correct validation task.
self.validate_file.assert_called_once_with(
[file_.pk],
{'hash_': file_.original_hash, 'is_webextension': False})
# Make sure we run the correct save validation task, with a
# fallback error handler.
self.save_file.assert_has_calls([
mock.call([mock.ANY, file_.pk, file_.version.channel, False],
immutable=True),
mock.call([file_.pk, file_.version.channel, False],
link_error=mock.ANY)])
class TestValidatorListed(TestValidatorBase):
@mock.patch('olympia.devhub.utils.chain')
def test_run_once_per_file(self, chain):
"""Tests that only a single validation task is run for a given file."""
task = mock.Mock()
chain.return_value = task
task.delay.return_value = mock.Mock(task_id='42')
assert isinstance(tasks.validate(self.file), mock.Mock)
assert task.delay.call_count == 1
assert isinstance(tasks.validate(self.file), AsyncResult)
assert task.delay.call_count == 1
assert isinstance(tasks.validate(self.file_1_1), mock.Mock)
assert task.delay.call_count == 2
@mock.patch('olympia.devhub.utils.chain')
def test_run_once_file_upload(self, chain):
"""Tests that only a single validation task is run for a given file
upload."""
task = mock.Mock()
chain.return_value = task
task.delay.return_value = mock.Mock(task_id='42')
assert isinstance(
tasks.validate(self.file_upload, listed=True), mock.Mock)
assert task.delay.call_count == 1
assert isinstance(
tasks.validate(self.file_upload, listed=True), AsyncResult)
assert task.delay.call_count == 1
def test_cache_key(self):
"""Tests that the correct cache key is generated for a given object."""
assert (utils.Validator(self.file).cache_key ==
'validation-task:files.File:{0}:None'.format(self.file.pk))
assert (utils.Validator(self.file_upload, listed=False).cache_key ==
'validation-task:files.FileUpload:{0}:False'.format(
self.file_upload.pk))
@mock.patch('olympia.devhub.utils.parse_addon')
def test_search_plugin(self, parse_addon):
"""Test that search plugins are handled correctly."""
parse_addon.return_value = {
'guid': None,
'version': '20140103',
'is_webextension': False,
}
addon = addon_factory(type=amo.ADDON_SEARCH,
version_kw={'version': '20140101'})
assert addon.guid is None
self.check_upload(self.file_upload)
self.validate_upload.reset_mock()
self.save_file.reset_mock()
version = version_factory(addon=addon, version='20140102')
self.check_file(version.files.get())
class TestLimitValidationResults(TestCase):
"""Test that higher priority messages are truncated last."""
def make_validation(self, types):
"""Take a list of error types and make a
validation results dict."""
validation = {
'messages': [],
'errors': 0,
'warnings': 0,
'notices': 0,
}
severities = ['low', 'medium', 'high']
for type_ in types:
if type_ in severities:
type_ = 'warning'
validation[type_ + 's'] += 1
validation['messages'].append({'type': type_})
return validation
@override_settings(VALIDATOR_MESSAGE_LIMIT=2)
def test_errors_are_first(self):
        validation = self.make_validation(
['error', 'warning', 'notice', 'error'])
        utils.limit_validation_results(validation)
limited = validation['messages']
assert len(limited) == 3
assert '2 messages were truncated' in limited[0]['message']
assert limited[1]['type'] == 'error'
assert limited[2]['type'] == 'error'
class TestFixAddonsLinterOutput(TestCase):
def test_fix_output(self):
original_output = {
'count': 4,
'summary': {
'errors': 0,
'notices': 0,
'warnings': 4
},
'metadata': {
'manifestVersion': 2,
'name': 'My Dogs New Tab',
'type': 1,
'version': '2.13.15',
'architecture': 'extension',
'emptyFiles': [],
'jsLibs': {
'lib/vendor/jquery.js': 'jquery.2.1.4.jquery.js'
}
},
'errors': [],
'notices': [],
'warnings': [
{
'_type': 'warning',
'code': 'MANIFEST_PERMISSIONS',
'message': '/permissions:
|
ngoduykhanh/PowerDNS-Admin
|
powerdnsadmin/__init__.py
|
Python
|
mit
| 4,275
| 0.000702
|
import os
import logging
from flask import Flask
from flask_seasurf import SeaSurf
from flask_mail import Mail
from werkzeug.middleware.proxy_fix import ProxyFix
from flask_session import Session
from .lib import utils
def create_app(config=None):
from . import models, routes, services
from .assets import assets
app = Flask(__name__)
# Read log level from environment variable
log_level_name = os.environ.get('PDNS_ADMIN_LOG_LEVEL', 'WARNING')
log_level = logging.getLevelName(log_level_name.upper())
# Setting logger
logging.basicConfig(
level=log_level,
format=
"[%(asctime)s] [%(filename)s:%(lineno)d] %(levelname)s - %(message)s")
# If we use Docker + Gunicorn, adjust the
# log handler
if "GUNICORN_LOGLEVEL" in os.environ:
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
# Proxy
app.wsgi_app = ProxyFix(app.wsgi_app)
# CSRF protection
csrf = SeaSurf(app)
csrf.exempt(routes.index.dyndns_checkip)
csrf.exempt(routes.index.dyndns_update)
csrf.exempt(routes.index.saml_authorized)
csrf.exempt(routes.api.api_login_create_zone)
csrf.exempt(routes.api.api_login_delete_zone)
csrf.exempt(routes.api.api_generate_apikey)
csrf.exempt(routes.api.api_delete_apikey)
csrf.exempt(routes.api.api_update_apikey)
csrf.exempt(routes.api.api_zone_subpath_forward)
csrf.exempt(routes.api.api_zone_forward)
csrf.exempt(routes.api.api_create_zone)
csrf.exempt(routes.api.api_create_account)
csrf.exempt(routes.api.api_delete_account)
csrf.exempt(routes.api.api_update_account)
csrf.exempt(routes.api.api_create_user)
csrf.exempt(routes.api.api_delete_user)
csrf.exempt(routes.api.api_update_user)
csrf.exempt(routes.api.api_list_account_users)
csrf.exempt(routes.api.api_add_account_user)
csrf.exempt(routes.api.api_remove_account_user)
csrf.exempt(routes.api.api_zone_cryptokeys)
csrf.exempt(routes.api.api_zone_cryptokey)
# Load config from env variables if using docker
if os.path.exists(os.path.join(app.root_path, 'docker_config.py')):
app.config.from_object('powerdnsadmin.docker_config')
else:
# Load default configuration
app.config.from_object('powerdnsadmin.default_config')
# Load config file from FLASK_CONF env variable
if 'FLASK_CONF' in os.environ:
app.config.from_envvar('FLASK_CONF')
    # Load app-specified configuration
if config is not None:
if isinstance(config, dict):
app.config.update(config)
elif config.endswith('.py'):
app.config.from_pyfile(config)
# HSTS
if app.config.get('HSTS_ENABLED'):
from flask_sslify import SSLify
_sslify = SSLify(app) # lgtm [py/unused-local-variable]
# Load Flask-Session
if app.config.get('FILESYSTEM_SESSIONS_ENABLED'):
app.config['SESSION_TYPE'] = 'filesystem'
sess = Session()
sess.init_app(app)
# SMTP
app.mail = Mail(app)
# Load app's components
assets.init_app(app)
models.init_app(app)
routes.init_app(app)
services.init_app(app)
# Register filters
    app.jinja_env.filters['display_record_name'] = utils.display_record_name
app.jinja_env.filters['display_master_name'] = utils.display_master_name
app.jinja_env.filters['display_second_to_time'] = utils.display_time
app.jinja_env.filters[
'email_to_gravatar_url'] = utils.email_to_gravatar_url
app.jinja_env.filters[
'display_setting_state'] = utils.display_setting_state
    app.jinja_env.filters['pretty_domain_name'] = utils.pretty_domain_name
    # Register context processors
from .models.setting import Setting
@app.context_processor
def inject_sitename():
setting = Setting().get('site_name')
return dict(SITE_NAME=setting)
@app.context_processor
def inject_setting():
setting = Setting()
return dict(SETTING=setting)
@app.context_processor
def inject_mode():
setting = app.config.get('OFFLINE_MODE', False)
return dict(OFFLINE_MODE=setting)
return app
|
slackapi/python-slackclient
|
tutorial/PythOnBoardingBot/onboarding_tutorial.py
|
Python
|
mit
| 2,881
| 0.003124
|
class OnboardingTutorial:
"""Constructs the onboarding message and stores the state of which tasks were completed."""
WELCOME_BLOCK = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": (
"Welcome to Slack! :wave: We're so glad you're here. :blush:\n\n"
"*Get started by completing the steps below:*"
),
},
}
DIVIDER_BLOCK = {"type": "divider"}
def __init__(self, channel):
self.channel = channel
self.username = "pythonboardingbot"
self.icon_emoji = ":robot_face:"
self.timestamp = ""
self.reaction_task_completed = False
self.pin_task_completed = False
def get_message_payload(self):
return {
"ts": self.timestamp,
"channel": self.channel,
"username": self.username,
"icon_emoji": self.icon_emoji,
"blocks": [
self.WELCOME_BLOCK,
self.DIVIDER_BLOCK,
*self._get_reaction_block(),
self.DIVIDER_BLOCK,
*self._get_pin_block(),
],
}
def _get_reaction_block(self):
task_checkmark = self._get_checkmark(self.reaction_task_completed)
text = (
f"{task_checkmark} *Add an emoji reaction to this message* :thinking_face:\n"
"You can quickly respond to any message on Slack with an emoji reaction."
"Reactions can be used for any purpose: voting, checking off to-do items, showing excitement."
)
        information = (
            ":information_source: *<https://get.slack.help/hc/en-us/articles/206870317-Emoji-reactions|"
"Learn How to Use Emoji Reactions>*"
)
return self._get_task_block(text, information)
def _get_pin_block(self):
task_checkmark = self._get_checkmark(self.pin_task_completed)
        text = (
            f"{task_checkmark} *Pin this message* :round_pushpin:\n"
"Important messages and files can be pinned to the details pane in any channel or"
" direct message, including group messages, for easy reference."
)
information = (
":information_source: *<https://get.slack.help/hc/en-us/articles/205239997-Pinning-messages-and-files"
"|Learn How to Pin a Message>*"
)
return self._get_task_block(text, information)
@staticmethod
def _get_checkmark(task_completed: bool) -> str:
if task_completed:
return ":white_check_mark:"
return ":white_large_square:"
@staticmethod
def _get_task_block(text, information):
return [
{"type": "section", "text": {"type": "mrkdwn", "text": text}},
{"type": "context", "elements": [{"type": "mrkdwn", "text": information}]},
]
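A minimal, hedged usage sketch for the class above; the channel name is made up, and the payload is presumably meant to be unpacked into a Slack chat.postMessage call by the surrounding tutorial app.
tutorial = OnboardingTutorial("#onboarding-test")
payload = tutorial.get_message_payload()  # dict with channel, username, icon_emoji and blocks
# e.g. web_client.chat_postMessage(**payload) -- assumed usage, not shown in this file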
|
gnuvet/gnuvet
|
options_ui.py
|
Python
|
gpl-3.0
| 1,879
| 0.00958
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Dipl.Tzt. Enno Deimel <ennodotvetatgmxdotnet>
#
# This file is part of gnuvet, published under the GNU General Public License
# version 3 or later (GPLv3+ in short). See the file LICENSE for information.
# Initially created: Fri Apr 23 00:02:26 2010 by: PyQt4 UI code generator 4.4.2
from PyQt4.QtGui import (QApplication, QCheckBox, QDialogButtonBox)
def tl(txt=''):
return QApplication.translate("Options", txt, None, 1)
class Ui_Options(object):
def setupUi(self, Options):
Options.resize(245,175)
Options.setMinimumSize(245,175)
self.buttonBox = QDialogButtonBox(Options)
self.buttonBox.setGeometry(10,120,221,32)
self.buttonBox.setStandardButtons(
QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(True)
self.autoConsCb = QCheckBox(Options)
self.autoConsCb.setGeometry(60,20,107,23)
        self.autoHistCb = QCheckBox(Options)
self.autoHistCb.setGeometry(60,50,104,23)
self.lSympCb = QCheckBox(Options)
self.lSympCb.setGeometry(60,80,122,23)
self.retranslateUi(Options)
def retranslateUi(self, Options):
Options.setWindowTitle(tl("GnuVet: Set Options"))
        self.autoConsCb.setToolTip(tl("To automatically book consultation"))
self.autoConsCb.setText(tl("Auto-Consult"))
self.autoHistCb.setToolTip(tl("To automatically open History Window."))
self.autoHistCb.setText(tl("Auto-History"))
self.lSympCb.setToolTip(tl("To use the Lead Symptom feature."))
self.lSympCb.setText(tl("Lead Symptom"))
if __name__ == '__main__':
from PyQt4.QtGui import QMainWindow, QShortcut
a = QApplication([])
b = Ui_Options()
w = QMainWindow()
b.setupUi(w)
QShortcut('Ctrl+W', w, quit)
w.show()
exit(a.exec_())
|
quantmind/pulsar-queue
|
tests/example/executable.py
|
Python
|
bsd-3-clause
| 235
| 0
|
"""
An example of a python script which can be executed by the task queue
"""
import sys
def execute():
"""Simply write the python executable
"""
    sys.stdout.write(sys.executable)
if __name__ == '__main__':
execute()
|
vmlaker/pythonwildmagic
|
tool/parse-xml.py
|
Python
|
mit
| 1,870
| 0.004278
|
#!/usr/bin/env python
"""Parse GCC-XML output files and produce a list of class names."""
# import system modules.
import multiprocessing
import xml.dom.minidom
import sys
import os
# Import application modules.
import mpipe
import util
# Configure and parse the command line.
NAME = os.path.basename(sys.argv[0])
ARGS = [('out_file', 'output file'),
('xml_dir', 'directory with XML files'),]
ARGS = util.parse_cmd(NAME, ARGS)
# Create a list of input files.
fnames = list()
for entry in os.listdir(ARGS['xml_dir']):
fname = os.path.join(ARGS['xml_dir'], entry)
if not os.path.isfile(fname):
continue
fnames.append(fname)
num_cpus = multiprocessing.cpu_count()
print('Parsing %d files on %d CPUs'%(len(fnames), num_cpus,))
# Parse files in a pipeline.
def parseFile(fname):
"""Parse the XML file looking for fully demangled class
names, and communicate the result."""
names = list()
doc = xml.dom.minidom.parse(fname)
classes = doc.getElementsByTagName('Class')
for entry in classes:
name = entry.getAttribute('demangled')
NSPACE = 'Wm5::'
if name[:len(NSPACE)] != NSPACE:
continue
names.append(name)
return names
pipe = mpipe.Pipeline(mpipe.UnorderedStage(parseFile, num_cpus))
for fname in fnames:
pipe.put(fname)
pipe.put(None)
# Report on progress in realtime.
total_names = dict()
done_count = 0
for result in pipe.results():
for name in result:
total_names[name] = None
done_count += 1
    percent = float(done_count) / len(fnames) * 100
sys.stdout.write('\r' + '%d of %d (%.1f%%)'%(done_count, len(fnames), percent))
sys.stdout.flush()
# End on a newline.
print()
print('Writing file %s'%ARGS['out_file'])
fout = open(ARGS['out_file'], 'w')
for key in sorted(total_names):
fout.write('%s\n'%key)
fout.close()
# The end.
|
hclivess/Stallion
|
nuitka/Cryptodome/Util/__init__.py
|
Python
|
gpl-3.0
| 1,951
| 0.00205
|
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Miscellaneous modules
Contains useful modules that don't belong into any of the
other Cryptodome.* subpackages.
======================== =============================================
Module Description
======================== =============================================
`Cryptodome.Util.number` Number-theoretic functions (primality testing, etc.)
`Cryptodome.Util.Counter` Fast counter functions for CTR cipher modes.
`Cryptodome.Util.RFC1751` Converts between 128-bit keys and human-readable
strings of words.
`Cryptodome.Util.asn1` Minimal support for ASN.1 DER encoding
`Cryptodome.Util.Padding` Set of functions for adding and removing padding.
======================== =============================================
:undocumented: _galois, _number_new, cpuid, py3compat, _raw_api
"""
__all__ = ['RFC1751', 'number', 'strxor', 'asn1', 'Counter', 'Padding']
|
bokeh/bokeh
|
examples/app/taylor.py
|
Python
|
bsd-3-clause
| 2,238
| 0.003128
|
''' A Taylor series visualization graph. This example demonstrates
the ability of Bokeh to update a chart as the input expression changes.
'''
import numpy as np
import sympy as sy
from bokeh.core.properties import value
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import (ColumnDataSource, Legend, LegendItem,
PreText, Slider, TextInput)
from bokeh.plotting import figure
xs = sy.Symbol('x')
expr = sy.exp(-xs)*sy.sin(xs)
def taylor(fx, xs, order, x_range=(0, 1), n=200):
x0, x1 = x_range
x = np.linspace(float(x0), float(x1), n)
fy = sy.lambdify(xs, fx, modules=['numpy'])(x)
tx = fx.series(xs, n=order).removeO()
if tx.is_Number:
ty = np.zeros_like(x)
ty.fill(float(tx))
else:
        ty = sy.lambdify(xs, tx, modules=['numpy'])(x)
return x, fy, ty
source = ColumnDataSource(data=dict(x=[], fy=[], ty=[]))
p = figure(x_range=(-7,7), y_range=(-100, 200), width=800, height=400)
line_f = p.line(x="x", y="fy", line_color="navy", line_width=2, source=source)
line_t = p.line(x="x", y="ty", line_color="firebrick", line_width=2, source=source)
p.background_fill_color = "lightgrey"
legend = Legend(location="top_right")
legend.items = [
LegendItem(label=value(f"{expr}"), renderers=[line_f]),
LegendItem(label=value(f"taylor({expr})"), renderers=[line_t]),
]
p.add_layout(legend)
def update():
try:
expr = sy.sympify(text.value, dict(x=xs))
except Exception as exception:
errbox.text = str(exception)
else:
errbox.text = ""
x, fy, ty = taylor(expr, xs, slider.value, (-2*sy.pi, 2*sy.pi), 200)
p.title.text = "Taylor (n=%d) expansion comparison for: %s" % (slider.value, expr)
legend.items[0].label = value(f"{expr}")
legend.items[1].label = value(f"taylor({expr})")
source.data = dict(x=x, fy=fy, ty=ty)
slider = Slider(start=1, end=20, value=1, step=1, title="Order")
slider.on_change('value', lambda attr, old, new: update())
text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', lambda attr, old, new: update())
errbox = PreText()
update()
inputs = column(text, slider, errbox, width=400)
curdoc().add_root(column(inputs, p))
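# Hedged usage note (not in the original script): since this drives curdoc(), it is meant to be
# run as a Bokeh server application, typically via `bokeh serve --show taylor.py`.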
|
ashapochka/saapy
|
saapy/analysis/actor.py
|
Python
|
apache-2.0
| 11,608
| 0.000258
|
# coding=utf-8
from typing import List
import networkx as nx
import pyisemail
from fuzzywuzzy import fuzz
from recordclass import recordclass
import pandas as pd
import saapy.util as su
from .lexeme import cleanup_proper_name
def connect_actors(actor_frame, connectivity_sets, connectivity_column):
"""
:param actor_frame:
:param connectivity_sets:
:param connectivity_column:
:return:
Examples:
same_actors = {
'ccason': [3, 14, 15], 'clipka': [4, 5, 13],
'wfpokorny': [11, 17], 'anshuarya': [0],
'bentsm': [1], 'cbarton': [2], 'dbodor': [6],
'jlecher': [7], 'jgrimbert': [8], 'nalvarez': [9],
'selvik': [10], 'wverhelst': [12], 'gryken': [16],
'github': [18]}
actor_frame = connect_actors(actor_frame, same_actors, 'actor_id')
"""
connectivity = {}
for actor_id, connectivity_set in connectivity_sets.items():
for actor in connectivity_set:
connectivity[actor] = actor_id
actor_frame[connectivity_column] = su.categorize(pd.Series(connectivity))
return actor_frame
def combine_actors(actor_frame, connectivity_column):
"""
:param actor_frame:
:param connectivity_column:
:return:
Examples:
combine_actors(actor_frame, 'actor_id')
"""
aggregator = {'name': 'first', 'email': 'first',
'author_commits': 'sum',
'committer_commits': 'sum'}
return actor_frame.groupby(connectivity_column).agg(
aggregator).reset_index()
def insert_actor_ids(commit_frame, actor_frame, drop_name_email=True):
actor_columns = ['author_name', 'author_email',
'committer_name', 'committer_email']
cf = commit_frame[actor_columns]
af = actor_frame[['name', 'email', 'actor_id']]
author = pd.merge(
cf, af, left_on=actor_columns[:2],
right_on=('name', 'email'),
how='left')['actor_id']
committer = pd.merge(
cf, af, left_on=actor_columns[2:],
right_on=('name', 'email'),
how='left')['actor_id']
commit_frame.insert(3, 'author', author)
commit_frame.insert(4, 'committer', committer)
if drop_name_email:
commit_frame.drop(actor_columns, axis=1, inplace=True)
return commit_frame
PARSED_EMAIL_FIELDS = ['email', 'valid', 'name', 'domain', 'parsed_name']
ParsedEmail = recordclass('ParsedEmail', PARSED_EMAIL_FIELDS)
PARSED_NAME_FIELDS = ['name', 'name_type']
ParsedName = recordclass('ParsedName', PARSED_NAME_FIELDS)
def proper(name: ParsedName):
return name.name_type == 'proper' or name.name_type == 'personal'
class Actor:
name: str
email: str
actor_id: str
parsed_email: ParsedEmail
parsed_name: ParsedName
def __init__(self, name: str, email: str):
self.name = name
self.email = email
self.actor_id = '{} <{}>'.format(name, email).lower()
self.parsed_email = None
self.parsed_name = None
def __repr__(self):
return "Actor('{}')".format(self.actor_id)
class ActorParser:
role_names = None
def __init__(self):
self.role_names = dict()
def add_role_names(self, name_roles):
for name, role in name_roles:
self.role_names[name] = role
def parse_name(self, name: str) -> List[str]:
"""
splits a name into parts separated by ., _, camel casing and
similar
:param name: potentially human name
:return: list of name parts
"""
parsed_name = ParsedName(**su.empty_dict(PARSED_NAME_FIELDS))
lower_name = name.lower()
if lower_name in self.role_names:
parsed_name.name_type = self.role_names[lower_name]
parsed_name.name = lower_name
else:
parsed_name.name_type = 'proper'
parsed_name.name = cleanup_proper_name(name)
return parsed_name
def parse_email(self, email: str) -> ParsedEmail:
lower_email = email.lower()
parsed_email = ParsedEmail(**su.empty_dict(PARSED_EMAIL_FIELDS))
parsed_email.email = lower_email
parsed_email.valid = pyisemail.is_email(lower_email)
        email_parts = lower_email.split('@')
parsed_email.name = email_parts[0]
if len(email_parts) == 2:
parsed_email.domain = email_parts[1]
else:
parsed_email.domain = ''
parsed_email.parsed_name = self.parse_name(parsed_email.name)
return parsed_email
def parse_actor(self, name: str, email: str, name_from_email=True) -> Actor:
parsed_email = self.parse_email(email)
if not name and name_from_email:
name = parsed_email.parsed_name.name
actor = Actor(name, email)
actor.parsed_name = self.parse_name(name)
actor.parsed_email = parsed_email
return actor
ACTOR_SIMILARITY_FIELDS = ['possible',
'identical',
'same_name',
'same_email',
'same_email_name',
'name_ratio',
'email_name_ratio',
'email_domain_ratio',
'name1_email_ratio',
'name2_email_ratio',
'proper_name1',
'proper_name2',
'proper_email_name1',
'proper_email_name2',
'explicit']
ActorSimilarity = recordclass('ActorSimilarity', ACTOR_SIMILARITY_FIELDS)
ACTOR_SIMILARITY_SETTINGS_FIELDS = ['min_name_ratio',
'min_email_domain_ratio',
'min_email_name_ratio',
'min_name_email_ratio']
ActorSimilaritySettings = recordclass('ActorSimilaritySettings',
ACTOR_SIMILARITY_SETTINGS_FIELDS)
class ActorSimilarityGraph:
actor_graph: nx.Graph
settings: ActorSimilaritySettings
def __init__(self, settings=None):
self.actor_graph = nx.Graph()
self.similarity_checks = [self.identical_actors,
self.similar_emails,
self.similar_proper_names]
if settings is None:
settings = ActorSimilaritySettings(min_name_ratio=55,
min_email_domain_ratio=55,
min_email_name_ratio=55,
min_name_email_ratio=55)
self.settings = settings
def add_actor(self, actor: Actor, link_similar=True):
if self.actor_graph.has_node(actor.actor_id):
return
self.actor_graph.add_node(actor.actor_id, actor=actor)
for actor_id, actor_attrs in self.actor_graph.nodes_iter(data=True):
if actor.actor_id == actor_id:
continue
other_actor = actor_attrs['actor']
if link_similar:
similarity = self.evaluate_similarity(actor, other_actor)
if similarity.possible:
self.actor_graph.add_edge(actor.actor_id,
other_actor.actor_id,
similarity=similarity,
confidence=None)
def link_actors(self, actor1_id: str, actor2_id: str,
confidence: float = 1):
self.actor_graph.add_edge(actor1_id, actor2_id, confidence=confidence)
if 'similarity' not in self.actor_graph[actor1_id][actor2_id]:
self.actor_graph[actor1_id][actor2_id]['similarity'] = None
def unlink_actors(self, actor1_id: str, actor2_id: str):
self.actor_graph.remove_edge(actor1_id, actor2_id)
def evaluate_similarity(self, actor: Actor,
other_actor: Actor) -> ActorSimilarity:
similarity = self.build_similarity(actor, other_actor)
checks = list(self.similarity_checks)
while not similarity.possible and len(checks):
|
Stanford-Online/edx-platform
|
openedx/stanford/djangoapps/student_utils/tests/test_bulk_user_activate.py
|
Python
|
agpl-3.0
| 2,256
| 0.000887
|
"""
Tests for the bulk_user_activate command.
"""
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase
from openedx.stanford.djangoapps.student_utils.helpers import get_users_by_email
class BulkUserActivateTests(TestCase):
"""
Test the bulk_user_activate command.
"""
help = __doc__
NUMBER_USERS = 10
NUMBER_DOMAINS = 3
def setUp(self):
        super(BulkUserActivateTests, self).setUp()
self.domain = [
"{i}.example.com".format(
i=i,
)
for i in xrange(BulkUserActivateTests.NUMBER_DOMAINS)
]
self.users = [
User.objects.create(
username="user{i}".format(
i=i,
),
email="user{i}@{domain}".format(
i=i,
domain=self.domain[(i % BulkUserActivateTests.NUMBER_DOMAINS)],
),
is_active=(i % 2),
)
for i in xrange(BulkUserActivateTests.NUMBER_USERS)
]
def test_bulk_without_force(self):
"""
Verify that nothing is changed when force is set to false.
"""
domain = self.domain[0]
users_before = get_users_by_email(domain, is_active=True)
count_before = users_before.count()
self.assertGreater(count_before, 0)
call_command(
'bulk_user_activate',
'--domain', domain,
)
users_after = get_users_by_email(domain, is_active=True)
count_after = users_after.count()
self.assertEqual(count_before, count_after)
def test_bulk_with_force(self):
"""
        Verify that users are activated when force is set to true.
"""
domain = self.domain[0]
users_before = get_users_by_email(domain, is_active=False)
count_before = users_before.count()
self.assertGreater(count_before, 0)
call_command(
'bulk_user_activate',
'--domain', domain,
'--force',
)
users_after = get_users_by_email(domain, is_active=False)
count = users_after.count()
self.assertEqual(count, 0)
|
carpedm20/Bias
|
scripts/download.py
|
Python
|
bsd-3-clause
| 449
| 0.004454
|
"""
Downloads the following:
- Korean Wikipedia texts
- Korean
"""
from sqlparse import parsestream
from sqlparse.sql import Parenthesis
for statement in parsestream(open('data/test.sql')):
texts = [str(token.tokens[1].tokens[-1]).decode('string_escape') for token in statement.tokens if isinstance(token, Parenthesis)]
print texts
    texts = [text for text in texts if text[0] != '#']
if texts:
print "\n===\n".join(texts)
|
google/makani
|
config/base_station/sim/perch_sim.py
|
Python
|
apache-2.0
| 5,391
| 0.002968
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perch simulator parameters."""
from makani.analysis.control import geometry
from makani.config import mconfig
from makani.config import physics_util
from makani.config.sensor_util import MakeEncoderParams
from makani.control import system_types
from makani.sim import sim_types as m
import numpy as np
@mconfig.Config(deps={
'common_params': 'common.common_params',
'gs_model': 'base_station.gs_model',
'perch': 'base_station.perch',
'sim_options': 'common.sim.sim_options',
'flight_plan': 'common.flight_plan'
})
def MakeParams(params):
"""Make perch simulator parameters."""
# pylint: disable=invalid-name
A, B = physics_util.CalcTwoLinkageDynamics(
params['perch']['I_perch_and_drum'], params['perch']['I_drum'],
params['perch']['b_perch'], params['perch']['b_drum'])
flight_plan = params['flight_plan']
# Pick an initial azimuth [rad] in the ground frame to rotate the
# wing and the perch. azi_g is 0 when the wing is on the negative
# xg axis.
azi_g = 0.0
# Set perch angle based on flight plan.
if (flight_plan in [m.kFlightPlanStartDownwind]
and not params['sim_options'] & m.kSimOptConstraintSystem):
theta_p_0 = m.Wrap(azi_g, -np.pi, np.pi)
initialize_in_crosswind_config = True
elif flight_plan in [m.kFlightPlanDisengageEngage,
m.kFlightPlanHighHover,
m.kFlightPlanHoverInPlace,
m.kFlightPlanLaunchPerch,
m.kFlightPlanManual,
m.kFlightPlanTurnKey]:
theta_p_0 = m.Wrap(azi_g + np.pi, -np.pi, np.pi)
initialize_in_crosswind_config = False
else:
assert False
# The tophat has one perch azimuth encoder; GSv1 has none.
perch_azi_enabled = [
params['gs_model'] == system_types.kGroundStationModelTopHat, False]
return {
# Radius [m] of the levelwind.
'levelwind_radius': 1.5,
# Position [m] of the levelwind in perch x and y coordinates
# when the levelwind elevation is zero.
'levelwind_hub_p': [1.8, -1.5],
# Minimum tension [N] for the levelwind to engage.
'levelwind_engage_min_tension': 1e3,
# The matrices describing the linearized dynamics of the perch and
# winch drum system.
'A': {'d': A.tolist()},
'B': {'d': B.tolist()},
# Initial angle [rad] of the perch relative to ground coordinates.
'theta_p_0': theta_p_0,
# Boolean [#] that describes whether the perch begins in the
# crosswind configuration (i.e. winch drum angle is 0.0 rad) or
# a reeled-in configuration.
'initialize_in_crosswind_config': initialize_in_crosswind_config,
# Properties of the perch panel. The perch panel is modeled as
# the union of two cylinders, which are pitched and rolled about
# the perch axes by the specified angles, then truncated at
# planes parallel to the perch z-plane. The port cylinder
# corresponds to the port wing side and vice versa.
#
# Parameters are from hachtmann on 2015-08-21 (with corrections on
    # 2015-09-22), and are confirmed using the drawings located here:
# go/makaniwiki/perch-geometry.
'panel': {
# For each panel, the center [m] and radius [m] describe the
# cylinder modeling it. The z_extents_p [m] specify the planes,
# parallel to the perch z-plane, at which the cylinders are
# cut.
'port': {
'center_panel': [3.122, 0.763],
'radius': 4.0,
'z_extents_p': [-0.625, 2.656],
},
'starboard': {
'center_panel': [3.203, -1.103],
'radius': 4.0,
'z_extents_p': [-0.306, 2.656],
},
'origin_pos_p': [0.0, 0.0, 0.0],
# Rotation matrix from the perch frame to the panel frame.
'dcm_p2panel': {'d': geometry.AngleToDcm(
np.deg2rad(0.0), np.deg2rad(6.0), np.deg2rad(-7.0)).tolist()},
# Y extents [m] of the panel apparatus in the panel coordinate
# system.
'y_extents_panel': [-1.9, 2.0],
},
# Sensor parameters for perch encoders. The biases of one or
# two degrees are estimated typical biases. The noise level is
# chosen to be pessimistic but not completely unrealistic.
'ts': params['common_params']['ts'],
'levelwind_ele_sensor': [MakeEncoderParams(), MakeEncoderParams()],
'perch_azi_sensor': [
MakeEncoderParams(bias=np.deg2rad(-1.0), noise_level_counts=0.25,
scale=1.0 if perch_azi_enabled[0] else 0.0),
MakeEncoderParams(bias=np.deg2rad(2.0), noise_level_counts=0.25,
scale=1.0 if perch_azi_enabled[1] else 0.0)]
}
|
jburns12/stixproject.github.io
|
documentation/idioms/malware-hash/malware-indicator-for-file-hash_consumer.py
|
Python
|
bsd-3-clause
| 1,039
| 0.001925
|
#!/usr/bin/env python
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from stix.core import STIXPackage
def parse_stix(pkg):
print("== MALWARE ==")
for fam in pkg.ttps:
print("---")
print("Title : " + fam.title)
print("ID : " + fam.id_)
for sample in fam.behavior.malware_instances:
print("Sample: " + str(sample.names[0]))
print("Type: " + str(sample.types[0]))
for ind in pkg.indicators:
print("---")
print("Title : " + ind.title)
print("Type : " + str(ind.indicat
|
or_types[0]))
print("ID -> : " + ind.indi
|
cated_ttps[0].item.idref)
for obs in ind.observables:
for digest in obs.object_.properties.hashes:
print("Hash : " + str(digest))
return 0
if __name__ == '__main__':
try:
fname = sys.argv[1]
except:
exit(1)
fd = open(fname)
stix_pkg = STIXPackage.from_xml(fd)
parse_stix(stix_pkg)
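    # Illustrative usage (added): the script expects the path to a STIX 1.x XML
    # package as its only argument, e.g.
    #     python malware-indicator-for-file-hash_consumer.py indicator.xml
    # (the XML file name here is only an example).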
|
JuBra/GEMEditor
|
GEMEditor/rw/test/test_compartment_rw.py
|
Python
|
gpl-3.0
| 2,169
| 0
|
import lxml.etree as ET
from GEMEditor.model.classes.cobra import Model, Metabolite, Compartment
from GEMEditor.rw import *
from GEMEditor.rw.compartment import add_compartments, parse_compartments
from GEMEditor.rw.test.ex_compartment import valid_compartment_list
from lxml.etree import Element
def test_parse_compartments():
parent_node = ET.fromstring(valid_compartment_list)
model = Model()
parse_compartments(parent_node, model)
assert model.gem_compartments["p"] == Compartment("p", "Periplasm")
assert model.gem_compartments["c"] == Compartment("c", "Cytoplasm")
assert model.gem_compartments["e"] == Compartment("e", "Extracellular")
def test_add_compartments():
model = Model()
model.gem_compartments["c"] = Compartment("c", "Cytoplasm")
root = Element("Root")
add_compartments(root, model)
compartment_list = root.find(sbml3_listOfCompartments)
    assert compartment_list is not None
compartment = compartment_list.find(sbml3_compartment)
assert compartment is not None
assert compartment.get("id") == "c"
assert compartment.get("name") == "Cytoplasm"
def test_add_compartments_defined_in_metabolite():
model = Model()
metabolite = Metabolite(id="test", compartment="c")
model.add_metabolites([metabolite])
root = Element("Root")
    add_compartments(root, model)
compartment_list = root.find(sbml3_listOfCompartments)
assert compartment_list is not None
compartment = compartment_list.find(sbml3_compartment)
assert compartment is not None
assert compartment.get("id") == "c"
assert compartment.get("name") is None
def test_add_compartment_empty_model():
model = Model()
root = Element("root")
add_compartments(root, model)
compartment_list = root.find(sbml3_listOfCompartments)
assert compartment_list is None
def test_consistency_write_read():
model1 = Model()
model1.gem_compartments["c"] = Compartment("c", "Cytoplasm")
root = Element("Root")
add_compartments(root, model1)
model2 = Model()
parse_compartments(root, model2)
assert model2.gem_compartments == model1.gem_compartments
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/io/tests/test_cparser.py
|
Python
|
gpl-2.0
| 10,838
| 0.000185
|
"""
C/Cython ascii file parser tests
"""
from pandas.compat import StringIO, BytesIO, map
from datetime import datetime
from pandas import compat
import csv
import os
import sys
import re
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, isnull, MultiIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextParser, TextFileReader)
from pandas.util.testing import (assert_almost_equal,
assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
import pandas.util.testing as tm
from pandas.parser import TextReader
import pandas.parser as parser
class TestCParser(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f)
result = reader.read()
finally:
f.close()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
result = reader.read()
def test_file_handle_mmap(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f, memory_map=True, header=None)
result = reader.read()
finally:
f.close()
def test_StringIO(self):
text = open(self.csv1, 'rb').read()
src = BytesIO(text)
reader = TextReader(src, header=None)
result = reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(len(set(map(id, result[0]))), 2)
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], ['a', 'a', 'a', 'a'])
self.assert_numpy_array_equal(result[1], ['b', 'b', 'b', 'b'])
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(result[0].dtype, np.bool_)
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], ['a', 'a', 'a'])
self.assert_numpy_array_equal(result[1], ['b', 'b', 'b'])
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = ['a', 'hello\nthere', 'this']
self.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = [12345.67, 345.678]
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = [123456, 12500]
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = [123456, 12500]
tm.assert_almost_equal(result[0], expected)
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
self.assertRaises(parser.CParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: ['a', 'd', 'g', 'l'],
1: ['b', 'e', 'h', 'm'],
2: ['c', 'f', 'i', 'n']}
assert_array_dicts_equal(result, expected)
stderr = sys.stderr
sys.stderr = StringIO()
try:
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
self.assertTrue('Skipping line 4' in val)
self.assertTrue('Skipping line 6' in val)
finally:
sys.stderr = stderr
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: ['"hello world"'] * 3}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S5')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'i4')
reader = _make_reader(dtype='S4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'S4')
reader = _make_reader(dtype='S4', as_recarray=True)
result = reader.read()
self.assertEqual(result['0'].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result['0'] == ex_values).all())
self.assertEqual(result['1'].dtype, 'S4')
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds
|
coeusite/ShuffleIR
|
config.py
|
Python
|
gpl-2.0
| 1,373
| 0.017769
|
# -*- coding: utf-8 -*-
# Configuration file for ShuffleIR
#===============================================
# Installation path of ShuffleMove
pathShuffleMove = '../../Shuffle-Move/'
# Stage ID of the current stage as used by ShuffleMove
# For example, the 'Sweet Dreams' escalation stage is 'SP_275' and Mega Mewtwo is '150'
# Note: the value must be wrapped in single quotes!
varStageID = 'SP_275'
# List of support Pokémon
# Format: listSupport = ('National Dex # of the Pokémon', ...)
# Note: the entries must be wrapped in single quotes!
# Special entries include blank 'Air', metal block 'Metal', wood block 'Wood' and coin 'Coin'
# For Mega Pokémon, append -m to the Dex number, e.g. Mega Aerodactyl is '142-m'
# See Supported_Icons.md for the list of supported icons
listSupport=['Air','Wood','150-m','249','488','494']
# Whether to load the frozen-icon variants (0 = do not load, 1 = load)
varIceSupport=True
# Metal block countdown timer
varMetalTimer=3
#===============================================
# The following settings determine the crop region of the Miiverse screenshot
# Relative coordinates (x1, y1, x2, y2) of the puzzle block area inside the window screenshot,
# where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner
#varBox = (46, 6, 274, 234) # Old 3DS XL
varBox = (38,376,494,832) # iPhone 6p + Airserver
#===============================================
# It is best not to modify the following
# Path to Mask
pathMask = 'images/block_mask76.png'
# Actual path of the board file used by ShuffleMove
pathBoard = pathShuffleMove + '/config/boards/board.txt'
#BlockSize = 38
BlockSize = 76
|
vbelakov/h2o
|
py/testdir_single_jvm/test_enum_multi_permission.py
|
Python
|
apache-2.0
| 6,221
| 0.009805
|
import unittest, random, sys, time, os, stat, pwd, grp
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
FILENUM=100
def write_syn_dataset(csvPathname, rowCount, colCount, SEED, translateList):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
roll = random.randint(0,1)
# if roll==0:
if 1==1:
# spit out a header
rowData = []
for j in range(colCount):
rowData.append('h' + str(j))
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
for i in range(rowCount):
rowData = []
for j in range(colCount):
ri1 = r1.triangular(0,3,1.5)
ri1Int = int(round(ri1,0))
rowData.append(ri1Int)
if translateList is not None:
for i, iNum in enumerate(rowData):
rowData[i] = translateList[iNum]
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
# print csvPathname
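# Illustrative note (added): each generated file holds a header row h0..h<colCount-1>
# followed by rowCount rows of small integers drawn from a triangular distribution
# (or their letter translations when translateList is given).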
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
print "WARNING: won't work for remote h2o, because syn_datasets is created locally only, for import"
h2o.init(1,java_heap_GB=14)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_cols_multi_permission(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
translateList = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u']
tryList = [
(300, 100, 'cA', 60),
]
# h2b.browseTheCloud()
cnum = 0
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
cnum += 1
# FIX! should we add a header to them randomly???
print "Wait while", FILENUM, "synthetic files are created in", SYNDATASETS_DIR
rowxcol = str(rowCount) + 'x' + str(colCount)
for fileN in range(FILENUM):
csvFilename = 'syn_' + str(fileN) + "_" + str(SEED) + "_" + rowxcol + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
write_syn_dataset(csvPathname, rowCount, colCount, SEED, translateList)
# DON"T get redirected to S3! (EC2 hack in config, remember!)
# use it at the node level directly (because we gen'ed the files.
# use regex. the only files in the dir will be the ones we just created with *fileN* match
parseResult = h2i.import_parse(path=SYNDATASETS_DIR + '/*'+rowxcol+'*', schema='local',
exclude=None, header=1, timeoutSecs=timeoutSecs)
print "parseResult['destination_key']: " + parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
# FIX! h2o strips one of the headers, but treats all the other files with headers as data
print "\n" + parseResult['destination_key'] + ":", \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
# get uid/gid of files the test create (dir here)
origUid = os.getuid()
origGid = os.getgid()
print "my uid and gid:", origUid, origGid
# pick one file to flip
fileList = os.listdir(SYNDATASETS_DIR)
badFile = random.choice(fileList)
badPathname = SYNDATASETS_DIR + "/" + badFile
print "Going to use this file as the bad file:", badPathname
print "checking os.chmod and parse"
# os.chmod(badPathname, stat.S_IRWXU | stat.S_IRWXO)
# always have to re-import because source key is deleted by h2o
parseResult = h2i.import_parse(path=SYNDATASETS_DIR + '/*'+rowxcol+'*', schema='local',
exclude=None, header=1, timeoutSecs=timeoutSecs)
print "parseResult['destination_key']: " + parseResult['destination_key']
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
print "write by owner, only, and parse"
os.chmod(badPathname, stat.S_IWRITE)
parseResult = h2i.import_parse(path=SYNDATASETS_DIR + '/*'+rowxcol+'*', schema='local',
exclude=None, header=1, timeoutSecs=timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
print "execute by owner, only, and parse"
os.chmod(badPathname, stat.S_IEXEC)
h2o.nodes[0].import_files(SYNDATASETS_DIR)
parseResult = h2i.import_parse(path=SYNDATASETS_DIR + '/*'+rowxcol+'*', schema='local',
exclude=None, header=1, timeoutSecs=timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
# change back to normal
# os.chmod(badPathname, stat.S_IRWXU | stat.S_IRWXO)
# how to make this work? disable for now
if (1==0):
# now change uid
badUid = pwd.getpwnam("nobody").pw_uid
badGid = grp.getgrnam("nogroup").gr_gid
print "parsing after one bad uid"
os.chown(badPathname, badUid, origGid)
parseResult = h2i.import_parse(path=SYNDATASETS_DIR + '/*'+rowxcol+'*', schema='local',
exclude=None, header=1, timeoutSecs=timeoutSecs)
print "parsing after one bad gid"
os.chown(badPathname, origUid, badGid)
parseResult = h2i.import_parse(path=SYNDATASETS_DIR + '/*'+rowxcol+'*', schema='local',
exclude=None, header=1, timeoutSecs=timeoutSecs)
os.chown(badPathname, origUid, origGid)
if __name__ == '__main__':
h2o.unit_main()
|
mitodl/ccxcon
|
courses/fields_test.py
|
Python
|
agpl-3.0
| 3,535
| 0.001132
|
"""
Tests for Serializer Fields
"""
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
import pytest
from rest_framework.serializers import ValidationError
from courses.factories import EdxAuthorFactory, CourseFactory
from courses.models import EdxAuthor
from courses.serializers import JsonListField as JLF
from courses.serializers import StringyManyToManyField as SMMF
class JsonListFieldTests(TestCase):
"""
Tests for JsonListField
"""
def test_decodes_string(self):
"""
Test that empty list string decodes properly
"""
f = JLF()
self.assertEqual([], f.to_internal_value('[]'))
def test_decodes_unicode(self):
"""
Test that empty list unicode string decodes properly
"""
f = JLF()
self.assertEqual([], f.to_internal_value(u'[]'))
def test_handles_decoding_nullable_values(self):
"""
Test that null is decoded to None
"""
f = JLF()
self.assertEqual(None, f.to_internal_value('null'))
def test_throws_validationerror_on_invalid_json(self):
"""
Test invalid JSON
"""
f = JLF()
self.assertRaises(ValidationError, f.to_internal_value, 'testing')
def test_not_list(self):
"""
Test that to_internal_value takes only lists
"""
f = JLF()
self.assertRaises(ValidationError, f.to_internal_value, '{}')
class StringyM2MTestCase(TestCase):
"""Tests for m2m stringy field serializer"""
def test_requires_model(self):
"""Field requires a model kwarg"""
self.assertRaises(ImproperlyConfigured, SMMF, lookup='test')
def test_requires_lookup(self):
"""Field requires a lookup kwarg"""
self.assertRaises(ImproperlyConfigured, SMMF, model=EdxAuthor)
    def test_returns_string_for_all_objects(self): # pylint: disable=no-self-use
"""model-to-string returns correct strings"""
e1 = EdxAuthorFactory.create()
e2 = EdxAuthorFactory.create()
co = CourseFactory.create()
co.instructors.add(e1)
co.instructors.add(e2)
f = SMMF(model=EdxAuthor, lookup='edx_uid')
assert sorted([str(e1), str(e2)]) == sorted(f.to_representation(co.instructors))
def test_returns_model_if_string_provided(self): # pylint: disable=no-self-use
"""string-to-model returns correct model for single string"""
uid = '2d133482b3214a119f55c3060d882ceb'
CourseFactory.create()
f = SMMF(model=EdxAuthor, lookup='edx_uid')
ms = f.to_internal_value(uid)
assert len(ms) == 1
assert ms[0].edx_uid == uid
def test_returns_models_if_list_provided(self): # pylint: disable=no-self-use
"""string-to-model returns correct model for list"""
uid = '2d133482b3214a119f55c3060d882ceb'
uid2 = '3d133482b3214a119f55c3060d882ceb'
CourseFactory.create()
f = SMMF(model=EdxAuthor, lookup='edx_uid')
ms = f.to_internal_value([uid, uid2])
assert len(ms) == 2
assert ms[0].edx_uid != ms[1].edx_uid
assert ms[0].edx_uid in [uid, uid2]
assert ms[1].edx_uid in [uid, uid2]
def test_errors_on_invalid_input(self): # pylint: disable=no-self-use
"""Only deserialize known, supported types."""
CourseFactory.create()
f = SMMF(model=EdxAuthor, lookup='edx_uid')
with pytest.raises(ValidationError):
f.to_internal_value(dict())
|
jackchi/interview-prep
|
sorting/bubbleSort.py
|
Python
|
mit
| 738
| 0.023035
|
#! /usr/bin/python
# https://github.com/jackchi/interview-prep
import random
# Bubble Sort
# randomly generate 10 integers from (-100, 100)
arr = [random.randrange(-100,100) for i in range(10)]
print('original %s' % arr)
def bubbleSort(array):
n = len(array)
# traverse thru all elements
for i in range(n):
swapped = False
# traverse thru last i elements already sorted
for j in range(0, n-i-1):
# swap smaller to front
if array[j] > array[j+1]:
                array[j], array[j+1] = array[j+1], array[j]
swapped = True
# IF no two elements were swapped
        # by inner loop, then break
if swapped == False:
break
return array
a = bubbleSort(arr)
print(f"sorted {a}")
|
pixmeter/enma
|
enma/rest/__init__.py
|
Python
|
bsd-3-clause
| 131
| 0.015267
|
from flask import Blueprint
api = Blueprint('api', __name__, url_prefix='/rest/v1.0')
from . import authentication, errors, views
|
autotest/virt-test
|
virttest/utils_config.py
|
Python
|
gpl-2.0
| 13,647
| 0
|
import ast
import logging
import os.path
import ConfigParser
import StringIO
class ConfigError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class ConfigNoOptionError(ConfigError):
def __init__(self, option, path):
self.option = option
self.path = path
def __str__(self):
return "There's no option %s in config file %s." % (
self.option, self.path)
class LibvirtConfigUnknownKeyTypeError(ConfigError):
def __init__(self, key, key_type):
self.key = key
self.key_type = key_type
def __str__(self):
return "Unknown type %s for key %s." % (self.key, self.key_type)
class LibvirtConfigUnknownKeyError(ConfigError):
def __init__(self, key):
self.key = key
def __str__(self):
return 'Unknown config key %s' % self.key
class SectionlessConfig(object):
"""
    This is a wrapper class around Python's standard library ConfigParser that
    allows manipulating a sectionless configuration file in a dict-like way.
Example config file test.conf:
># This is a comment line.
>a = 1
>b = [hi, there]
>c = hello
>d = "hi, there"
>e = [hi,
> there]
Example script using `try...finally...` statement:
>>> from virttest import utils_config
>>> config = utils_config.SectionlessConfig('test.conf')
>>> try:
... print len(config)
... print config
... print config['a']
... del config['a']
    ...     config['f'] = 'test'
... print config
... finally:
... config.restore()
Example script using `with` statement:
>>> from virttest import utils_config
>>> with utils_config.SectionlessConfig('test.conf') as config:
... print len(config)
... print config
... print config['a']
... del config['a']
... config['f'] = 'test'
... print config
"""
def __init__(self, path):
self.path = path
self.parser = ConfigParser.ConfigParser()
        # Prevent converting option names to lower case
self.parser.optionxform = str
self.backup_content = open(path, 'r').read()
read_fp = StringIO.StringIO('[root]\n' + self.backup_content)
self.parser.readfp(read_fp)
def __sync_file(self):
out_file = open(self.path, 'w')
try:
out_file.write(self.__str__())
finally:
out_file.close()
def __len__(self):
return len(self.parser.items('root'))
def __getitem__(self, option):
try:
return self.parser.get('root', option)
except ConfigParser.NoOptionError:
raise ConfigNoOptionError(option, self.path)
def __setitem__(self, option, value):
self.parser.set('root', option, value)
self.__sync_file()
def __delitem__(self, option):
res = self.parser.remove_option('root', option)
if res:
self.__sync_file()
else:
raise ConfigNoOptionError(option, self.path)
def __contains__(self, item):
return self.parser.has_option('root', item)
def __str__(self):
write_fp = StringIO.StringIO()
self.parser.write(write_fp)
return write_fp.getvalue().split('\n', 1)[1]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.restore()
def restore(self):
out_file = open(self.path, 'w')
try:
out_file.write(self.backup_content)
finally:
out_file.close()
def set_raw(self, option, value):
self[option] = "%s" % value
def set_string(self, option, value):
self[option] = '"%s"' % value
def set_int(self, option, value):
self[option] = '%d' % int(value)
def set_float(self, option, value):
self[option] = '%s' % float(value)
def set_boolean(self, option, value):
if type(value) == str:
value = int(value)
if bool(value):
self[option] = '1'
else:
self[option] = '0'
def set_list(self, option, value):
# TODO: line separation
value = ['"%s"' % i for i in list(value)]
self[option] = '[%s]' % ', '.join(value)
def get_raw(self, option):
return self[option]
def get_string(self, option):
raw_str = self[option].strip()
if raw_str.startswith('"') and raw_str.endswith('"'):
raw_str = raw_str[1:-1]
elif raw_str.startswith("'") and raw_str.endswith("'"):
raw_str = raw_str[1:-1]
else:
raise ValueError("Invalid value for string: %s" % raw_str)
return raw_str
def get_int(self, option):
return int(self.get_raw(option))
def get_float(self, option):
return float(self.get_raw(option))
def get_boolean(self, option):
try:
bool_str = self.get_string(option).lower()
except ValueError:
bool_str = str(self.get_int(option))
if bool_str in ["1", "yes", "true", "on"]:
return True
if bool_str in ["0", "no", "false", "off"]:
return False
raise ValueError("Invalid value for boolean: %s" % bool_str)
def get_list(self, option):
list_str = self.get_raw(option)
return [str(i) for i in ast.literal_eval(list_str)]
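# Illustrative sketch (added; not part of the original module): round-tripping a
# typed value through SectionlessConfig. The option name and file path are
# arbitrary examples supplied by the caller.
def _sectionless_config_demo(path):
    with SectionlessConfig(path) as config:
        config.set_list('listen_addrs', ['127.0.0.1', '::1'])
        values = config.get_list('listen_addrs')  # -> ['127.0.0.1', '::1']
    # leaving the `with` block calls restore() and puts the original file back
    return values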
class LibvirtConfigCommon(SectionlessConfig):
"""
    An abstract class to manipulate options of libvirt-related configuration
    files in a property-like way.
    Variables "__option_types__" and "conf_path" must be set up in the
    inheriting classes before use.
    "__option_types__" is a dict containing every possible option as keys and
their type ("boolean", "int", "string", "float" or "list") as values.
Basic usage:
1) Create a config file object:
>>> # LibvirtdConfig is a subclass of LibvirtConfigCommon.
>>> config = LibvirtdConfig()
2) Set or update an option:
>>> config.listen_tcp = True
>>> config.listen_tcp = 1
>>> config.listen_tcp = "1" # All three have the same effect.
    >>> # If the setting value doesn't meet the specified type.
    >>> config.listen_tcp = "invalid"
    >>> # It'll throw a warning message and set a raw string instead.
    >>> # Use set_* methods when you need to customize the result.
>>> config.set_raw("'1'")
3) Get an option:
>>> is_listening = config.listen_tcp
>>> print is_listening
True
4) Delete an option from the config file:
>>> del config.listen_tcp
5) Make the changes take effect in libvirt by restart libvirt daemon.
>>> from virttest import utils_libvirtd
>>> utils_libvirtd.Libvirtd().restart()
6) Restore the content of the config file.
>>> config.restore()
"""
__option_types__ = {}
conf_path = ''
def __init__(self, path=''):
if path:
self.conf_path = path
if not self.conf_path:
raise ConfigError("Path for config file is not set up.")
if not self.__option_types__:
raise ConfigError("__option_types__ is not set up.")
if not os.path.isfile(self.conf_path):
raise ConfigError("Path for config file %s don't exists."
% self.conf_path)
super(LibvirtConfigCommon, self).__init__(self.conf_path)
def __getattr__(self, key):
if key in self.__option_types__:
key_type = self.__option_types__[key]
if key_type not in ['boolean', 'int', 'float', 'string', 'list']:
raise LibvirtConfigUnknownKeyTypeError(key, key_type)
else:
get_func = eval('self.get_' + key_type)
try:
return get_func(key)
except ConfigNoOptionError:
return None
else:
raise LibvirtConfigUnknownKeyError(key)
def __setattr__(self, key, value):
if key in self.__option_types__:
key_type = self.__option_types__[key]
if key_type not in ['boolean', 'int', 'float',
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/sympy/simplify/fu.py
|
Python
|
gpl-3.0
| 63,469
| 0.000394
|
"""
Implementation of the trigsimp algorithm by Fu et al.
The idea behind the ``fu`` algorithm is to use a sequence of rules, applied
in what is heuristically known to be a smart order, to select a simpler
expression that is equivalent to the input.
There are transform rules in which a single rule is applied to the
expression tree. The following are just mnemonic in nature; see the
docstrings for examples.
TR0 - simplify expression
TR1 - sec-csc to cos-sin
TR2 - tan-cot to sin-cos ratio
TR2i - sin-cos ratio to tan
TR3 - angle canonicalization
TR4 - functions at special angles
TR5 - powers of sin to powers of cos
TR6 - powers of cos to powers of sin
TR7 - reduce cos power (increase angle)
TR8 - expand products of sin-cos to sums
TR9 - contract sums of sin-cos to products
TR10 - separate sin-cos arguments
TR10i - collect sin-cos arguments
TR11 - reduce double angles
TR12 - separate tan arguments
TR12i - collect tan arguments
TR13 - expand product of tan-cot
TRmorrie - prod(cos(x*2**i), (i, 0, k - 1)) -> sin(2**k*x)/(2**k*sin(x))
TR14 - factored powers of sin or cos to cos or sin power
TR15 - negative powers of sin to cot power
TR16 - negative powers of cos to tan power
TR22 - tan-cot powers to negative powers of sec-csc functions
TR111 - negative sin-cos-tan powers to csc-sec-cot
There are 4 combination transforms (CTR1 - CTR4) in which a sequence of
transformations are applied and the simplest expression is selected from
a few options.
Finally, there are the 2 rule lists (RL1 and RL2), which apply a
sequence of transformations and combined transformations, and the ``fu``
algorithm itself, which applies rules and rule lists and selects the
best expressions. There is also a function ``L`` which counts the number
of trigonometric functions that appear in the expression.
Other than TR0, re-writing of expressions is not done by the transformations.
e.g. TR10i finds pairs of terms in a sum that are in the form like
``cos(x)*cos(y) + sin(x)*sin(y)``. Such expressions are targeted in a bottom-up
traversal of the expression, but no manipulation to make them appear is
attempted. For example,
Set-up for examples below:
>>> from sympy.simplify.fu import fu, L, TR9, TR10i, TR11
>>> from sympy import factor, sin, cos, powsimp
>>> from sympy.abc import x, y, z, a
>>> from time import time
>>> eq = cos(x + y)/cos(x)
>>> TR10i(eq.expand(trig=True))
-sin(x)*sin(y)/cos(x) + cos(y)
If the expression is put in "normal" form (with a common denominator) then
the transformation is successful:
>>> TR10i(_.normal())
cos(x + y)/cos(x)
TR11's behavior is similar. It rewrites double angles as smaller angles but
doesn't do any simplification of the result.
>>> TR11(sin(2)**a*cos(1)**(-a), 1)
(2*sin(1)*cos(1))**a*cos(1)**(-a)
>>> powsimp(_)
(2*sin(1))**a
The temptation is to try make these TR rules "smarter" but that should really
be done at a higher level; the TR rules should try maintain the "do one thing
well" principle. There is one exception, however. In TR10i and TR9 terms are
recognized even when they are each multiplied by a common factor:
>>> fu(a*cos(x)*cos(y) + a*sin(x)*sin(y))
a*cos(x - y)
Factoring with ``factor_terms`` is used but it is "JIT"-like, being delayed
until it is deemed necessary. Furthermore, if the factoring does not
help with the simplification, it is not retained, so
``a*cos(x)*cos(y) + a*sin(x)*sin(z)`` does not become the factored
(but unsimplified in the trigonometric sense) expression:
>>> fu(a*cos(x)*cos(y) + a*sin(x)*sin(z))
a*sin(x)*sin(z) + a*cos(x)*cos(y)
In some cases factoring might be a good idea, but the user is left
to make that decision. For example:
>>> expr=((15*sin(2*x) + 19*sin(x + y) + 17*sin(x + z) + 19*cos(x - z) +
... 25)*(20*sin(2*x) + 15*sin(x + y) + sin(y + z) + 14*cos(x - z) +
... 14*cos(y - z))*(9*sin(2*y) + 12*sin(y + z) + 10*cos(x - y) + 2*cos(y -
... z) + 18)).expand(trig=True).expand()
In the expanded state, there are nearly 1000 trig functions:
>>> L(expr)
932
If the expression were factored first, this would take time but the
resulting expression would be transformed very quickly:
>>> def clock(f, n=2):
... t=time(); f(); return round(time()-t, n)
...
>>> clock(lambda: factor(expr)) # doctest: +SKIP
0.86
>>> clock(lambda: TR10i(expr), 3) # doctest: +SKIP
0.016
If the unexpanded expression is used, the transformation takes longer but
not as long as it took to factor it and then transform it:
>>> clock(lambda: TR10i(expr), 2) # doctest: +SKIP
0.28
So neither expansion nor factoring is used in ``TR10i``: if the
expression is already factored (or partially factored) then expansion
with ``trig=True`` would destroy what is already known and take
longer; if the expression is expanded, factoring may take longer than
simply applying the transformation itself.
Although the algorithms should be canonical, always giving the same
result, they may not yield the best result. This, in general, is
the nature of simplification where searching all possible transformation
paths is very expensive. Here is a simple example. There are 6 terms
in the following sum:
>>> expr = (sin(x)**2*cos(y)*cos(z) + sin(x)*sin(y)*cos(x)*cos(z) +
... sin(x)*sin(z)*cos(x)*cos(y) + sin(y)*sin(z)*cos(x)**2 + sin(y)*sin(z) +
... cos(y)*cos(z))
>>> args = expr.args
Serendipitously, fu gives the best result:
>>> fu(expr)
3*cos(y - z)/2 - cos(2*x + y + z)/2
But if different terms were combined, a less-optimal result might be
obtained, requiring some additional work to get better simplification,
but still less than optimal. The following shows an alternative form
of ``expr`` that resists optimal simplification once a given step
is taken since it leads to a dead end:
>>> TR9(-cos(x)**2*cos(y + z) + 3*cos(y - z)/2 +
... cos(y + z)/2 + cos(-2*x + y + z)/4 - cos(2*x + y + z)/4)
sin(2*x)*sin(y + z)/2 - cos(x)**2*cos(y + z) + 3*cos(y - z)/2 + cos(y + z)/2
Here is a smaller expression that exhibits the same behavior:
>>> a = sin(x)*sin(z)*cos(x)*cos(y) + sin(x)*sin(y)*cos(x)*cos(z)
>>> TR10i(a)
sin(x)*sin(y + z)*cos(x)
>>> newa = _
>>> TR10i(expr - a) # this combines two more of the remaining terms
sin(x)**2*cos(y)*cos(z) + sin(y)*sin(z)*cos(x)**2 + cos(y - z)
>>> TR10i(_ + newa) == _ + newa # but now there is no more simplification
True
Without getting lucky or trying all possible pairings of arguments, the
final result may be less than optimal and impossible to find without
better heuristics or brute force trial of all possibilities.
Notes
=====
This work was started by Dimitar Vlahovski at the Technological School
"Electronic systems" (30.11.2011).
References
==========
http://rfdz.ph-noe.ac.at/fileadmin/Mathematik_Uploads/ACDCA/
DESTIME2006/DES_contribs/Fu/simplification.pdf
http://www.sosmath.com/trig/Trig5/trig5/pdf/pdf.html gives a formula sheet.
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import combinations
from sympy.simplify.simplify import (simplify, powsimp, ratsimp, combsimp,
_mexpand, bottom_up)
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import (
cos, sin, tan, cot, sec, csc, sqrt)
from sympy.functions.elementary.hyperbolic import cosh, sinh, tanh, coth
from sympy.core.compatibility import ordered
from sympy.core.core import C
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.function import expand_mul, count_ops
from sympy.core.add import Add
from sympy.core.symbol import Dummy
from sympy.core.exprtools import Factors, gcd_terms
from sympy.core.rules import Transform
from sympy.core.basic import S
from sympy.core.numbers import Integer, pi, I
from sympy.strategies.tree import greedy
from sympy.strategies.core import identity, debug
from sympy.polys.polytools import factor
from sympy.ntheory.factor_ import perfect_power
from sympy import SYMPY_DEBUG
# ================== Fu-like tools ===========================
def TR0(rv):
"""Simplification of rational polynomials, trying to simplify
the expression, e.g. combine things
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/algorithms/isomorphism/isomorph.py
|
Python
|
mit
| 6,757
| 0
|
"""
Graph isomorphism functions.
"""
import networkx as nx
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Christopher Ellison cellison@cse.ucdavis.edu)'])
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['could_be_isomorphic',
'fast_could_be_isomorphic',
'faster_could_be_isomorphic',
'is_isomorphic']
def could_be_isomorphic(G1, G2):
"""Returns False if graphs are definitely not isomorphic.
True does NOT guarantee isomorphism.
Parameters
----------
G1, G2 : graphs
The two graphs G1 and G2 must be the same type.
Notes
-----
Checks for matching degree, triangle, and number of cliques sequences.
"""
# Check global properties
if G1.order() != G2.order():
return False
# Check local properties
d1 = G1.degree()
t1 = nx.triangles(G1)
c1 = nx.number_of_cliques(G1)
props1 = [[d, t1[v], c1[v]] for v, d in d1]
props1.sort()
d2 = G2.degree()
t2 = nx.triangles(G2)
c2 = nx.number_of_cliques(G2)
props2 = [[d, t2[v], c2[v]] for v, d in d2]
props2.sort()
if props1 != props2:
return False
# OK...
return True
graph_could_be_isomorphic = could_be_isomorphic
def fast_could_be_isomorphic(G1, G2):
"""Returns False if graphs are definitely not isomorphic.
True does NOT guarantee isomorphism.
Parameters
----------
G1, G2 : graphs
The two graphs G1 and G2 must be the same type.
Notes
-----
Checks for matching degree and triangle sequences.
"""
# Check global properties
if G1.order() != G2.order():
return False
# Check local properties
d1 = G1.degree()
t1 = nx.triangles(G1)
props1 = [[d, t1[v]] for v, d in d1]
props1.sort()
d2 = G2.degree()
t2 = nx.triangles(G2)
props2 = [[d, t2[v]] for v, d in d2]
props2.sort()
if props1 != props2:
return False
# OK...
return True
fast_graph_could_be_isomorphic = fast_could_be_isomorphic
def faster_could_be_isomorphic(G1, G2):
"""Returns False if graphs are definitely not isomorphic.
    True does NOT guarantee isomorphism.
Parameters
----------
G1, G2 : graphs
The two graphs G1 and G2 must be the same type.
Notes
-----
Checks for matching degree sequences.
"""
# Check global properties
if G1.order() != G2.order():
return False
# Check local properties
d1 = sorted(d for n, d in G1.degree())
d2 = sorted(d for n, d in G2.degree())
if d1 != d2:
return False
# OK...
return True
faster_graph_could_be_isomorphic = faster_could_be_isomorphic
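# Illustrative example (added; not part of the original module): the three
# could_be_isomorphic filters above are cheap necessary conditions that can
# rule out isomorphism before running the full VF2 matcher below, e.g.
#
#     >>> G1, G2 = nx.path_graph(4), nx.star_graph(3)
#     >>> faster_could_be_isomorphic(G1, G2)   # degree sequences differ
#     False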
def is_isomorphic(G1, G2, node_match=None, edge_match=None):
"""Returns True if the graphs G1 and G2 are isomorphic and False otherwise.
Parameters
----------
G1, G2: graphs
The two graphs G1 and G2 must be the same type.
node_match : callable
A function that returns True if node n1 in G1 and n2 in G2 should
be considered equal during the isomorphism test.
If node_match is not specified then node attributes are not considered.
The function will be called like
node_match(G1.nodes[n1], G2.nodes[n2]).
That is, the function will receive the node attribute dictionaries
for n1 and n2 as inputs.
edge_match : callable
A function that returns True if the edge attribute dictionary
for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
be considered equal during the isomorphism test. If edge_match is
not specified then edge attributes are not considered.
The function will be called like
edge_match(G1[u1][v1], G2[u2][v2]).
That is, the function will receive the edge attribute dictionaries
of the edges under consideration.
Notes
-----
Uses the vf2 algorithm [1]_.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
For digraphs G1 and G2, using 'weight' edge attribute (default: 1)
>>> G1 = nx.DiGraph()
>>> G2 = nx.DiGraph()
>>> nx.add_path(G1, [1,2,3,4], weight=1)
>>> nx.add_path(G2, [10,20,30,40], weight=2)
>>> em = iso.numerical_edge_match('weight', 1)
>>> nx.is_isomorphic(G1, G2) # no weights considered
True
>>> nx.is_isomorphic(G1, G2, edge_match=em) # match weights
False
For multidigraphs G1 and G2, using 'fill' node attribute (default: '')
>>> G1 = nx.MultiDiGraph()
>>> G2 = nx.MultiDiGraph()
>>> G1.add_nodes_from([1,2,3], fill='red')
>>> G2.add_nodes_from([10,20,30,40], fill='red')
>>> nx.add_path(G1, [1,2,3,4], weight=3, linewidth=2.5)
>>> nx.add_path(G2, [10,20,30,40], weight=3)
>>> nm = iso.categorical_node_match('fill', 'red')
>>> nx.is_isomorphic(G1, G2, node_match=nm)
True
For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7)
>>> G1.add_edge(1,2, weight=7)
1
>>> G2.add_edge(10,20)
1
>>> em = iso.numerical_multiedge_match('weight', 7, rtol=1e-6)
>>> nx.is_isomorphic(G1, G2, edge_match=em)
True
For multigraphs G1 and G2, using 'weight' and 'linewidth' edge attributes
with default values 7 and 2.5. Also using 'fill' node attribute with
default value 'red'.
>>> em = iso.numerical_multiedge_match(['weight', 'linewidth'], [7, 2.5])
>>> nm = iso.categorical_node_match('fill', 'red')
>>> nx.is_isomorphic(G1, G2, edge_match=em, node_match=nm)
True
See Also
--------
numerical_node_match, numerical_edge_match, numerical_multiedge_match
categorical_node_match, categorical_edge_match, categorical_multiedge_match
References
----------
.. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento,
"An Improved Algorithm for Matching Large Graphs",
3rd IAPR-TC15 Workshop on Graph-based Representations in
Pattern Recognition, Cuen, pp. 149-159, 2001.
http://amalfi.dis.unina.it/graph/db/papers/vf-algorithm.pdf
"""
if G1.is_directed() and G2.is_directed():
GM = nx.algorithms.isomorphism.DiGraphMatcher
elif (not G1.is_directed()) and (not G2.is_directed()):
GM = nx.algorithms.isomorphism.GraphMatcher
else:
raise NetworkXError("Graphs G1 and G2 are not of the same type.")
gm = GM(G1, G2, node_match=node_match, edge_match=edge_match)
return gm.is_isomorphic()
|
mainconceptx/DAS
|
contrib/spendfrom/spendfrom.py
|
Python
|
mit
| 9,887
| 0.005664
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend dass received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a dasd or Das-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the das data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Das/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Das")
return os.path.expanduser("~/.das")
def read_bitcoin_config(dbdir):
"""Read the das.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
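    # Note (added comment): FakeSecHead above prepends a fake "[all]" section
    # header and strips trailing "#" comments so that SafeConfigParser can read
    # the section-less das.conf file.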
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "das.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a das JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19998 if testnet else 9998
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the dasd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(dasd):
info = dasd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
dasd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = dasd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(dasd):
address_summary = dict()
address_to_account = dict()
for info in dasd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = dasd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = dasd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-das-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
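# Illustrative note (added): select_coins greedily takes inputs in order until
# the target is covered; e.g. with needed=Decimal("1.5") and two inputs of
# amount 1.0 each, it returns both outpoints and a change of 0.5.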
def create_tx(dasd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(dasd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to dasd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = dasd.createrawtransaction(inputs, outputs)
signed_rawtx = dasd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(dasd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = dasd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(dasd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = dasd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(dasd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get dass from")
parser.add_option("--to", dest="to", default=None,
help="address to get send dass to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
|
quattor/aquilon
|
lib/aquilon/worker/commands/show_resource.py
|
Python
|
apache-2.0
| 2,445
| 0
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008-2015,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm.attributes import set_committed_value
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.resources import get_resource_holder
class CommandShowResource(BrokerCommand):
resource_class = None
resource_name = None
def render(self, session, logger, hostname, cluster, metacluster, all,
personality=None, archetype=None, grn=None, eon_id=None,
host_environment=None, **kwargs):
# resourcegroup is special, because it's both a holder and a resource
# itself
if self.resource_name != "resource
|
group":
resourcegroup = kwargs.pop("resourcegroup", None)
else:
resourcegroup = None
q = session.query(self.resource_class)
who = None
if not all:
if self.resource_name:
name = kwargs.get(self.resource_name)
else:
name = self.resource_class.__mapper__.polymorphic_identity
if n
|
ame:
q = q.filter_by(name=name)
if hostname or cluster or resourcegroup or personality or \
archetype or grn or eon_id:
who = get_resource_holder(session, logger, hostname, cluster,
metacluster, resourcegroup,
personality, archetype, grn, eon_id,
host_environment, config=self.config,
**kwargs)
q = q.filter_by(holder=who)
results = q.all()
if who:
for dbresource in results:
set_committed_value(dbresource, 'holder', who)
return results
|
denkab/FrameworkBenchmarks
|
frameworks/PHP/hhvm/setup.py
|
Python
|
bsd-3-clause
| 1,384
| 0.020954
|
import subprocess
import setup_util
import os
def start(args, logfile, errfile):
setup_util.replace_text("hhvm/once.php.inc", "host=localhost;", "host=" + args.database_host + ";")
setup_util.replace_text("hhvm/deploy/config.hdf", "SourceRoot = .*\/FrameworkBenchmarks/hhvm", "SourceRoot = " + args.troot)
setup_util.replace_text("hhvm/depl
|
oy/config.hdf", "Path = .*\/.hhvm.hhbc", "Path = " + args.troot + "/.hhvm.bbhc")
setup_util.replace_text("hhvm/deploy/config.hdf", "PidFile = .*\/hhvm.pid", "PidFile = " + args.troot + "/hhvm.pid")
setup_util.replace_text("hhvm/deploy/config.hdf", "File = .*\/error.log", "File = " + args.troot + "/error.log")
try:
if os.name == 'nt':
# Not supported !
return 0
subprocess.check_call("hhvm --config $TROOT/deploy/config.hdf -m daemon", shell
|
=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
if os.name == 'nt':
# Not Supported !
return 0
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'hhvm' in line and 'toolset' not in line and 'run-ci' not in line and 'run-tests' not in line:
pid = int(line.split(None,2)[1])
os.kill(pid,15)
return 0
except subprocess.CalledProcessError:
return 1
|
sserkez/ocelot
|
demos/sr/k_diode.py
|
Python
|
gpl-3.0
| 2,071
| 0.011106
|
__author__ = 'Sergey Tomin'
from ocelot.rad import *
from ocelot import *
from ocelot.gui import *
import numpy as np
import time
font = {'size' : 14}
matplotlib.rc('font', **font)
#from scipy.optimize import curve_fit
from ocelot.demos.sr.k_analysis import *
#from ocelot.lib.genera.src.python.radiation import generaSR
font = {'size' : 14}
matplotlib.rc('font', **font)
beam = Beam()
beam.E = 17.5
beam.I = 0.1
und = Undulator(Kx = 4., nperiods = 125, lperiod=0.04, eid= "und")
lat = MagneticLattice((und))
screen = Screen()
screen.z = 500.0
screen.size_x = 0.
screen.size_y = 0.
screen.nx = 1
screen.ny = 1
screen.start_energy = 7950 #eV
screen.end_energy = 8200 #eV
screen.num_energy = 1000
screen = calculate_radiation(lat, screen, beam)
show_flux(screen, unit="mrad")
# K-mono scan
beam_energy = 17.5 # GeV
b_energy_jit = 1e-4 # dE/E
screen = Screen()
screen.z = 500.0
screen.size_x = 0.01
screen.size_y = 0.01
screen.nx = 51
screen.ny = 51
ds = screen.size_x/screen.nx*screen.size_y/screen.ny
n_scan_points = 30
n_shots = 5
scan_Kmono_energy = np.linspace(start=8000, stop=8150, num=n_scan_points)
start = time.time()
flux = []
Etotal = []
for n, eph in enumerate(scan_Kmono_energy):
print(n, "/", n
|
_scan_points)
for
|
i in range(n_shots):
beam.E = np.random.normal(beam_energy, beam_energy*b_energy_jit, 1)
print("beam energy: ", beam.E)
screen.start_energy = eph # 8078.2 - 50 + i*100/30. #eV
screen.num_energy = 1
screen = calculate_radiation(lat, screen, beam)
flux.append(sum(screen.Total)*ds)
Etotal.append(eph)
print("time cpp = ", start - time.time())
e_fin, polynom = data_analysis(Etotal, flux=flux, method="least")
print("Eph_fin = ", e_fin)
x = np.linspace(Etotal[0], Etotal[-1], num=100)
plt.plot(Etotal, flux, "r.", lw =2, label="exp data")
plt.plot(x, polynom(x), "b", label="fit func")
plt.plot(e_fin, polynom(e_fin), "go", lw = 3, label=r"$E_{ph}=$" + str(np.around(e_fin, decimals=2)))
plt.xlabel(r"$E_{ph}$, eV")
plt.grid(True)
plt.legend()
plt.show()
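# Rough stand-in for the data_analysis() helper imported from k_analysis above,
# written with plain numpy for illustration only (assumption: the real helper
# likewise fits a low-order polynomial to flux(E) and returns the peak energy).
def fit_peak_energy(energies, flux_values, degree=2):
    coeffs = np.polyfit(energies, flux_values, degree)
    poly = np.poly1d(coeffs)
    grid = np.linspace(min(energies), max(energies), 1000)
    return grid[np.argmax(poly(grid))], poly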
|
ibabushkin/Iridium
|
defines/util/tree.py
|
Python
|
gpl-3.0
| 1,538
| 0
|
"""
File: tree.py
Author: Inokentiy Babushkin
Email: inokentiy.babushkin@googlemail.com
Github: ibabushkin
Description:
A tree used to store DFS-trees for the CFG-module.
"""
class Tree(object):
"""
The tree mentioned in the module-docstring.
    Probably a Ginkgo.
"""
def __init__(self, obj):
self.nodes = [TreeNode(obj)]
self.edges = []
self.current_node = 0
def append(self, obj):
"""
Append an object as a node to the tree.
Use the current node as the place for insertion.
"""
self.nodes.append(TreeNode(obj))
self.edges.append((self.current_node, len(self.nodes) - 1))
self.curre
|
nt_node = len(self.nodes) - 1
# print self.c
|
urrent_node
def get_children_of(self, index):
"""
Get a node's children.
"""
ret = []
for edge in self.edges:
if edge[0] == index:
ret.append(edge[1])
return ret
def postorder(self, start=0):
"""
Get the postorder traversal of the tree.
"""
ret = [self.get_content(start).id]
for node in self.get_children_of(start):
ret = self.postorder(node) + ret
return ret
def get_content(self, index):
"""
Get the object saved in a node.
"""
return self.nodes[index].content
class TreeNode(object):
"""
A node for the tree.
Can save an object.
"""
def __init__(self, obj):
self.content = obj
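# Illustrative usage (assumption: objects stored in the tree expose an `id`
# attribute, since postorder() reads content.id):
if __name__ == '__main__':
    class _Block(object):
        def __init__(self, id_):
            self.id = id_
    t = Tree(_Block(0))      # node 0 becomes the root
    t.append(_Block(1))      # child of node 0
    t.current_node = 0       # reset insertion point back to the root
    t.append(_Block(2))      # second child of node 0
    print(t.postorder())     # -> [2, 1, 0]: children's subtrees first, root last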
|
bobbysoon/Taxi3
|
Swarm.py
|
Python
|
unlicense
| 1,110
| 0.062162
|
from Centroid import Centroid
from Vec2 import Vec2
from random import random
from math import *
from angle import angle
from Seed import *
Seed()
class Swarm(list):
def __init__(self, count):
self.speed= 1.0/16.0
self.paused= False
def __new__(cls, count):
swarm= list.__new__(cls)
for n in range(count):
x= random()-random()
y= random()-random()
c= Centroid(x,y)
c.inertia= Vec2(0,0)
swarm.append(c)
return swarm
def repel(self, step):
for i in range(1,len(self)):
for j in range(i):
if self[i] in self[j].neighbors:
assert self[j] in self[i].neighbors
a=angle(self[j],self[i])
dx,dy = self[i]-self[j]
dist= sqrt(dx*dx+dy*dy)
push= 1.0/dist
a+=1.5707*push
push= sin(a)*push*step,cos(a)*push*step
self
|
[i].inertia+= push
self[j].inertia-= push
def move(self, step):
if self.paused: return
|
self.repel(step)
step*= self.speed
for c in self:
c+= c.inertia*step
if abs(c.x)>=1:
c.inertia.x*=-1
c.x+=c.inertia.x*2*step
if abs(c.y)>=1:
c.inertia.y*=-1
c.y+=c.inertia.y*2*step
c.clear()
|
bashalex/datapot
|
setup.py
|
Python
|
gpl-3.0
| 1,582
| 0.001896
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
CURRENT_DIR = os.path.dirname(__file__)
setup(name='datapot',
description='Library for automatic feature extraction from JSON-datasets',
long_description=open(os.path.join(CURRENT_DIR, 'README.rst')).read(),
version='0.1.3',
url='https://github.com/bashalex/datapot',
author='Alex Bash, Yuriy Mokriy, Nikita Saveyev, Michal Rozenwald, Peter Romov',
author_email='avbashlykov@gmail.com, yurymokriy@gmail.com, n.a.savelyev@gmail.com, michal.rozenwald@gmail.com, romovpa@gmail.com',
license='GNU v3.0',
maintainer='Nikita Savelyev',
maintainer_email='n.a.savelyev@gmail.com',
install_requires=[
'numpy >= 1.6.1',
'scipy >= 0.17.0',
'pandas >= 0.17.1',
'scikit-learn >= 0.17.1',
'iso-639 >= 0.4.5',
|
'langdetect >= 1.0.7',
'gensim >= 2.1.0',
'nltk >= 3.2.4',
'tsfresh >= 0.7.1',
'python-dateutil >= 2.6.0',
'fastnumbers >= 2.0.1',
'pystemmer >= 1.3.0',
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Pro
|
gramming Language :: Python :: 3',
'Programming Language :: Python :: 2',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
],
packages=find_packages())
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_5/ar_12/test_artificial_1024_Quantization_PolyTrend_5_12_100.py
|
Python
|
bsd-3-clause
| 270
| 0.085185
|
import pyaf.Bench.TS_datasets as tsds
im
|
port tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "Quantization", sigma
|
= 0.0, exog_count = 100, ar_order = 12);
|
threefoldfoundation/app_backend
|
plugins/tff_backend/migrations/_009_change_token_value.py
|
Python
|
bsd-3-clause
| 1,840
| 0.002174
|
# -*- coding: utf-8 -*-
# Copyright 2018 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.4@@
from mcfw.consts import DEBUG
from plugins.rogerthat_api.api import system
from plugins.tff_backend.bizz import get_tf_token_api_key
from plugins.tff_backend.bizz.global_stats import _get_currency_conversions
from plugins.tff_backend.models.global_stats import GlobalStats
from plugins.tff_backend.plugin_consts import BUY_TOKENS_TAG, BUY_TOKENS_FLOW_V5
def migrate():
for stats_model in GlobalStat
|
s.query(): # type: GlobalStats
new_value = stats_model.value / 100
currencies = _get_currency_conversions(stats_model.currencies,
|
new_value)
stats_model.populate(currencies=currencies, value=new_value)
stats_model.put()
coords = [2, 1, 0]
icon_name = 'fa-suitcase'
label = 'Purchase iTokens'
flow = BUY_TOKENS_FLOW_V5
api_key = get_tf_token_api_key()
roles = system.list_roles(api_key)
menu_item_roles = []
for role in roles:
if role.name in ('invited', 'members'):
menu_item_roles.append(role.id)
system.put_menu_item(api_key, icon_name, BUY_TOKENS_TAG, coords, None, label, static_flow=flow,
roles=[] if DEBUG else menu_item_roles, fall_through=True)
system.publish_changes(api_key)
|
mbsat/gr-poes-weather
|
apps/FY1/gui/usrp_rx_fy1_bb_hrpt.py
|
Python
|
gpl-3.0
| 19,883
| 0.024342
|
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: USRP Feng Yun 1 HRPT Receiver
# Author: POES Weather Ltd
# Description: Feng Yun 1 HRPT Receiver
# Generated: Fri Jan 7 15:21:35 2011
##################################################
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import noaa
from gnuradio import window
from gnuradio.eng_option import eng_option
from gnuradio.gr import firdes
from gnuradio.wxgui import fftsink2
from gnuradio.wxgui import forms
from gnuradio.wxgui import numbersink2
from grc_gnuradio import usrp as grc_usrp
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
from time import strftime, localtime
import ConfigParser
import math, os
import poesweather
import wx
class usrp_rx_fy1_bb_hrpt(grc_wxgui.top_block_gui):
def __init__(self, side="A", gain=35, sync_check=False, decim=16, satellite='FENGYUN-1D', frames_file=os.environ['HOME'] + '/FENGYUN-1D.hrpt', baseband_file=os.environ['HOME'] + '/FENGYUN-1D.dat', freq=1700.5e6):
grc_wxgui.top_block_gui.__init__(self, title="USRP Feng Yun 1 HRPT Receiver")
_icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
##################################################
# Parameters
##################################################
self.side = side
self.gain = gain
self.sync_check = sync_check
self.decim = decim
self.satellite = satellite
self.frames_file = frames_file
self.baseband_file = baseband_file
self.freq = freq
##################################################
# Variables
##################################################
self.sym_rate = sym_rate = 600*1109*2
self.samp_rate = samp_rate = 64e6/decim
self.config_filename = config_filename = os.environ['HOME']+'/.gnuradio/fy1_hrpt.conf'
self.sps = sps = samp_rate/sym_rate
self._saved_pll_alpha_config = ConfigParser.ConfigParser()
self._saved_pll_alpha_config.read(config_filename)
try: saved_pll_alpha = self._saved_pll_alpha_config.getfloat("satname", 'pll_alpha')
except: saved_pll_alpha = 0.005
self.saved_pll_alpha = saved_pll_alpha
self._saved_clock_alpha_config = ConfigParser.ConfigParser()
self._saved_clock_alpha_config.read(config_filename)
try: saved_clock_alpha = self._saved_clock_alpha_config.getfloat("satname", 'clock_alpha')
except: saved_clock_alpha = 0.001
self.saved_clock_alpha = saved_clock_alpha
self.sync_check_txt = sync_check_txt = sync_check
self.side_text = side_text = side
self._saved_gain_config = ConfigParser.ConfigParser()
self._saved_gain_config.read(config_filename)
try: saved_gain = self._saved_gain_config.getfloat("satname", 'gain')
except: saved_gain = gain
self.saved_gain = saved_gain
self.satellite_text = satellite_text = satellite
self.sample_rate_text = sample_rate_text = samp_rate
self.pll_alpha = pll_alpha = saved_pll_alpha
self.max_clock_offset = max_clock_offset = 0.1
self.max_carrier_offset = max_carrier_offset = 2*math.pi*100e3/samp_rate
self.hs = hs = int(sps/2.0)
self.gain_slider = gain_slider = gain
self.freq_tb = freq_tb = freq
self.frames_outfile_text = frames_outfile_text = frames_file
self.decim_tb = decim_tb = decim
self.datetime_text = datetime_text = strftime("%A, %B %d %Y %H:%M:%S", localtime())
self.clock_alpha = clock_alpha = saved_clock_alpha
self.baseband_outfile_text = baseband_outfile_text = baseband_file
##################################################
# Notebooks
##################################################
self.displays = wx.Notebook(self.GetWin(), style=wx.NB_TOP)
self.displays.AddPage(grc_wxgui.Panel(self.displays), "RX Feng Yun 1 HRPT")
self.displays.AddPage(grc_wxgui.Panel(self.displays), "Information")
self.Add(self.displays)
##################################################
# Controls
##################################################
self._sync_check_txt_static_text = forms.static_text(
parent=self.GetWin(),
value=self.sync_check_txt,
callback=self.set_sync_check_txt,
label="Sync check",
converter=forms.float_converter(),
)
self.GridAdd(self._sync_check_txt_static_text, 0, 2, 1, 1)
self._side_text_static_text = forms.static_text(
parent=self.GetWin(),
value=self.side_text,
callback=self.set_side_text,
label="USRP Side",
converter=forms.str_converter(),
)
self.GridAdd(self._side_text_static_text, 0, 0, 1, 1)
self._satellite_text_static_text = forms.static_text(
parent=self.GetWin(),
value=self.satellite_text,
callback=self.set_satellite_text,
label="Satellite",
converter=forms.str_converter(),
)
self.GridAdd(self._satellite_text_static_text, 0, 1, 1, 1)
self._sample_rate_text_static_text = forms.static_text(
parent=self.displays.GetPage(1).GetWin(),
value=self.sample_rate_text,
callback=self.set_sample_rate_text,
label="Sample rate",
converter=forms.float_converter(),
)
self.displays.GetPage(1).GridAdd(self._sample_rate_text_static_text, 3, 0, 1, 1)
_pll_alpha_sizer = wx.BoxSizer(wx.VERTICAL)
self._pll_alpha_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_pll_alpha_sizer,
value=self.pll_alpha,
callback=self.set_pll_alpha,
label="PLL Alpha",
converter=forms.float_converter(),
proportion=0,
)
self._pll_alpha_slider = forms.slider(
parent=self.GetWin(),
sizer=_pll_alpha_sizer,
value=self.pll_alpha,
callback=self.set_pll_alpha,
minimum=0.005,
maximum=0.5,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.GridAdd(_pll_alpha_sizer, 2, 1, 1, 1)
_gain_slider_sizer = wx.BoxSizer(wx.VERTICAL)
self._gain_slider_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_gain_slider_sizer,
value=self.gain_slider,
callback=self.set_gain_slider,
label="Gain",
converter=forms.int_converter(),
proportion=0,
)
self._gain_slider_slider = forms.slider(
parent=self.GetWin(),
sizer=_gain_slider_sizer,
value=self.gain_slider,
callback=self.set_gain_slider,
minimum=0,
maximum=100,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=int,
proportion=1,
)
self.GridAdd(_gain_slider_sizer, 2, 0, 1, 1)
self._freq_tb_text_box = forms.text_box(
parent=self.GetWin(),
value=self.freq_tb,
callback=self.set_freq_tb,
label="Frequency",
converter=forms.float_converter(),
)
self.GridAdd(self._freq_tb_text_box, 1, 1, 1, 1)
self._frames_outfile_text_static_text = forms.static_text(
parent=self.displays.GetPage(1).GetWin(),
value=self.frames_outfile_text,
callback=self.set_frames_outfile_text,
label="Frames filename",
converter=forms.str_converter(),
)
self.displays.GetPage(1).GridAdd(self._frames_outfile_text_static_text, 5, 0, 1, 1)
self._decim_tb_text_box = forms.text_box(
parent=self.GetWin(),
value=self.decim_tb,
callback=self.set_decim_tb,
label="Decimation",
converter=forms.int_converter(),
)
self.GridAdd(self._decim_tb_text_box, 1,
|
0, 1, 1)
self._datetime_text_static_text = forms.static_text(
parent=self.displays.GetPage(1).GetWin(),
value=self.datetime_text,
callback=self.set_datetime_text,
label="Acquisition start",
converter=forms.str_converter(),
)
self.displays.GetPage(1).GridAdd(self._datetime_text_static_text, 2, 0, 1, 1)
_clock_alpha_sizer = wx.BoxSizer(wx.VERTICAL
|
)
self._clock_alpha_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_clock_alpha_sizer,
value=self.clock_alpha,
callback=self.set_clock_alpha,
label="Clock alpha",
converter=forms.float_converter(),
proportion=0,
)
self._clock_alpha_slider = forms.slider(
parent=self.GetWin(),
sizer=_clock_alpha_sizer,
value=self.clock_alpha,
callback=self.set_clock_alpha,
minimum=0.001,
maximum=0.1,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.GridAdd(_clock_alpha_sizer, 2, 2, 1, 1)
self._baseband_outfile_text_static_text = forms.static_text(
parent=self.displays.GetPage(1).GetWin(),
value=self.baseband
|
aek/pgbouncer-ng
|
pgbouncerlib/__init__.py
|
Python
|
bsd-3-clause
| 36
| 0
|
__version__
|
= (1, 0, 0, 'final
|
', 0)
|
RuiNascimento/krepo
|
script.module.lambdascrapers/lib/lambdascrapers/sources_incursion/en_incursion-1.20(final)/ymovies.py
|
Python
|
gpl-2.0
| 11,750
| 0.007064
|
# -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import jsunfuck
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
from resources.lib.modules import log_utils
CODE = '''def retA():
class Infix:
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
def my_add(x, y):
try: return x + y
except Exception: return str(x) + str(y)
x = Infix(my_add)
return %s
param = retA()'''
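# Standalone illustration of the "Infix" trick embedded in the CODE template
# above: wrapping a two-argument function so it can be spelled `x |op| y`.
# This demo class is not used anywhere else in the module.
class _InfixDemo(object):
    def __init__(self, function):
        self.function = function
    def __ror__(self, other):
        return _InfixDemo(lambda x, other=other: self.function(other, x))
    def __or__(self, other):
        return self.function(other)
_add_demo = _InfixDemo(lambda a, b: a + b)
assert (2 |_add_demo| 3) == 5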
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['yesmovies.to']
self.base_link = 'https://yesmovies.to'
self.search_link = '/movie/search/%s.html'
self.info_link = '/ajax/movie_info/%s.html?is_login=false'
self.server_link = '/ajax/v4_movie_episodes/%s'
self.embed_link = '/ajax/movie_embed/%s'
self.token_link = '/ajax/movie_token?eid=%s&mid=%s'
self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'
def matchAlias(self, title, aliases):
try:
for alias in aliases:
if cleantitle.get(title) == cleantitle.get(alias['title']):
return True
except:
return False
def movie(self, imdb, title, localtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': title})
url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def searchShow(self, title, season, aliases, headers):
try:
title = cleantitle.normalize(title)
search = '%s Season %01d' % (title, int(season))
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
log_utils.log('shit Returned: %s' % str(url), log_utils.LOGNOTICE)
r = client.request(url, headers=headers, timeout='15')
r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
return url
except:
return
def searchMovie(self, title, year, aliases, headers):
try:
title = cleantitle.normalize(title)
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
r = client.request(url, headers=headers, timeout='15')
r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
try:
r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
except:
url = None
pass
if (url == None):
url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
aliases = eval(data['aliases'])
headers = {}
if 'tvshowtitle' in data:
episode = int(data['episode'])
url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
else:
episode = 0
url = self.searchMovie(data['title'], data['year'], aliases, headers)
mid = re.findall('-(\d+)', url)[-1]
try:
headers = {'Referer': url}
u = urlparse.urljoin(self.base_link, self.server_link % mid)
r = client.request(u, headers=headers, XHR=True)
r = json.loads(r)['html']
r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
ids = client.parseDOM(r, 'li', ret='data-id')
servers = client.parseDOM(r, 'li', ret='data-server')
labels = client.parseDOM(r, 'a', ret='title')
r = zip(ids, servers, labels)
u = urlparse.urljoin(self.base_link, self.info_link % mid)
quality = client.request(u, headers=headers)
quality = dom_parser.parse_dom(quality, 'div', attrs={'class': 'jtip-quality'})[0].content
if quality == "HD":
quality = "720p"
for eid in r:
try:
try:
ep = re.findall('episode.*?(\d+).*?',eid[2].lower())[0]
except:
ep = 0
if (episode == 0) or (int(ep) == episode):
if eid[1] != '6':
url = urlparse.urljoin(self.base_link, self.embed_link % eid[0])
link = client.request(url)
|
link = json.loads(link)['src']
valid, host = source_utils.is_host_valid(link, hostDict)
sources.append({'source':host,'quality':quality,'language': 'en','url':link,'info':[],'direct':False,'debridonly':False})
else:
url = urlparse.urljoin(self.base_link, self.token_li
|
nk % (eid[0], mid))
script = client.
|
Extintor/DjangoBlog
|
blog/admin.py
|
Python
|
gpl-2.0
| 129
| 0
|
from
|
django.contrib import admin
from blog.models import Blog_post
admin.site.register(Blog_post)
# Register your models
|
here.
|
FlipperPA/pyodbc
|
tests2/sqlservertests.py
|
Python
|
mit
| 54,094
| 0.003605
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
usage = """\
usage: %prog [options] connection_string
Unit tests for SQL Server. To use, pass a connection string as the parameter.
The tests will create and drop tables t1 and t2 as necessary.
These run using the version from the 'build' directory, not the version
installed into the Python directories. You must run python setup.py build
before running the tests.
You can also put the connection string into a setup.cfg file in the root of the project
(the same one setup.py would use) like so:
[sqlservertests]
connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db
The connection string above will use the 2000/2005 driver, even if SQL Server 2008
is installed:
2000: DRIVER={SQL Server}
2005: DRIVER={SQL Server}
2008: DRIVER={SQL Server Native Client 10.0}
"""
import sys, os, re
import unittest
from decimal import Decimal
from datetime import datetime, date, time
from os.path import join, getsize, dirname, abspath
from testutils import *
_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-'
def _generate_test_string(length):
"""
Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary.
To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are
tested with 3 lengths. This function helps us generate the test data.
We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will
be hidden and to help us manually identify where a break occurs.
"""
if length <= len(_TESTSTR):
return _TESTSTR[:length]
c = (length + len(_TESTSTR)-1) / len(_TESTSTR)
v = _TESTSTR * c
return v[:length]
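# Quick sanity illustration of the helper above: short values are a prefix of
# _TESTSTR (38 characters), longer values wrap around and are truncated.
assert _generate_test_string(5) == '01234'
assert _generate_test_string(40) == _TESTSTR + '01'
assert len(_generate_test_string(4096)) == 4096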
class SqlServerTestCase(unittest.TestCase):
SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ]
LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ]
MAX_FENCEPOST_SIZES = [ 5 * 1024 * 1024 ] #, 50 * 1024 * 1024 ]
ANSI_SMALL_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ]
UNICODE_SMALL_FENCEPOSTS = [ unicode(s) for s in ANSI_SMALL_FENCEPOSTS ]
ANSI_LARGE_FENCEPOSTS = ANSI_SMALL_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]
UNICODE_LARGE_FENCEPOSTS = UNICODE_SMALL_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]]
ANSI_MAX_FENCEPOSTS = ANSI_LARGE_FENCEPOSTS + [ _generate_test_string(size) for size in MAX_FENCEPOST_SIZES ]
UNICODE_MAX_FENCEPOSTS = UNICODE_LARGE_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in MAX_FENCEPOST_SIZES ]]
def __init__(self, method_name, connection_string):
unittest.TestCase.__init__(self, method_name)
self.connection_string = connection_string
def get_sqlserver_version(self):
"""
Returns the major version: 8-->2000, 9-->2005, 10-->2008
"""
self.cursor.execute("exec master..xp_msver 'ProductVersion'")
row = self.cursor.fetchone()
return int(row.Character_Value.split('.', 1)[0])
def setUp(self):
self.cnxn = pyodbc.connect(self.connection_string)
self.cursor = self.cnxn.cursor()
for i in range(3):
try:
self.cursor.execute("drop table t%d" % i)
self.cnxn.commit()
except:
pass
for i in range(3):
try:
self.cursor.execute("drop procedure proc%d" % i)
self.cnxn.commit()
except:
pass
try:
self.cursor.execute('drop function func1')
self.cnxn.commit()
except:
pass
self.cnxn.rollback()
def tearDown(self):
try:
self.cursor.close()
self.cnxn.close()
except:
# If we've already closed the cursor or connection, exceptions are thrown.
pass
def test_binary_type(self):
if sys.hexversion >= 0x02060000:
self.assertIs(pyodbc.BINARY, bytearray)
else:
self.assertIs(pyodbc.BINARY, buffer)
def test_multiple_bindings(self):
"More than one bind and select on a cursor"
self.cursor.execute("create table t1(n int)")
self.cursor.execute("insert into t1 values (?)", 1)
self.cursor.execute("insert into t1 values (?)", 2)
self.cursor.execute("insert into t1 values (?)", 3)
for i in range(3):
self.cursor.execute("select n from t1 where n < ?", 10)
self.cursor.execute("select n from t1 where n < 3")
def test_different_bindings(self):
self.cursor.execute("create table t1(n int)")
self.cursor.execute("create table t2(d datetime)")
self.cursor.execute("insert into t1 values (?)", 1)
self.cursor.execute("insert into t2 values (?)", datetime.now())
def test_datasources(self):
p = pyodbc.dataSources()
self.assert_(isinstance(p, dict))
def test_getinfo_string(self):
value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
self.assert_(isinstance(value, str))
def test_getinfo_bool(self):
value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
self.assert_(isinstance(value, bool))
def test_getinfo_int(self):
value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
self.assert_(isinstance(value, (int, long)))
def test_getinfo_smallint(self):
value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
self.assert_(isinstance(value, int))
def test_noscan(self):
self.assertEqual(self.cursor.noscan, False)
self.cursor.noscan = True
self.assertEqual(self.cursor.noscan, Tr
|
ue)
def test_guid(self):
|
self.cursor.execute("create table t1(g1 uniqueidentifier)")
self.cursor.execute("insert into t1 values (newid())")
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), str)
self.assertEqual(len(v), 36)
def test_nextset(self):
self.cursor.execute("create table t1(i int)")
for i in range(4):
self.cursor.execute("insert into t1(i) values(?)", i)
self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i")
for i, row in enumerate(self.cursor):
self.assertEqual(i, row.i)
self.assertEqual(self.cursor.nextset(), True)
for i, row in enumerate(self.cursor):
self.assertEqual(i + 2, row.i)
def test_nextset_with_raiserror(self):
self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
row = next(self.cursor)
self.assertEqual(1, row.i)
self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset)
def test_fixed_unicode(self):
value = u"t\xebsting"
self.cursor.execute("create table t1(s nchar(7))")
self.cursor.execute("insert into t1 values(?)", u"t\xebsting")
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), unicode)
self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL
self.assertEqual(v, value)
def _test_strtype(self, sqltype, value, resulttype=None, colsize=None):
"""
The implementation for string, Unicode, and binary tests.
"""
assert colsize in (None, 'max') or isinstance(colsize, int), colsize
assert colsize in (None, 'max') or (value is None or colsize >= len(value))
if colsize:
sql = "create table t1(s %s(%s))" % (sqltype, colsize)
else:
sql = "create table t1(s %s)" % sqltype
if resulttype is None:
resulttype = type(value)
self.cursor.execute(sql)
self.cursor.execute("insert into t1 values(?)", value)
v = self.cursor.execute("select * from t1").fetchone()[0]
self.
|
utkbansal/tardis
|
setup.py
|
Python
|
bsd-3-clause
| 4,442
| 0.002701
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
#A dirty hack to get around some early import/configurations ambiguities
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (register_commands, adjust_compiler,
get_debug_option, get_package_info,
add_command_option)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# Get the long description from the package's docstring
#__import__(PACKAGENAME)
#package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = "" #package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '1.5.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_comma
|
nds(PACKAGENAME, VERSION, RELEASE)
add_command_option('install', 'with-openmp', 'compile TARDIS with OpenMP',
is_bool=True)
add_command_option('build', 'with-openmp', 'compile TARDIS with OpenMP',
is_bool=True)
add_command_option('develop', 'with-openmp', 'compile TARDIS with OpenMP',
is_bool=True)
# Adjust the compiler in case the default on th
|
is platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {}
for hook in [('prereleaser', 'middle'), ('releaser', 'middle'),
('postreleaser', 'before'), ('postreleaser', 'middle')]:
hook_ep = 'zest.releaser.' + '.'.join(hook)
hook_name = 'astropy.release.' + '.'.join(hook)
hook_func = 'astropy.utils.release:' + '_'.join(hook)
entry_points[hook_ep] = ['%s = %s' % (hook_name, hook_func)]
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
setup(name=PACKAGENAME + '-sn',
version=VERSION,
description=DESCRIPTION,
scripts=scripts,
requires=['astropy'],
install_requires=['astropy'],
provides=[PACKAGENAME],
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=True,
entry_points=entry_points,
**package_info
)
|
jorisvandenbossche/DS-python-data-analysis
|
notebooks/python_recap/_solutions/05-numpy35.py
|
Python
|
bsd-3-clause
| 14
| 0.071429
|
n
|
p.identi
|
ty(3)
|
kylef/swiftenv-api
|
versions.py
|
Python
|
bsd-2-clause
| 4,108
| 0.001217
|
import os
import glob
from pathlib import Path
import flask
import yaml
class VersionManager(object):
def __init__(self, versions=None):
self._versions = versions
@property
def versions(self):
if self._versions is None:
version_paths = Path('versions').glob('**/*.yaml')
version_files = map(str, version_paths)
versions = map(Version.fromfile, version_files)
versions = sorted(versions, key=lambda v: v.version)
self._versions = list(versions)
return self._versions
def all(self):
return self
def filter(self, version=None, pre_release=None, snapshots=None, platform=None):
versions = self.versions
if version:
versions = [v for v in versions if v.version == version]
if pre_release is True:
versions = [v for v in versions if v.is_pre_release]
if pre_release is False:
versions = [v for v in versions if not v.is_pre_release]
if snapshots is True:
versions = [v for v in versions if v.is_snapshot]
if snapshots is False:
versions = [v for v in versions if not v.is_snapshot]
if platform:
versions = [v for v in versions if v.supports_platform(platform)]
return VersionManager(versions)
def get(self, **kwargs):
if kwargs:
versions = self.filter(**kwargs)
return versions.get()
if len(self.versions) == 1:
return self.versions[0]
raise flask.abort(404)
class Version(object):
objects = VersionManager()
@classmethod
def fromfile(cls, path):
version = os.path.splitext(os.path.basename(path))[0]
with open(path) as fp:
content = yaml.safe_load(fp.read())
binaries = {}
for (key, value) in content['binaries'].items():
# convert between old and new schema
if isinstance(value, str):
binaries[key] = {
'x86_64': valu
|
e,
}
|
else:
binaries[key] = value
if 'version' in content:
version = content['version']
return cls(version, binaries)
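    # For example, an old-style binaries entry such as
    #   {'ubuntu16.04': 'https://example.org/swift.tar.gz'}
    # is normalised above into the new schema
    #   {'ubuntu16.04': {'x86_64': 'https://example.org/swift.tar.gz'}}
    # (platform key and URL are made up for illustration).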
def __init__(self, version, binaries):
self.version = version
self.binaries = binaries
def __str__(self):
return self.version
def __eq__(self, other):
if isinstance(other, Version):
return self.version == other.version and self.binaries == other.binaries
return False
@property
def is_pre_release(self):
return '-' in self.version
@property
def is_snapshot(self):
return 'SNAPSHOT' in self.version
def supports_platform(self, platform):
"""
Returns if the version has a binary release for the given platform.
"""
return platform in self.binaries.keys()
@property
def path(self):
if self.version.startswith('DEVELOPMENT-SNAPSHOT-'):
version = self.version[len('DEVELOPMENT-SNAPSHOT-'):]
(year, month, rest) = version.split('-', 2)
return os.path.join('versions', 'DEVELOPMENT-SNAPSHOT', year, month, '{}.yaml'.format(rest))
if '-' in self.version:
version, rest = self.version.split('-', 1)
else:
version = self.version
rest = None
major = version.split('.', 1)[0]
if rest:
if rest.startswith('DEVELOPMENT-SNAPSHOT-'):
rest = rest[len('DEVELOPMENT-SNAPSHOT-'):]
return os.path.join('versions', major, '{}-DEVELOPMENT-SNAPSHOT'.format(version), '{}.yaml'.format(rest))
return os.path.join('versions', major, '{}.yaml'.format(self.version))
def save(self):
path = Path(os.path.split(self.path)[0])
path.mkdir(parents=True, exist_ok=True)
with open(self.path, 'w') as fp:
yaml.dump({'version': self.version, 'binaries': self.binaries}, fp, default_flow_style=False)
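# Quick illustration of how a version string maps onto the on-disk layout used
# by path() above (binaries dict and URL are made up for the example):
if __name__ == '__main__':
    print(Version('4.0.1', {'ubuntu18.04': {'x86_64': 'https://example.org/swift.tar.gz'}}).path)
    # -> versions/4/4.0.1.yaml
    print(Version('DEVELOPMENT-SNAPSHOT-2019-06-10-a', {}).path)
    # -> versions/DEVELOPMENT-SNAPSHOT/2019/06/10-a.yaml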
|
Integral-Technology-Solutions/ConfigNOW
|
Lib/xml/sax/saxutils.py
|
Python
|
mit
| 20,106
| 0.006864
|
"""
A library of useful helper classes to the saxlib classes, for the
convenience of application and driver writers.
$Id: saxutils.py,v 1.19 2001/03/20 07:19:46 loewis Exp $
"""
import types, sys, urllib, urlparse, os, string
import handler, _exceptions, xmlreader
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError: # 1.5 compatibility:UnicodeType not defined
_StringTypes = [types.StringType]
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = string.replace(data, "&", "&")
data = string.replace(data, "<", "<")
data = string.replace(data, ">", ">")
for chars, entity in entities.items():
data = string.replace(data, chars, entity)
return data
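# Quick illustration: the optional entities mapping is applied on top of the
# built-in &, < and > handling (the quote entity below is just an example).
assert escape('a < b & "c"', {'"': '&quot;'}) == 'a &lt; b &amp; &quot;c&quot;'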
# --- DefaultHandler
class DefaultHandler(handler.EntityResolver, handler.DTDHandler,
handler.ContentHandler, handler.ErrorHandler):
"""Default base class for SAX2 event handlers. Implements empty
methods for all callback methods, which can be overridden by
application implementors. Replaces the deprecated SAX1 HandlerBase
class."""
# --- Location
class Location:
"""Represents a location in an XML entity. Initialized by being passed
a locator, from which it reads off the current location, which is then
stored internally."""
def __init__(self, locator):
self.__col = locator.getColumnNumber()
self.__line = locator.getLineNumber()
self.__pubid = locator.getPublicId()
self.__sysid = locator.getSystemId()
def getColumnNumber(self):
return self.__col
def getLineNumber(self):
return self.__line
def getPublicId(self):
return self.__pubid
def getSystemId(self):
return self.__sysid
# --- ErrorPrinter
class ErrorPrinter:
"A simple class that just prints error messages to standard out."
def __init__(self, level=0, outfile=sys.stderr):
self._level = level
self._outfile = outfile
def warning(self, exception):
if self._level <= 0:
self._outfile.write("WARNING in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def error(self, exception):
if self._level <= 1:
self._outfile.write("ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def fatalError(self, exception):
if self._level <= 2:
self._outfile.write("FATAL ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def __getpos(self, exception):
if isinstance(exception, _exceptions.SAXParseException):
return "%s:%s:%s" % (exception.getSystemId(),
exception.getLineNumber(),
exception.getColumnNumber())
else:
return "<unknown>"
# --- ErrorRaiser
class ErrorRaiser:
"A simple class that just raises the exceptions it is passed."
def __init__(self, level = 0):
self._level = level
def error(self, exception):
if self._level <= 1:
raise exception
def fatalError(self, exception):
if self._level <= 2:
raise exception
def warning(self, exception):
if self._level <= 0:
raise exception
# --- AttributesImpl now lives in xmlreader
from xmlreader import AttributesImpl
# --- XMLGenerator is the SAX2 ContentHandler for writing back XML
try:
import codecs
def _outputwrapper(stream,encoding):
writerclass = codecs.lookup(encoding)[3]
return writerclass(stream)
except ImportError: # 1.5 compatibility: fall back to do-nothing
def _outputwrapper(stream,encoding):
return stream
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1"):
if out is None:
import sys
out = sys.stdout
handler.ContentHandler.__init__(self)
self._out = _outputwrapper(out,encoding)
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
# ContentHandler methods
def startDocument(self):
self._out.write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
s
|
elf._out.write('<' + name)
for (name, value) in attrs.items():
self._out.write(' %s="%s"' % (name, escape(value)))
self._out.write('>')
def endElement(self, name):
self._out.write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
if name[0] is None:
name = name[1]
elif self._current_context[name
|
[0]] is None:
# default namespace
name = name[1]
else:
name = self._current_context[name[0]] + ":" + name[1]
self._out.write('<' + name)
for k,v in self._undeclared_ns_maps:
if k is None:
self._out.write(' xmlns="%s"' % v)
else:
self._out.write(' xmlns:%s="%s"' % (k,v))
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
name = self._current_context[name[0]] + ":" + name[1]
self._out.write(' %s="%s"' % (name, escape(value)))
self._out.write('>')
def endElementNS(self, name, qname):
# XXX: if qname is not None, we better use it.
# Python 2.0b2 requires us to use the recorded prefix for
# name[0], though
if name[0] is None:
qname = name[1]
elif self._current_context[name[0]] is None:
qname = name[1]
else:
qname = self._current_context[name[0]] + ":" + name[1]
self._out.write('</%s>' % qname)
def characters(self, content):
self._out.write(escape(content))
def ignorableWhitespace(self, content):
self._out.write(content)
def processingInstruction(self, target, data):
self._out.write('<?%s %s?>' % (target, data))
# --- ContentGenerator is the SAX1 DocumentHandler for writing back XML
class ContentGenerator(XMLGenerator):
def characters(self, str, start, end):
# In SAX1, characters receives start and end; in SAX2, it receives
# a string. For plain strings, we may want to use a buffer object.
return XMLGenerator.characters(self, str[start:start+end])
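    # For example (illustrative values), characters("hello world", 6, 5) forwards
    # "hello world"[6:6+5] == "world" to XMLGenerator.characters().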
# --- XMLFilterImpl
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self.
|
TonyThompson/fail2ban-patch
|
fail2ban/server/transmitter.py
|
Python
|
gpl-2.0
| 10,359
| 0.03147
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import time
import json
from ..helpers import getLogger
from .. import version
# Gets the instance of the logger.
logSys = getLogger(__name__)
class Transmitter:
##
# Constructor.
#
# @param The server reference
def __init__(self, server):
self.__server = server
##
# Proceeds a command.
#
# Proceeds an incoming command.
# @param command The incoming command
def proceed(self, command):
# Deserialize object
logSys.debug("Command: " + `command`)
try:
ret = self.__commandHandler(command)
ack = 0, ret
except Exception, e:
logSys.warning("Command %r has failed. Received %r"
% (command, e))
ack = 1, e
return ack
##
# Handle an command.
#
#
def __commandHandler(self, command):
if command[0] == "ping":
return "pong"
elif command[0] == "add":
name = command[1]
if name == "all":
raise Exception("Reserved name")
try:
backend = command[2]
except IndexError:
backend = "auto"
self.__server.addJail(name, backend)
return name
elif command[0] == "start":
name = command[1]
self.__server.startJail(name)
return None
elif command[0] == "stop":
if len(command) == 1:
self.__server.quit()
elif command[1] == "all":
self.__server.stopAllJail()
else:
name = command[1]
self.__server.stopJail(name)
return None
elif command[0] == "sleep":
value = command[1]
time.sleep(int(value))
return None
elif command[0] == "flushlogs":
return self.__server.flushLogs()
elif command[0] == "set":
return self.__commandSet(command[1:])
elif command[0] == "get":
return self.__commandGet(command[1:])
elif command[0] == "status":
return self.status(command[1:])
elif command[0] == "version":
return version.version
raise Exception("Invalid command")
def __commandSet(self, command):
name = command[0]
# Logging
if name == "loglevel":
value = command[1]
self.__server.setLogLevel(value)
return self.__server.getLogLevel()
elif name == "logtarget":
value = command[1]
if self.__server.setLogTarget(value):
return self.__server.getLogTarget()
else:
raise Exception("Failed to change log target")
#Database
elif name == "dbfile":
self.__server.setDatabase(command[1])
db = self.__server.getDatabase()
if db is None:
return None
else:
return db.filename
elif name == "dbpurgeage":
db = self.__server.getDatabase()
if db is None:
return None
else:
db.purgeage = command[1]
return db.purgeage
# Jail
elif command[1] == "idle":
if command[2] == "on":
self.__server.setIdleJail(name, True)
elif command[2] == "off":
self.__server.setIdleJail(name, False)
else:
raise Exception("Invalid idle option, must be 'on' or 'off'")
return self.__server.getIdleJail(name)
# Filter
elif command[1] == "addignoreip":
value = command[2]
self.__server.addIgnoreIP(name, value)
return self.__server.getIgnoreIP(name)
elif command[1] == "delignoreip":
value = command[2]
self.__server.delIgnoreIP(name, value)
return self.__server.getIgnoreIP(name)
elif command[1] == "ignorecommand":
value = command[2]
self.__server.setIgnoreCommand(name, value)
return self.__server.getIgnoreCommand(name)
elif command[1] == "addlogpath":
value = command[2]
tail = False
if len(command) == 4:
if command[3].lower() == "tail":
tail = True
elif command[3].lower() != "head":
raise ValueError("File option must be 'head' or 'tail'")
elif len(command) > 4:
raise ValueError("Only one file can be added at a time")
self.__server.addLogPath(name, value, tail)
return self.__server.getLogPath(name)
elif command[1] == "dellogpath":
value = command[2]
self.__server.delLogPath(name, value)
return self.__server.getLogPath(name)
elif command[1] == "logencoding":
value = command[2]
self.__server.setLogEncoding(name, value)
return self.__server.getLogEncoding(name)
elif command[1] == "addjournalmatch": # pragma: systemd no cover
value = command[2:]
self.__server.addJournalMatch(name, value)
return self.__server.getJournalMatch(name)
elif command[1] == "deljournalmatch": # pragma: systemd no cover
value = command[2:]
self.__server.delJournalMatch(name, value)
return self.__server.getJournalMatch(name)
elif command[1] == "addfailregex":
value = command[2]
self.__server.addFailRegex(name, value)
return self.__server.getFailRegex(name)
elif command[1] == "delfailregex":
value = int(command[2])
self.__server.delFailRegex(name, value)
return self.__server.getFailRegex(name)
elif command[1] == "addignoreregex":
value = command[2]
self.__server.addIgnoreRegex(name, value)
return self.__server.getIgnoreRegex(name)
elif command[1] == "delignoreregex":
value = int(command[2])
self.__server.delIgnoreRegex(name, value)
return self.__server.getIgnoreRegex(name)
elif command[1] == "usedns":
value = command[2]
self.__server.setUseDns(name, value)
return self.__server.getUseDns(name)
elif command[1] == "findtime":
value = command[2]
self.__server.setFindTime(name, int(value))
return self.__server.getFindTime(name)
elif command[1] == "datepatter
|
n":
value = command[2]
self.__server.setDatePattern(name, value)
return self.__server.getDatePattern(name)
elif command[1] == "maxretry":
value = command[2]
self.__server.setMaxRetry(name, int(value))
return self.__server.getMaxRetry(name)
elif command[1] == "maxlines":
value = command[2]
self.__server.set
|
MaxLines(name, int(value))
return self.__server.getMaxLines(name)
# command
elif command[1] == "bantime":
value = command[2]
self.__server.setBanTime(name, int(value))
return self.__server.getBanTime(name)
elif command[1] == "banip":
value = command[2]
return self.__server.setBanIP(name,value)
elif command[1] == "unbanip":
value = command[2]
self.__server.setUnbanIP(name, value)
return value
elif command[1] == "addaction":
args = [command[2]]
if len(command) > 3:
args.extend([command[3], json.loads(command[4])])
self.__server.addAction(name, *args)
return args[0]
elif command[1] == "delaction":
value = command[2]
self.__server.delAction(name, value)
return None
elif command[1] == "action":
actionname = command[2]
actionkey = command[3]
action = self.__server.getAction(name, actionname)
if callable(getattr(action, actionkey, None)):
actionvalue = json.loads(command[4]) if len(command)>4 else {}
return getattr(action, actionkey)(**actionvalue)
else:
actionvalue = command[4]
setattr(action, actionkey, actionvalue)
return getattr(action, actionkey)
raise Exception("Invalid command (no set action or not yet implemented)")
def __commandGet(self, command):
name = command[0]
# Logging
if name == "loglevel":
return self.__server.getLogLevel()
elif name == "logtarget":
return self.__server.getLogTarget()
#Database
elif name == "dbfile":
db = self.__server.getDatabase()
if db is None:
return None
else:
return db.filename
elif name == "dbpurgeage":
db =
|
Taapat/enigma2-openpli-vuplus
|
lib/python/Components/ResourceManager.py
|
Python
|
gpl-2.0
| 529
| 0.032136
|
class ResourceManager:
def __init__(self):
self.resourceList = {}
def addResource(self, name, resource):
print "adding Resource", name
self.resourceList[name] = resource
print "resources:", self.resourceList
def getResource(self, name)
|
:
if not self.hasResource(name):
return None
return self.resourceList[n
|
ame]
def hasResource(self, name):
return name in self.resourceList
def removeResource(self, name):
if self.hasResource(name):
del self.resourceList[name]
resourcemanager = ResourceManager()
|
walterbender/turtle3D
|
plugins/audio_sensors/ringbuffer.py
|
Python
|
mit
| 4,149
| 0.000241
|
# Copyright (C) 2009, Benjamin Berg, Sebastian Berg
# Copyright (C) 2010, Walter Bender
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import numpy as np
class RingBuffer1d(object):
"""This class implements an array being written in as a ring and that can
be read from continuously ending with the newest data or starting with the
oldest. It returns a numpy array copy of the data;
"""
def __init__(self, length, dtype=None):
"""Initialize the 1 dimensional ring buffer with the given lengths.
The initial values are all 0s
"""
self.offset = 0
self._data = np.zeros(length, dtype=dtype)
self.stored = 0
def fill(self, number):
self._data.fill(number)
self.offset = 0
def append(self, data):
"""Append to the ring buffer (and overwrite old data). If len(data)
is greater then the ring buffers length, the newest data takes
precedence.
"""
data = np.asarray(data)
if len(self._data) == 0:
return
if len(data) >= len(self._data):
self._data[:] = data[-len(self._data):]
self.offset = 0
self.stored = len(self._data)
elif len(self._data) - self.offset >= len(data):
self._data[self.offset: self.offset + len(data)] = data
self.offset = self.offset + len(data)
self.stored += len(data)
else:
self._data[self.offset:] = data[:len(self._data) - self.offset]
self._data[:len(data) - (len(self._data) - self.offset)] = \
data[-len(data) + (len(self._data) - self.offset):]
self.offset = len(data) - (len(self._data) - self.offset)
self.stored += len(data)
if len(self._data) <= self.stored:
self.read = self._read
def read(self, number=None, step=1):
"""Read the ring Buffer. Number can be positive or negative.
Positive values will give the latest information, negative v
|
alues will
give the newest added information from the buffer. (in normal order)
Before the buffer
|
is filled once: This returns just None
"""
return np.array([])
def _read(self, number=None, step=1):
"""Read the ring Buffer. Number can be positive or negative.
Positive values will give the latest information, negative values will
give the newest added information from the buffer. (in normal order)
"""
if number is None:
number = len(self._data) // step
number *= step
assert abs(number) <= len(self._data), \
'Number to read*step must be smaller than length'
if number < 0:
if abs(number) <= self.offset:
return self._data[self.offset + number:self.offset:step]
spam = (self.offset - 1) % step
return np.concatenate(
(self._data[step - spam - 1 + self.offset + number::step],
self._data[spam:self.offset:step]))
if number - (len(self._data) - self.offset) > 0:
spam = ((self.offset + number) - self.offset - 1) % step
return np.concatenate(
(self._data[self.offset:self.offset + number:step],
self._data[spam:number -
(len(self._data) - self.offset):step]))
return self._data[self.offset:self.offset + number:step].copy()
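# Editor's note: a minimal usage sketch appended here, not part of the
# original module; it only illustrates the append/read semantics documented
# above. The expected outputs follow from the ring-buffer logic in this file.
_demo = RingBuffer1d(4, dtype=int)
print(_demo.read())      # not yet filled once -> empty array []
_demo.append([1, 2, 3])
_demo.append([4, 5, 6])  # the oldest values are overwritten
print(_demo.read())      # oldest-to-newest window: [3 4 5 6]
print(_demo.read(-2))    # two most recently appended values: [5 6]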
|
mldbai/mldb
|
testing/MLDB-2100_fetcher_timeout_test.py
|
Python
|
apache-2.0
| 1,628
| 0.003686
|
#
# MLDB-2100_fetcher_timeout_test.py
# Francois-Michel L'Heureux, 2016-11-20
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import socket
import threading
import time
class MyThread(threading.Thread):
def run(self):
try:
threading.Thread.run(self)
except Exc
|
eption as xxx_todo_changeme:
self.err = xxx_todo_changeme
pass
else:
self.err = None
# tim
|
eout in case MLDB fails to connect to the socket, the test won't hang
socket.setdefaulttimeout(10)
from mldb import mldb
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('127.0.0.1', 0))
serversocket.listen(1)
port_num = serversocket.getsockname()[1]
keep_going = threading.Event()
def sleeper():
while not keep_going.is_set():
time.sleep(1)
def client_thread(clientsocket):
return threading.Thread(target=sleeper)
def mldb_test():
mldb.log("MLDB querying")
res = mldb.query(
"SELECT fetcher('http://localhost:{}/toto')".format(port_num))
assert res[1][2].find("Timeout was reached") != -1
mldb_thread = MyThread(target=mldb_test)
mldb_thread.start()
# accept connections from outside
try:
(clientsocket, address) = serversocket.accept()
except socket.timeout:
mldb.log("MLDB did not contact the socket")
raise
# now do something with the clientsocket
# in this case, we'll pretend this is a threaded server
ct = client_thread(clientsocket)
ct.start()
mldb_thread.join()
keep_going.set()
ct.join()
if mldb_thread.err:
raise mldb_thread.err
request.set_return("success")
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/packet_capture_result_paged.py
|
Python
|
mit
| 987
| 0.001013
|
# coding=utf-8
# ------------------------------------------------
|
--------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will
|
be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class PacketCaptureResultPaged(Paged):
"""
A paging container for iterating over a list of :class:`PacketCaptureResult <azure.mgmt.network.v2017_08_01.models.PacketCaptureResult>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PacketCaptureResult]'}
}
def __init__(self, *args, **kwargs):
super(PacketCaptureResultPaged, self).__init__(*args, **kwargs)
|
longmazhanfeng/interface_web
|
interface_platform/management/interfacerunner.py
|
Python
|
mit
| 2,680
| 0.000856
|
# -*- coding: UTF-8 -*-
from ..model.interfacebuilder import InterfaceBuilder
from ..models import ITLog, ITStatement
import interface_pl
|
atform.settings as settings
import logging.config
import os
from datetime import datetime
# Run an interface
# Takes an it_id and the validation data
class InterfaceRunner(object):
def __init__(self, it_id):
print "InterfaceRunner.__init__()"
self._it = ITStatement.objects.get(id=it_id)
self._log_name = str(datetime.now()) + ".log"
self.set_log_file(self._log_name)
self._logger = logging.getLogger(settings.LOGGER_NAME)
self._interface = None
self._interfacebui
|
lder = InterfaceBuilder(self._it, self._logger)
@property
def interfacebuilder(self):
return self._interfacebuilder
@property
def logger(self):
return self._logger
@property
def interface(self):
return self._interface
# Returns the execution result after running: failed, passed, or not executed
def runner(self):
print "InterfaceRunner.runner()"
self._interface = self._interfacebuilder.build()
self._interface.run()
self._interface.validate()
self._logger.info("接口执行状态: " + self._interface.status)
self._logger.info("接口构建完成!")
return self._interface.status
# This method takes the name of the log file to write as its parameter;
# the default is the filename configured under LOGGING in settings.py
# 1. Set the log storage path
# 2. Create the test-case log record or read the existing log file path
def set_log_file(self, filename):
print "InterfaceRunner.set_log_file()"
# Log storage path
log_path = os.path.join(settings.LOG_ROOT, filename.decode("utf-8"))
# Store the log file path in the database
# Paths containing Chinese characters are awkward to use here
logs = ITLog.objects.filter(name=filename)
if logs.exists() is False:
log = ITLog.objects.create(it=self._it, name=filename, log_path=log_path)
log.save()
else:
# Delete the file and recreate it
# Deletion may fail if the file is still in use
# remove_log(log_path)
pass
logging_dic = settings.LOGGING
logging_dic['handlers']['eat']['filename'] = log_path
logging.config.dictConfig(logging_dic)
# Get the log contents
# Remember to close the file after opening it
def get_log(self):
print "InterfaceRunner.get_log()"
log_path = os.path.join(settings.LOG_ROOT, self._log_name.decode('utf-8'))
f_log = open(log_path)
content = f_log.readlines()
f_log.close()
return content
|
dwitvliet/CATMAID
|
django/applications/catmaid/control/exportneuroml.py
|
Python
|
gpl-3.0
| 3,874
| 0.003356
|
# A file to contain exclusively dependencies of the NeuroML package.
# See:
# https://github.com/NeuralEnsemble/libNeuroML
# http://neuroml.org
from __future__ import print_function
from collections import defaultdict
try:
from neuroml import Cell, Segment, SegmentParent, Morphology, \
NeuroMLDocument, Point3DWithDiam
except ImportError:
print("NeuroML module could not be loaded.")
def neuroml_single_cell(skeleton_id, nodes, pre, post):
""" Encapsulate a single skeleton into a NeuroML Cell instance.
skeleton_id: the ID of the skeleton to which all nodes belong.
nodes: a dictionary of node ID vs tuple of node parent ID, location as a tuple of 3 floats, and radius. In nanometers.
pre: a dictionary of node ID vs list of connector ID
post: a dictionary of node ID vs list of connector ID
Returns a Cell with id=skeleton_id.
"""
# Collect the children of every node
successors = defaultdict(list) # parent node ID vs list of children node IDs
rootID = None
for nodeID, props in nodes.iteritems():
parentID = props[0]
if not parentID:
rootID = nodeID
continue
successors[parentID].append(nodeID)
# Cache of Point3DWithDiam
points = {}
def asPoint(nodeID):
""" Return the node as a Point3DWithDiam, in micrometers. """
p = points.get(nodeID)
if not p:
props = nodes[nodeID]
radius = props[2]
if radius < 0:
radius = 0.1 # FUTURE Will have to change
loc = props[1]
# Point in micrometers
p = Point3DWithDiam(loc[0] / 1000.0, loc[1] / 1000.0, loc[2] / 1000.0, radius)
points[nodeID] = p
return p
# Starting from the root node, iterate towards the end nodes, adding a segment
# for each parent-child pair.
segments = []
segment_id = 1
todo = [rootID]
# VERY CONFUSINGLY, the Segment.parent is a SegmentParent with the same id as the parent Segment. An unseemly overheady way to reference the parent Segment.
while todo:
nodeID = todo.pop()
children = successors[nodeID]
if not children:
continue
p1 = asPoint(nodeID)
parent = segments[-1] if segments else None
segment_parent = SegmentParent(
|
segments=parent.id) if parent else None
for childID in children:
p2 = asPoint(childID)
segment_id +=
|
1
segment = Segment(proximal=p1, distal=p2, parent=segment_parent)
segment.id = segment_id
segment.name = "%s-%s" % (nodeID, childID)
segments.append(segment)
todo.append(childID)
# Pack the segments into a Cell
morphology = Morphology()
morphology.segments.extend(segments)
morphology.id = "Skeleton #%s" % skeleton_id
# Synapses: TODO requires input from Padraig Gleeson
cell = Cell()
cell.name = 'Cell'
cell.id = skeleton_id
cell.morphology = morphology
return cell
def neuroml_network(cells, response):
""" Write a list of Cell instances.
cells: a list of Cell instances.
response: somewhere to write to, like an HttpResponse
Returns nothing.
"""
doc = NeuroMLDocument()
doc.cells.extend(cells)
doc.id = "NeuroMLDocument"
namespacedef = 'xmlns="http://www.neuroml.org/schema/neuroml2"' \
+ ' xmlns:xi="http://www.w3.org/2001/XInclude"' \
+ ' xmlns:xs="http://www.w3.org/2001/XMLSchema"' \
+ ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"' \
+ ' xsi:schemaLocation="http://www.w3.org/2001/XMLSchema"'
doc.export( response, 0, name_="neuroml", namespacedef_=namespacedef)
return response
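# Editor's note: a hedged sketch, not part of the original module, of the
# input layout that neuroml_single_cell() documents above. The node IDs,
# nanometer coordinates and radii are made up for illustration only.
# nodes = {
#     1: (None, (0.0, 0.0, 0.0), 500.0),    # root node: no parent
#     2: (1, (1000.0, 0.0, 0.0), 400.0),    # child of node 1
#     3: (2, (2000.0, 500.0, 0.0), 300.0),
# }
# cell = neuroml_single_cell(42, nodes, pre={}, post={})
# neuroml_network([cell], response)  # 'response' is any writable target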
|
helenst/django
|
django/db/models/base.py
|
Python
|
bsd-3-clause
| 66,310
| 0.001508
|
from __future__ import unicode_literals
import copy
import inspect
import sys
import warnings
from django.apps import apps
from django.apps.config import MODELS_MODULE_NAME
from django.conf import settings
from django.core import checks
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.db import (router, connections, transaction, DatabaseError,
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY)
from django.db.models.deletion import Collector
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ForeignObjectRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
from django.db.models import signals
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
# If the model is imported before the configuration for its
# application is created (#21719), or isn't in an installed
# application (#21680), use the legacy logic to figure out the
# app_label by looking one level up from the package or module
# named 'models'. If no such package or module exists, fall
# back to looking one level up from the module this model is
# defined in.
# For 'django.contrib.sites.models', this would be 'sites'.
# For 'geo.models.places' this would be 'geo'.
msg = (
"Model class %s.%s doesn't declare an explicit app_label "
"and either isn't in an application in INSTALLED_APPS or "
"else was imported before its application was loaded. "
"This will no longer be supported in Django 1.9." %
(module, name))
if not abstract:
warnings.warn(msg, RemovedInDjango19Warning, stacklevel=2)
model_module = sys.modules[new_class.__module__]
package_components = model_module.__name__.split('.')
package_components.reverse() # find the last occurrence of 'models'
try:
app_label_index = package_components.index(MODELS_MODULE_NAME) + 1
except ValueError:
app_label_index = 1
kwargs = {"app_label": package_components[app_label_index]}
else:
kwargs = {"app_label": app_config.label}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
|
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attrib
|
utes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = (
new_class._meta.local_fields +
new_class._meta.local_many_to_many +
new_class._meta.virtual_fields
)
field_names = set(f.name for f in new_fields)
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy m
|
nhejazi/project-gamma
|
code/project_config.py
|
Python
|
bsd-3-clause
| 562
| 0.007117
|
"""
Convenient way to expose filepaths to scripts. Also, important
constants are centralized here to avoid m
|
ultiple copies.
"""
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__)))
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils', 'tests'))
TR = 2.5
# we choose cutoff values by i
|
nspecting the histogram of data values of the standard mni brain
MNI_CUTOFF = 5000
MIN_STD_SHAPE = (91, 109, 91)
|
razzius/sqlalchemy-migrate
|
migrate/tests/versioning/test_script.py
|
Python
|
mit
| 9,323
| 0.002789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
from migrate import exceptions
from migrate.versioning import version, repository
from migrate.versioning.script import *
from migrate.versioning.util import *
from migrate.tests import fixture
from migrate.tests.fixture.models import tmp_sql_table
class TestBaseScript(fixture.Pathed):
def test_all(self):
"""Testing all basic BaseScript operations"""
# verify / source / run
src = self.tmp()
open(src, 'w').close()
bscript = BaseScript(src)
BaseScript.verify(src)
self.assertEqual(bscript.source(), '')
self.assertRaises(NotImplementedError, bscript.run, 'foobar')
class TestPyScript(fixture.Pathed, fixture.DB):
cls = PythonScript
def test_create(self):
"""We can create a migration script"""
path = self.tmp_py()
# Creating a file that doesn't exist should succeed
self.cls.create(path)
self.assertTrue(os.path.exists(path))
# Created file should be a valid script (If not, raises an error)
self.cls.verify(path)
# Can't create
|
it again: it already exists
self.assertRaises(exceptions.PathFoundError,self.cls.create,path)
@fixture.usedb(supported='sqlite')
def test_run(self):
script_path = self.tmp_py()
pyscript = PythonScript.cr
|
eate(script_path)
pyscript.run(self.engine, 1)
pyscript.run(self.engine, -1)
self.assertRaises(exceptions.ScriptError, pyscript.run, self.engine, 0)
self.assertRaises(exceptions.ScriptError, pyscript._func, 'foobar')
# clean pyc file
os.remove(script_path + 'c')
# test deprecated upgrade/downgrade with no arguments
contents = open(script_path, 'r').read()
f = open(script_path, 'w')
f.write(contents.replace("upgrade(migrate_engine)", "upgrade()"))
f.close()
pyscript = PythonScript(script_path)
pyscript._module = None
try:
pyscript.run(self.engine, 1)
pyscript.run(self.engine, -1)
except exceptions.ScriptError:
pass
else:
self.fail()
def test_verify_notfound(self):
"""Correctly verify a python migration script: nonexistant file"""
path = self.tmp_py()
self.assertFalse(os.path.exists(path))
# Fails on empty path
self.assertRaises(exceptions.InvalidScriptError,self.cls.verify,path)
self.assertRaises(exceptions.InvalidScriptError,self.cls,path)
def test_verify_invalidpy(self):
"""Correctly verify a python migration script: invalid python file"""
path=self.tmp_py()
# Create empty file
f = open(path,'w')
f.write("def fail")
f.close()
self.assertRaises(Exception,self.cls.verify_module,path)
# script isn't verified on creation, but on module reference
py = self.cls(path)
self.assertRaises(Exception,(lambda x: x.module),py)
def test_verify_nofuncs(self):
"""Correctly verify a python migration script: valid python file; no upgrade func"""
path = self.tmp_py()
# Create empty file
f = open(path, 'w')
f.write("def zergling():\n\tprint 'rush'")
f.close()
self.assertRaises(exceptions.InvalidScriptError, self.cls.verify_module, path)
# script isn't verified on creation, but on module reference
py = self.cls(path)
self.assertRaises(exceptions.InvalidScriptError,(lambda x: x.module),py)
@fixture.usedb(supported='sqlite')
def test_preview_sql(self):
"""Preview SQL abstract from ORM layer (sqlite)"""
path = self.tmp_py()
f = open(path, 'w')
content = '''
from migrate import *
from sqlalchemy import *
metadata = MetaData()
UserGroup = Table('Link', metadata,
Column('link1ID', Integer),
Column('link2ID', Integer),
UniqueConstraint('link1ID', 'link2ID'))
def upgrade(migrate_engine):
metadata.create_all(migrate_engine)
'''
f.write(content)
f.close()
pyscript = self.cls(path)
SQL = pyscript.preview_sql(self.url, 1)
self.assertEqualIgnoreWhitespace("""
CREATE TABLE "Link"
("link1ID" INTEGER,
"link2ID" INTEGER,
UNIQUE ("link1ID", "link2ID"))
""", SQL)
# TODO: test: No SQL should be executed!
def test_verify_success(self):
"""Correctly verify a python migration script: success"""
path = self.tmp_py()
# Succeeds after creating
self.cls.create(path)
self.cls.verify(path)
# test for PythonScript.make_update_script_for_model
@fixture.usedb()
def test_make_update_script_for_model(self):
"""Construct script source from differences of two models"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertTrue("['User'].create()" in source_script)
self.assertTrue("['User'].drop()" in source_script)
@fixture.usedb()
def test_make_update_script_for_equal_models(self):
"""Try to make update script from two identical models"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source + self.model_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertFalse('User.create()' in source_script)
self.assertFalse('User.drop()' in source_script)
@fixture.usedb()
def test_make_update_script_direction(self):
"""Check update scripts go in the right direction"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertTrue(0
< source_script.find('upgrade')
< source_script.find("['User'].create()")
< source_script.find('downgrade')
< source_script.find("['User'].drop()"))
def setup_model_params(self):
self.script_path = self.tmp_py()
self.repo_path = self.tmp()
self.first_model_path = os.path.join(self.temp_usable_dir, 'testmodel_first.py')
self.second_model_path = os.path.join(self.temp_usable_dir, 'testmodel_second.py')
self.base_source = """from sqlalchemy import *\nmeta = MetaData()\n"""
self.model_source = """
User = Table('User', meta,
Column('id', Integer, primary_key=True),
Column('login', Unicode(40)),
Column('passwd', String(40)),
)"""
self.repo = repository.Repository.create(self.repo_path, 'repo')
self.pyscript = PythonScript.create(self.script_path)
sys.modules.pop('testmodel_first', None)
sys.modules.pop('testmodel_second', None)
def write_file(self, path, contents):
f = open(path, 'w')
f.write(contents)
f.close()
class TestSqlScript(fixture.Pathed, fixture.DB):
@fixture.usedb()
def test_error(self):
"""Test if exception is raised on wrong script source"""
src = self.tmp()
f = open(src, 'w')
f.write("""foo
|
joetainment/mmmmtools
|
MmmmToolsMod/script_file_runner_scripts/hand_auto_rigging.py
|
Python
|
gpl-3.0
| 5,762
| 0.020305
|
import maya.cmds as cmds
import pymel.all as pm
import traceback
controlCurve = pm.PyNode('control_curve')
## to make a numerical 'floating point'
## attribute, we use at='double', keyable=True
controlCurve.addAttr( 'allCurl', at='double', keyable=True )
controlCurve.addAttr( 'pointerAllCurl', at='double', keyable=True )
controlCurve.addAttr( 'middleAllCurl', at='double', keyable=True )
controlCurve.addAttr( 'pinkyAllCurl', at='double', keyable=True )
controlCurve.addAttr( 'pointerACurl', at='double', keyable=True )
controlCurve.addAttr( 'pointerBCurl', at='double', keyable=True )
controlCurve.addAttr( 'pointerCCurl', at='double', keyable=True )
controlCurve.addAttr( 'middleACurl', at='double', keyable=True )
controlCurve.addAttr( 'middleBCurl', at='double', keyable=True )
controlCurve.addAttr( 'middleCCurl', at='double', keyable=True )
controlCurve.addAttr( 'pinkyACurl', at='double', keyable=True )
controlCurve.addAttr( 'pinkyBCurl', at='double', keyable=True )
controlCurve.addAttr( 'pinkyCCurl', at='double', keyable=True )
pointerA = pm.PyNode('pointer_a')
pointerB = pm.PyNode('pointer_b')
pointerC = pm.PyNode('pointer_c')
middleA = pm.PyNode('middle_a')
middleB = pm.PyNode('middle_b')
middleC = pm.PyNode('middle_c')
pinkyA = pm.PyNode('pinky_a')
pinkyB = pm.PyNode('pinky_b')
pinkyC = pm.PyNode('pinky_c')
pointerAll = [ pointerA, pointerB, pointerC ]
middleAll = [ middleA, middleB, middleC ]
pinkyAll = [ pinkyA, pinkyB, pinkyC ]
all = pointerAll + middleAll + pinkyAll
adds = { }
for jnt in all:
addNodeY = pm.createNode( 'plusMinusAverage' )
addNodeZ = pm.createNode( 'plusMinusAverage' )
addNodeY.rename( jnt.name()+'_addY' )
addNodeZ.rename( jnt.name()+'_addZ' )
## the operator >> means "connect" for pymel
addNodeY.output1D >> jnt.rotateY
addNodeZ.output1D >> jnt.rotateZ
adds[ jnt.name()+'Y' ] = addNodeY
adds[ jnt.name()+'Z' ] = addNodeZ
## We can't hardcode the name because Maya might change that
## #controlCurve.pointerAllCurl >> pm.PyNode('pointer_a_addZ').input1D
## so we use our adds dictionary to get the right answer
## pointerAllCurl connections
target = adds[ 'pointer_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pointerAllCurl >> target.input1D[num]
target = adds[ 'pointer_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pointerAllCurl >> target.input1D[num]
target = adds[ 'pointer_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pointerAllCurl >> target.input1D[num]
## pointer A,B,C connections
target = adds[ 'pointer_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pointerACurl >> target.input1D[num]
target = adds[ 'pointer_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pointerBCurl >> target.input1D[num]
target = adds[ 'pointer_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pointerCCurl >> target.input1D[num]
## middleAllCurl connections
target = adds[ 'middle_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.middleAllCurl >> target.input1D[num]
target = adds[ 'middle_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.middleAllCurl >> target.input1D[num]
target = adds[ 'middle_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.middleAllCurl >> target.input1D[num]
## middle A,B,C connections
target = adds[ 'middle_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.middleACurl >> target.input1D[num]
target = adds[ 'middle_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.middleBCurl >> target.input1D[num]
target = adds[ 'middle_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.middleCCurl >> target.input1D[num]
## pinkyAllCurl connections
target = adds[ 'pinky_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pinkyAllCurl >> target.input1D[num]
target = adds[ 'pinky_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pinkyAllCurl >> target.input1D[num]
target = adds[ 'pinky_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pinkyAllCurl >> target.input1D[num]
## pinky A,B,C connections
target = adds[ 'pinky_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pinkyACurl >> target.input1D[num]
target = adds[ 'pinky_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.p
|
inkyBCurl >> target.input1D[num]
target = adds[ 'pinky_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.pinkyCCurl >> target.input1D[num]
## allCurl connections
target = adds[ 'pointer_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
target = adds[ 'pointer_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
target = adds[ 'pointer_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> t
|
arget.input1D[num]
target = adds[ 'middle_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
target = adds[ 'middle_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
target = adds[ 'middle_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
target = adds[ 'pinky_a' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
target = adds[ 'pinky_b' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
target = adds[ 'pinky_c' + 'Z' ]
num = target.input1D.getNumElements()
controlCurve.allCurl >> target.input1D[num]
""" ## These are ignorable comments
pointerAllCurlTargetsZ = [pointerA, pointerB, pointerC]
attrToTargets = {
'pointerAllCurl'+'Z': [pointerA, pointerB, pointerC]
}
""" ## These are ignorable comments
|
ebilionis/py-best
|
best/inverse/_inverse_problem.py
|
Python
|
lgpl-3.0
| 4,531
| 0.002207
|
"""Define the InverseProblem class.
Author:
Ilias Bilionis
Date:
1/14/2013
1/21/2013
"""
__all__ = ['InverseProblem']
import numpy as np
import itertools
from ..random import StudentTLikelihoodFunction
from ..random import RandomWalkProposal
from ..random import SequentialMonteCarlo
class InverseProblem(object):
"""The general inverse problem class."""
# The SMC object
_smc = None
# The final particles
_r = None
# The final weights
_w = None
# A resampled version of the particles
_resampled_r = None
# The mean of the particles
_mean = None
# The variance of the particles
_variance = None
@property
def smc(self):
"""Get the SMC object."""
return self._smc
@property
def alpha(self):
"""Get the alpha parameter of the Gamma dist. for the precision."""
return self._alpha
@property
def beta(self):
"""Get the beta parameter of the Gamma dist. for the precision."""
return self._beta
@property
def particles(self):
"""Get the final particles."""
return self._r
@property
def weights(self):
"""Get the final weights."""
return self._w
@property
def resampled_particles(self):
"""Get the resampled particles."""
return self._resampled_r
@property
def mean(self):
"""Get the mean of the particles."""
return self._mean
@property
def variance(self):
"""Get the variance of the particles."""
return self._variance
def __init__(self, solver=None, prior=None, data=None, alpha=1e-2, beta=1e-2,
verbose=True, mpi=None, comm=None, num_particles=100,
num_mcmc=10, proposal=RandomWalkProposal(dt=0.2),
store_intermediate_samples=False):
"""Initialize the object.
Keyword Arguments:
solver --- The forward solver you wish to use.
prior --- The prior distribution of the parameters.
proposal--- The MCMC proposal.
alpha --- The alpha parameter (shape) of the Gamma
distribution of the precision of the forward solver.
beta --- The beta parameter (rate) of the Gamma
distribution of the precision of the forward solver.
verbose --- Be verbose or not.
mpi --- Use MPI or not.
comm --- The MPI communicator.
num_particles --- The number of particles.
num_mcmc --- The number of MCMC steps per SMC step.
proposal --- The MCMC proposal.
"""
if solver is None:
raise ValueError('The forward solver must be specified.')
if data is None:
raise ValueError('The data must be specified.')
if prior is None:
raise ValueError('The prior must be specified.')
likelihood = StudentTLikelihoodFunction(2. * alpha, num_input=prior.num_input,
data=data,
|
mean_function=solver,
cov=(beta / alpha))
self._smc = SequentialMonteCarlo(prior=prior, likelihood=likelihood,
verbose=verbose, num_particles=num_particles,
num_mcmc=num_mcmc, proposal=proposal,
store_intermediate_samples=store_inter
|
mediate_samples,
mpi=mpi, comm=comm)
def solve(self):
"""Solve the inverse problem."""
r, w = self.smc.sample()
self._r = r
self._w = w
idf = lambda(x): x
self._mean = self.mean_of(idf)
self._variance = self.variance_of(idf, self.mean)
return r, w
def mean_of(self, function):
"""Calculate the mean of a function of the particles."""
y = np.array([self._w[i] * function(self._r[i,:]) for i in
xrange(self._r.shape[0])])
return np.mean(y, axis=0)
def variance_of(self, function, mean=None):
"""Calculate the variance of a function"""
if mean is None:
mean = self.mean_of(function)
v = np.array([self._w[i] * (function(self._r[i, :]) - mean) ** 2
for i in xrange(self._r.shape[0])])
return np.mean(v, axis=0)
|
luotao1/Paddle
|
python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py
|
Python
|
apache-2.0
| 3,500
| 0.000286
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
class PSDispatcher(object):
"""
PSDispatcher is the base class for dispatching vars
into different pserver instance.
You need to implement the `dispatch` interface.
"""
def __init__(self, pserver_endpoints):
self._eps = pserver_endpoints
self._step = 0
@property
def eps(self):
return self._eps
def reset(self):
"""
reset the step counter, set it zero.
"""
self._step = 0
def dispatch(self, varlist):
"""
Args:
varlist(list): a list of Variables
Returns:
a map of pserver endpoint -> varname
"""
raise NotImplementedError("Interface has not been implemented.")
class HashName(PSDispatcher):
"""
Hash variable names to several endpoints using python
"hash()" function.
Args:
pserver_endpoints (list): list of endpoint(ip:port).
Examples:
.. code-block:: python
pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
vars = ["var1","var2","var3","var4","var5"]
rr = RoundRobin(pserver_endpoints)
rr.dispatch(vars)
"""
def __init__(self, pserver_endpoints):
super(HashName, self).__init__(pserver_endpoints)
def _hash_block(self, block_str, total):
return hash(block_str) % total
def dispatch(self, varlist):
"""
use the `HashName` method to dispatch variables across the parameter servers.
Args:
varlist (list): a list of Variables
"""
eplist = []
for var in varlist:
server_id = self._hash_block(var.name(), len(self._eps))
server_for_param = self._eps[server_id]
eplist.append(server_for_param)
return eplist
class RoundRobin(PSDispatcher):
"""
|
Distribute variables to several endpoints using
RoundRobin<https://en.wikipedia.org/wiki/Round-robin_scheduling> method.
Args:
pserver_endpoints (list): list of endpoint(ip:port).
Examples:
.. code-block:: python
pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
vars = ["var1","var2","var3","var4","var5"]
|
rr = RoundRobin(pserver_endpoints)
rr.dispatch(vars)
"""
def __init__(self, pserver_endpoints):
super(RoundRobin, self).__init__(pserver_endpoints)
def dispatch(self, varlist):
"""
use the `RoundRobin` method to dispatch variables across the parameter servers.
Args:
varlist (list): a list of Variables
"""
eplist = []
for var in varlist:
server_for_param = self._eps[self._step]
eplist.append(server_for_param)
self._step += 1
if self._step >= len(self._eps):
self._step = 0
return eplist
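# Editor's note: a small self-contained sketch appended here, not part of the
# original module; it shows how RoundRobin cycles through the endpoints.
# Plain strings are enough for RoundRobin; HashName.dispatch() would also
# call .name() on each element, so it needs Variable-like objects instead.
_example_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
_example_vars = ["var1", "var2", "var3", "var4", "var5"]
print(RoundRobin(_example_endpoints).dispatch(_example_vars))
# ['127.0.0.1:6007', '127.0.0.1:6008', '127.0.0.1:6007',
#  '127.0.0.1:6008', '127.0.0.1:6007']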
|
lonnen/socorro
|
webapp-django/crashstats/crashstats/migrations/0001_initial.py
|
Python
|
mpl-2.0
| 369
| 0
|
# Generated by Django 1.11.15 on 2018-09-25 15:40
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the M
|
PL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.db import migrations
class Migration(migrations.Migration):
dependencie
|
s = []
operations = []
|
crast/grpc
|
tools/run_tests/run_interops.py
|
Python
|
bsd-3-clause
| 1,163
| 0.009458
|
import argparse
import xml.etree.cElementTree as ET
import jobset
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('-l', '--language',
default='c++')
args = argp.parse_args()
# build job
build_job = jobset.JobSpec(cmdline=['tools/run_tests/run_interops_build.sh', '%s' % args.language], shortname='build')
# test jobs, each test is a separate job to run in parallel
_TESTS = ['large_unary', 'empty_unary', 'ping_pong', 'client_streaming', 'server_streaming']
jobs = []
jobNumber = 0
for test in _TESTS:
test_job = jobset.JobSpec(
cmdline=['tools/run_tests/run_interops_test.sh', '%s' % args.language,
|
'%s' % test],
shortname=test,
timeout_seconds=15*60)
jobs.append(test_job)
jobNumber+=1
root = ET.Element('testsuites')
testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
# always do the build of docker first, and then all the tests can run in parallel
jobset.run([build_job], maxjobs=1, xml_report=testsuite)
jobset.run(jobs, maxjob
|
s=jobNumber, xml_report=testsuite)
tree = ET.ElementTree(root)
tree.write('report.xml', encoding='UTF-8')
|
datacommonsorg/data
|
scripts/ourworldindata/covid19/preprocess_csv_test.py
|
Python
|
apache-2.0
| 2,497
| 0
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filecmp
import os
import tempfile
import unittest
from .preprocess_csv import create_formatted_csv_file
from .preprocess_csv import create_tmcf_file
# module_dir_ is the path to where this test is running from.
module_dir_ = os.path.dirname(__file__)
class TestPreprocessCsvTest(unittest.TestCase):
def test_create_csv(self):
with tempfile.TemporaryDirectory() as tmp_dir:
f = os.path.join(module_dir_, 'test_data/test_data.csv')
expected_csv_file = os.path.join(
module_dir_, 'test_data/expected_formatted_data.csv')
with open(f, "r") as f_in:
result_csv_file = os.path.join(tmp_dir,
'OurWorldInData_Covid19.csv')
create_formatted_csv_file(f_in, result_csv_file)
with open(result_csv_file, "r") as result_f:
result_str: str = result_f.read()
with open(expected_csv_file, "r") as expect_f:
expect_str: str = expect_f.read()
self.assertEqual(result_str, expect_str)
os.remove(result_csv_file)
def test_create_tmcf(self):
with tempfile.TemporaryDirectory() as tmp_dir:
expected_tmcf_file = os.path.join(
module_dir_, 'test_d
|
ata/expected_covid19.tmcf')
result_tmcf_file = os.path.join(tmp_dir,
'OurWorldInData_Covid19.tmcf')
create_tmcf_file(result_tmcf_file)
with open(result_tmcf_file, "r") as result_f:
|
result_str: str = result_f.read()
with open(expected_tmcf_file, "r") as expect_f:
expect_str: str = expect_f.read()
self.assertEqual(result_str, expect_str)
os.remove(result_tmcf_file)
if __name__ == '__main__':
unittest.main()
|
dkamotsky/program-y
|
src/test/parser/test_aiml_parser.py
|
Python
|
mit
| 30,276
| 0.000727
|
import unittest
import os
from xml.etree.ElementTree import ParseError
from programy.parser.aiml_parser import AIMLParser
from programy.parser.exceptions import ParserException
from programy.parser.pattern.nodes.root import PatternRootNode
from programy.parser.pattern.nodes.topic import PatternTopicNode
from programy.parser.pattern.nodes.that import PatternThatNode
from programy.parser.pattern.nodes.word import PatternWordNode
from programy.parser.pattern.nodes.oneormore import PatternOneOrMoreWildCardNode
from programy.parser.pattern.nodes.template import PatternTemplateNode
from programy.dialog import Sentence
class AIMLParserTests(unittest.TestCase):
def setUp(self):
self.parser = AIMLParser(supress_warnings=True, stop_on_invalid=True)
self.assertIsNotNone(self.parser)
def test_parse_from_file_valid(self):
filename = os.path.dirname(__file__)+ '/valid.aiml'
self.parser.parse_from_file(filename)
def test_parse_from_file_invalid(self):
filename = os.path.dirname(__file__)+ '/invalid.aiml'
self.parser.parse_from_file(filename)
def test_crud(self):
with self.assertRaises(ParseError) as raised:
self.parser.parse_from_text(
"""Blah Blah Blah
""")
def test_no_aiml(self):
with self.assertRaises(ParseError) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
""")
self.assertTrue(str(raised.exception).startswith("no element found:"))
def test_no_content(self):
with self.assertRaises(ParseError) as raised:
self.parser.parse_from_text(
"""
""")
self.assertTrue(str(raised.exception).startswith("no element found:"))
def test_base_aiml_no_content(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, no categories in aiml file")
def test_base_aiml_topic_no_name(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic>
</topic>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, missing name attribute for topic")
def test_base_aiml_topic_no_category(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
</topic>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, no categories in topic")
def test_base_aiml_topic_category_no_content(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
</category>
</topic>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_topic_at_multiple_levels(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<topic name="test2" />
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</topic>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, topic exists in category AND as parent node")
def test_base_aiml_topic_category_no_template(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>*</pattern>
</category>
</topic>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_category_no_content(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
</category>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_category_no_template(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
</category>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_topic_empty_parent_node(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</topic>
</aiml>
""")
self.assertEqual(raised.exception.message, "Topic name empty or null")
def test_base_aiml_topic_with_something_else(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<xxxx>
<pattern>*</pattern>
<template>RESPONSE</template>
</xxxx>
</topic>
</aiml>
""")
self.assertEqual(raised.exception.message, "Error unknown child node of topic, xxxx")
def test_base_aiml_topic_empty_child_node1(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic name="" />
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
self.assertEqual(raised.exception.message, "Topic node text is empty")
def test_base_aiml_topic_empty_child_node2(self):
with self.assertRaises(ParserException) as raised:
self.parser.parse_f
|
rom_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic></topic>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
|
""")
self.assertEqual(raised.exception.message, "Topic node text is empty")
def test_base_aiml_that_empty_child_node(self):
with self.assertRaises(ParserException) as raised:
|
ujvl/ray-ng
|
python/ray/reporter.py
|
Python
|
apache-2.0
| 6,847
| 0
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import json
import os
import traceback
import time
import datetime
from socket import AddressFamily
try:
import psutil
except ImportError:
print("The reporter requires psutil to run.")
import sys
sys.exit(1)
import ray.ray_constants as ray_constants
import ray.utils
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
def recursive_asdict(o):
if isinstance(o, tuple) and hasattr(o, "_asdict"):
return recursive_asdict(o._asdict())
if isinstance(o, (tuple, list)):
L = []
for k in o:
L.append(recursive_asdict(k))
return L
if isinstance(o, dict):
D = {k: recursive_asdict(v) for k, v in o.items()}
return D
return o
def jsonify_asdict(o):
return json.dumps(recursive_asdict(o))
def is_worker(cmdline):
return cmdline and cmdline[0].startswith("ray_")
def determine_ip_address():
"""Return the first IP address for an ethernet interface on the system."""
addrs = [
x.address for k, v in psutil.net_if_addrs().items() if k[0] == "e"
for x in v if x.family == AddressFamily.AF_INET
]
return addrs[0]
def to_posix_time(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Reporter(object):
"""A monitor process for monitoring Ray nodes.
Attributes:
host (str): The hostname of this machine. Used to improve the log
messages published to Redis.
redis_client: A client used to communicate with the Redis server.
"""
def __init__(self, redis_address, redis_password=None):
"""Initialize the reporter object."""
self.cpu_counts = (psutil.cpu_count(), psutil.cpu_count(logical=False))
self.ip_addr = determine_ip_address()
self.hostname = os.uname().nodename
_ = psutil.cpu_percent() # For initialization
self.redis_key = "{}.{}".format(ray.gcs_utils.REPORTER_CHANNEL,
self.hostname)
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self.network_stats_hist = [(0, (0.0, 0.0))] # time, (sent, recv)
@staticmethod
def get_cpu_percent():
return psutil.cpu_percent()
@staticmethod
def get_boot_time():
return psutil.boot_time()
@staticmethod
def get_network_stats():
ifaces = [
v for k, v in psutil.net_io_counters(pernic=True).items()
if k[0] == "e"
]
sent = sum((iface.bytes_sent for iface in ifaces))
recv = sum((iface.bytes_recv for iface in ifaces))
return sent, recv
@staticmethod
def get_mem_usage():
vm = psutil.virtual_memory()
return vm.total, vm.available, vm.percent
@staticmethod
def get_disk_usage():
return {x: psutil.disk_usage(x) for x in ["/", "/tmp"]}
@staticmethod
def get_workers():
return [
x.as_dict(attrs=[
"pid", "create_time", "cpu_percent", "cpu_times", "name",
"cmdline", "memory_info", "memory_full_info"
]) for x in psutil.process_iter(attrs=["cmdline"])
if is_worker(x.info["cmdline"])
]
def get_load_avg(self):
load = os.getloadavg()
per_cpu_load = tuple((round(x / self.cpu_counts[0], 2) for x in load))
return load, per_cpu_load
def get_all_stats(self):
now = to_posix_time(datetime.datetime.utcnow())
network_stats = self.get_network_stats()
self.network_stats_hist.append((now, network_stats))
self.network_stats_hist = self.network_stats_hist[-7:]
then, prev_network_stats = self.network_stats_hist[0]
netstats = ((network_stats[0] - prev_network_stats[0]) / (now - then),
(network_stats[1] - prev_network_stats[1]) / (now - then))
return {
"now": now,
"hostname": self.hostname,
"ip": self.ip_addr,
"cpu": self.get_cpu_percent(),
"cpus": self.cpu_counts,
"mem": self.get_mem_usage(),
"workers": self.get_workers(),
"boot_time": self.get_boot_time(),
"load_avg": self.get_load_avg(),
"disk": self.get_disk_usage(),
"net": netstats,
}
def perform_iteration(self):
"""Get any changes to the log files and push updates to Redis."""
stats = self.get_all_stats()
self.redis_client.publish(
self.redis_key,
jsonify_asdict(stats),
)
def run(self):
"""Run the reporter."""
while True:
try:
self.perform_iteration()
except Exception:
traceback.print_exc()
pass
time.sleep(ray_constants.REPORTER_UPDATE_INTERVAL_MS / 1000)
if __name__ == "__main__":
parser = argpar
|
se.ArgumentParser(
description=("Parse Redis server for the "
"reporter to connect to."))
parser.add_argument(
"--redis-address",
required=True,
type=str,
help="The address to use for Redis.")
parser.add_argument(
"--redis-password",
required=False,
type
|
=str,
default=None,
help="the password to use for Redis")
parser.add_argument(
"--logging-level",
required=False,
type=str,
default=ray_constants.LOGGER_LEVEL,
choices=ray_constants.LOGGER_LEVEL_CHOICES,
help=ray_constants.LOGGER_LEVEL_HELP)
parser.add_argument(
"--logging-format",
required=False,
type=str,
default=ray_constants.LOGGER_FORMAT,
help=ray_constants.LOGGER_FORMAT_HELP)
args = parser.parse_args()
ray.utils.setup_logger(args.logging_level, args.logging_format)
reporter = Reporter(args.redis_address, redis_password=args.redis_password)
try:
reporter.run()
except Exception as e:
# Something went wrong, so push an error to all drivers.
redis_client = ray.services.create_redis_client(
args.redis_address, password=args.redis_password)
traceback_str = ray.utils.format_error_message(traceback.format_exc())
message = ("The reporter on node {} failed with the following "
"error:\n{}".format(os.uname()[1], traceback_str))
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.REPORTER_DIED_ERROR, message)
raise e
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/bx_python-0.7.2-py2.7-linux-x86_64-ucs4.egg/bx/_seqmapping.py
|
Python
|
gpl-3.0
| 281
| 0.035587
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resour
|
ces.resource_filename(__name__,'_seqmapping.so')
__loader__ = None; del __bootstrap__, __loader__
|
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
terzeron/FeedMakerApplications
|
study/_javabeat/capture_item_link_title.py
|
Python
|
gpl-2.0
| 791
| 0.001264
|
#!/usr/bin/env python
import sys
import re
import getopt
from typing import List, Tuple
from feed_maker_util import IO
def main():
num_of_recent_feeds = 1000
optlist, _ = getopt.getopt(sys.argv[1:], "
|
n:")
for o, a in optlist:
if o == '-n':
num_of_recent_feeds = int(a)
line_list = IO.read_stdin_as_line_list()
result_list: List[Tuple[str, str]] = []
for line in line_list:
m = re.search(r'a href="(?P<link>[^"]+)"[^>]*title="(?P<title>[^"]+)"', line)
if m:
link = m.group("link")
title = m.group("title")
result_list.append((link, title))
for (li
|
nk, title) in result_list[:num_of_recent_feeds]:
print("%s\t%s" % (link, title))
if __name__ == "__main__":
sys.exit(main())
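# Editor's note: an illustration, not part of the original script, of the
# pattern used above; the URL and title are hypothetical. Given a stdin line
# containing
#   <a href="https://example.com/post" class="entry" title="Sample title">
# the script prints the captured link and title separated by a tab:
#   https://example.com/post    Sample title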
|
kinsights/brabeion
|
brabeion/tests/tests.py
|
Python
|
bsd-3-clause
| 2,258
| 0.001329
|
from django.conf import settings
from django.contrib.auth.models import User
from django.db import connection
from django.test import TestCase
from brabeion import badges
from brabeion.base import Badge, BadgeAwarded
from brabeion.tests.models import PlayerStat
class PointsBadge(Badge):
slug = "points"
levels = [
"Bronze",
|
"Silver",
"Gold",
]
events = [
"points_awarded",
]
multiple = False
def award(self, **state):
user = state["user"]
points = user.stats.points
if points > 10000:
return BadgeAwarded(3)
elif points > 7500:
return BadgeAwarded(2)
elif points > 5000:
return BadgeAwarded(1)
badges.register(PointsBadge)
class BaseTestCase(TestCase):
def assert_num_queries(self, n, func)
|
:
current_debug = settings.DEBUG
settings.DEBUG = True
current = len(connection.queries)
func()
self.assertEqual(current + n, len(connection.queries), connection.queries[current:])
settings.DEBUG = current_debug
class BadgesTests(BaseTestCase):
def test_award(self):
u = User.objects.create_user("Lars Bak", "lars@hotspot.com", "x864lyfe")
PlayerStat.objects.create(user=u)
badges.possibly_award_badge("points_awarded", user=u)
self.assertEqual(u.badges_earned.count(), 0)
u.stats.points += 5001
u.stats.save()
badges.possibly_award_badge("points_awarded", user=u)
self.assertEqual(u.badges_earned.count(), 1)
self.assertEqual(u.badges_earned.all()[0].badge.name, "Bronze")
badges.possibly_award_badge("points_awarded", user=u)
self.assertEqual(u.badges_earned.count(), 1)
u.stats.points += 2500
badges.possibly_award_badge("points_awarded", user=u)
self.assertEqual(u.badges_earned.count(), 2)
def test_lazy_user(self):
u = User.objects.create_user("Lars Bak", "lars@hotspot.com", "x864lyfe")
PlayerStat.objects.create(user=u, points=5001)
badges.possibly_award_badge("points_awarded", user=u)
self.assertEqual(u.badges_earned.count(), 1)
self.assert_num_queries(1, lambda: u.badges_earned.get().badge)
|
petervanderdoes/wger
|
wger/core/tests/test_repetition_unit.py
|
Python
|
agpl-3.0
| 3,162
| 0.000316
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You
|
should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://w
|
ww.gnu.org/licenses/>.
# wger
from wger.core.models import RepetitionUnit
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
WorkoutManagerAccessTestCase,
WorkoutManagerAddTestCase,
WorkoutManagerDeleteTestCase,
WorkoutManagerEditTestCase,
WorkoutManagerTestCase
)
class RepresentationTestCase(WorkoutManagerTestCase):
'''
Test the representation of a model
'''
def test_representation(self):
'''
Test that the representation of an object is correct
'''
self.assertEqual("{0}".format(RepetitionUnit.objects.get(pk=1)), 'Repetitions')
class OverviewTest(WorkoutManagerAccessTestCase):
'''
Tests the settings unit overview page
'''
url = 'core:repetition-unit:list'
anonymous_fail = True
class AddTestCase(WorkoutManagerAddTestCase):
'''
Tests adding a new unit
'''
object_class = RepetitionUnit
url = 'core:repetition-unit:add'
data = {'name': 'Furlongs'}
user_success = 'admin',
user_fail = ('general_manager1',
'general_manager2',
'member1',
'member2',
'trainer2',
'trainer3',
'trainer4',
'manager3')
class DeleteTestCase(WorkoutManagerDeleteTestCase):
'''
Tests deleting a unit
'''
pk = 1
object_class = RepetitionUnit
url = 'core:repetition-unit:delete'
user_success = 'admin',
user_fail = ('general_manager1',
'general_manager2',
'member1',
'member2',
'trainer2',
'trainer3',
'trainer4',
'manager3')
class EditTestCase(WorkoutManagerEditTestCase):
'''
Tests editing a unit
'''
pk = 1
object_class = RepetitionUnit
url = 'core:repetition-unit:edit'
data = {'name': 'Furlongs'}
user_success = 'admin',
user_fail = ('general_manager1',
'general_manager2',
'member1',
'member2',
'trainer2',
'trainer3',
'trainer4',
'manager3')
class ApiTestCase(api_base_test.ApiBaseResourceTestCase):
'''
Tests the unit resource
'''
pk = 1
resource = RepetitionUnit
private_resource = False
def get_resource_name(self):
return 'setting-repetitionunit'
|
ibamacsr/routes_registry_api
|
routes_registry_api/routes/management/commands/importstates.py
|
Python
|
agpl-3.0
| 1,055
| 0.002844
|
# -*- coding: utf-8 -*-
import simplejson
from django.core.managemen
|
t.base import BaseCommand
from django.contrib.gis.geos import MultiPolygon, Polygon
from ...models import State
class Command(BaseCommand):
args = 'filename'
help = 'Import states from a GeoJSON file'
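    # Illustrative usage (hypothetical file name; the command name comes from
    # this module's file name, importstates.py):
    #   python manage.py importstates brazil_states.geojson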
def handle(self, *args, **options):
for filename in args:
data_json = open(filename, 'r').read()
data = simplejson.loads(data_json)
for feature in data['features']:
state = State(
name=feature['properties'].get('na
|
me'),
code=feature['properties'].get('code'),
)
if feature['geometry'].get('type') == 'MultiPolygon':
state.geom = MultiPolygon(
[Polygon(poly) for poly in feature['geometry'].get('coordinates')[0]]
)
else:
state.geom = MultiPolygon(Polygon(feature['geometry'].get('coordinates')[0]))
state.save()
|
boada/desCluster
|
mkTargeted/legacy/targetedRealistic_async.py
|
Python
|
mit
| 4,107
| 0.006331
|
from multiprocessing import Pool
import h5py as hdf
import numpy as np
from calc_cluster_props import *
from data_handler import mkTruth, mkHalo
import os
import sys
class AsyncFactory:
def __init__(self, func, cb_func):
self.func = func
self.cb_func = cb_func
self.pool = Pool()
def call(self,*args, **kwargs):
self.pool.apply_async(self.func, args, kwargs, self.cb_func)
def wait(self):
self.pool.close()
self.pool.join()
def worker(pos, data, center, tZ):
#print "IN:PID: %d \t Value: %d" % (os.getpid(), pos)
data = updateArray(data)
#data = findClusterRedshift(data)
data['CLUSZ'] = tZ
data = findSeperationSpatial(data, center)
data = findLOSV(data)
# make initial cuts
mask = ab
|
s(data['LOSV']) < 5000
data = data[mask]
while True:
try:
if size == data.size:
break
except NameError:
pass
size = data.size
#
|
print 'size', data.size
#data = rejectInterlopers(data)
try:
x = shifty_gapper(data['SEP'], data['Z'], tZ, ngap=15, glimit=500)
data = data[x]
except:
break
#data = findLOSVD(data)
data = findLOSVDgmm(data)
data['LOSVD'] = data['LOSVDgmm']
data = findR200(data)
mask = data['SEP'] < data['R200'][0]
data = data[mask]
data = findClusterRedshift(data)
data = findSeperationSpatial(data, center)
#data = findLOSVDgmm(data)
data = calc_mass_Evrard(data, A1D = 1177, alpha = 0.364)
#print "OUT:PID: %d \t Value: %d" % (os.getpid(), pos)
return pos, data
def cb_func((pos, data)):
if pos % 1000 == 0:
print pos
results['IDX'][pos] = pos
results['CLUSZ'][pos] = data['CLUSZ'][0]
results['LOSVD'][pos] = data['LOSVD'][0]
results['LOSVDgmm'][pos] = data['LOSVDgmm'][0]
results['MASS'][pos] = data['MASS'][0]
results['R200'][pos] = data['R200'][0]
results['NGAL'][pos] = data.size
if __name__ == "__main__":
async_worker = AsyncFactory(worker, cb_func)
halo = mkHalo()
truth = mkTruth()
mask = truth['g'] < 23.
truth = truth[mask]
mask = (halo['m200c']/0.72 >= 1e13) & (halo['upid'] == -1)
maskedHalo = halo[mask]
hids, uniqueIdx = np.unique(maskedHalo['id'], return_index=True)
# limit the cases
print sys.argv[1], sys.argv[2]
if int(sys.argv[1]) == 0:
hids = hids[:int(sys.argv[2])]
uniqueIdx = uniqueIdx[:int(sys.argv[2])]
elif int(sys.argv[2]) == 9:
hids = hids[int(sys.argv[1]):]
uniqueIdx = uniqueIdx[int(sys.argv[1]):]
else:
hids = hids[int(sys.argv[1]):int(sys.argv[2])]
uniqueIdx = uniqueIdx[int(sys.argv[1]):int(sys.argv[2])]
# make the results container
results = np.zeros((hids.size,), dtype=[('IDX', '>i4'), ('HALOID',
'>i8'), ('ZSPEC', '>f4'), ('VRMS', '>f4'), ('M200c', '>f4'), ('RVIR',
'>f4'), ('CLUSZ', '>f4'), ('LOSVD', '>f4'), ('LOSVDgmm', '>f4'),
('MASS', '>f4'), ('R200', '>f4'), ('NGAL', '>i4')])
results['HALOID'] = hids
# now we have to make some initial cuts and then make final spatial cuts
for i, SH in enumerate(hids):
center = (maskedHalo['ra'][uniqueIdx[i]],
maskedHalo['dec'][uniqueIdx[i]])
raMask = (center[0] - 0.5 < truth['RA']) & (truth['RA'] < center[0] + 0.5)
decMask = (center[1] - 0.5 < truth['DEC']) & (truth['DEC'] < center[1] +
0.5)
async_worker.call(i, truth[raMask & decMask], center,
maskedHalo['zspec'][uniqueIdx[i]])
results['ZSPEC'][i] = maskedHalo['zspec'][uniqueIdx[i]]
results['VRMS'][i] = maskedHalo['vrms'][uniqueIdx[i]]/np.sqrt(3)
results['M200c'][i] = maskedHalo['m200c'][uniqueIdx[i]]/0.72
results['RVIR'][i] = maskedHalo['rvir'][uniqueIdx[i]]/0.72
async_worker.wait()
with hdf.File('result_targetedRealistic'+str(os.environ['LSB_JOBID'])+'.hdf5',
'w') as f:
f['result_targetedRealistic'] = results
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I_V_.py
|
Python
|
apache-2.0
| 572
| 0.026224
|
from fontTools.misc.py23 import strjoin, tobytes, tostr
from . import
|
asciiTable
class table_T_S_I_V_(asciiTable.asciiTable):
def toXML(self
|
, writer, ttFont):
data = tostr(self.data)
# removing null bytes. XXX needed??
data = data.split('\0')
data = strjoin(data)
writer.begintag("source")
writer.newline()
writer.write_noindent(data.replace("\r", "\n"))
writer.newline()
writer.endtag("source")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
lines = strjoin(content).split("\n")
self.data = tobytes("\r".join(lines[1:-1]))
|
dims/nova
|
nova/objects/request_spec.py
|
Python
|
apache-2.0
| 22,987
| 0.000131
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.virt import hardware
@base.NovaObjectRegistry.register
class RequestSpec(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: ImageMeta version 1.6
# Version 1.2: SchedulerRetries version 1.1
# Version 1.3: InstanceGroup version 1.10
# Version 1.4: ImageMeta version 1.7
# Version 1.5: Added get_by_instance_uuid(), create(), save()
VERSION = '1.5'
fields = {
'id': fields.IntegerField(),
'image': fields.ObjectField('ImageMeta', nullable=True),
'numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
'project_id': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'flavor': fields.ObjectField('Flavor', nullable=False),
'num_instances': fields.IntegerField(default=1),
'ignore_hosts': fields.ListOfStringsField(nullable=True),
'force_hosts': fields.ListOfStringsField(nullable=True),
'force_nodes': fields.ListOfStringsField(nullable=True),
'retry': fields.ObjectField('SchedulerRetries', nullable=True),
'limits': fields.ObjectField('SchedulerLimits', nullable=True),
'instance_group': fields.ObjectField('InstanceGroup', nullable=True),
# NOTE(sbauza): Since hints are depending on running filters, we prefer
# to leave the API correctly validating the hints per the filters and
# just provide to the RequestSpec object a free-form dictionary
'scheduler_hints': fields.DictOfListOfStringsField(nullable=True),
'instance_uuid': fields.UUIDField(),
}
@property
def vcpus(self):
return self.flavor.vcpus
@property
def memory_mb(self):
return self.flavor.memory_mb
@property
def root_gb(self):
return self.flavor.root_gb
@property
def ephemeral_gb(self):
return self.flavor.ephemeral_gb
@property
def swap(self):
return self.flavor.swap
def _image_meta_from_image(self, image):
if isinstance(image, objects.ImageMeta):
self.image = image
elif isinstance(image, dict):
# NOTE(sbauza): Until Nova is fully providing an ImageMeta object
# for getting properties, we still need to hydrate it here
# TODO(sbauza): To be removed once all RequestSpec hydrations are
# done on the conductor side and if the image is an ImageMeta
self.image = objects.ImageMeta.from_dict(image)
else:
self.image = None
def _from_instance(self, instance):
if isinstance(instance, obj_instance.Instance):
# NOTE(sbauza): Instance should normally be a NovaObject...
getter = getattr
elif isinstance(instance, dict):
|
# NOTE(sbauza): ... but there are some cas
|
es where request_spec
# has an instance key as a dictionary, just because
# select_destinations() is getting a request_spec dict made by
# sched_utils.build_request_spec()
# TODO(sbauza): To be removed once all RequestSpec hydrations are
# done on the conductor side
getter = lambda x, y: x.get(y)
else:
# If the instance is None, there is no reason to set the fields
return
instance_fields = ['numa_topology', 'pci_requests', 'uuid',
'project_id', 'availability_zone']
for field in instance_fields:
if field == 'uuid':
setattr(self, 'instance_uuid', getter(instance, field))
elif field == 'pci_requests':
self._from_instance_pci_requests(getter(instance, field))
elif field == 'numa_topology':
self._from_instance_numa_topology(getter(instance, field))
else:
setattr(self, field, getter(instance, field))
def _from_instance_pci_requests(self, pci_requests):
if isinstance(pci_requests, dict):
pci_req_cls = objects.InstancePCIRequests
self.pci_requests = pci_req_cls.from_request_spec_instance_props(
pci_requests)
else:
self.pci_requests = pci_requests
def _from_instance_numa_topology(self, numa_topology):
if isinstance(numa_topology, dict):
self.numa_topology = hardware.instance_topology_from_instance(
dict(numa_topology=numa_topology))
else:
self.numa_topology = numa_topology
def _from_flavor(self, flavor):
if isinstance(flavor, objects.Flavor):
self.flavor = flavor
elif isinstance(flavor, dict):
# NOTE(sbauza): Again, request_spec is primitived by
# sched_utils.build_request_spec() and passed to
# select_destinations() like this
# TODO(sbauza): To be removed once all RequestSpec hydrations are
# done on the conductor side
self.flavor = objects.Flavor(**flavor)
def _from_retry(self, retry_dict):
self.retry = (SchedulerRetries.from_dict(self._context, retry_dict)
if retry_dict else None)
def _populate_group_info(self, filter_properties):
if filter_properties.get('instance_group'):
# New-style group information as a NovaObject, we can directly set
# the field
self.instance_group = filter_properties.get('instance_group')
elif filter_properties.get('group_updated') is True:
# Old-style group information having ugly dict keys containing sets
# NOTE(sbauza): Can be dropped once select_destinations is removed
policies = list(filter_properties.get('group_policies'))
hosts = list(filter_properties.get('group_hosts'))
members = list(filter_properties.get('group_members'))
self.instance_group = objects.InstanceGroup(policies=policies,
hosts=hosts,
members=members)
# hosts has to be not part of the updates for saving the object
self.instance_group.obj_reset_changes(['hosts'])
else:
# Set the value anyway to avoid any call to obj_attr_is_set for it
self.instance_group = None
def _from_limits(self, limits_dict):
self.limits = SchedulerLimits.from_dict(limits_dict)
def _from_hints(self, hints_dict):
if hints_dict is None:
self.scheduler_hints = None
return
self.scheduler_hints = {
hint: value if isinstance(value, list) else [value]
for hint, value in six.iteritems(hints_dict)}
@classmethod
def from_primitives(cls, context, request_spec, filter_properties):
"""Returns a new RequestSpec object by hydrating it from legacy dicts.
Deprecated. A RequestSpec object is created early in the boot process
using the from_components method. That object will either be passed to
|
klen/simpletree
|
benchmark/main/tests.py
|
Python
|
bsd-3-clause
| 3,596
| 0.000278
|
from django.test import TestCase
import time
from .models import SimpleTree, MPTTTree, TBMP, TBNS
def timeit(method):
|
""" Measure time of method's execution.
"""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print '\n%r: %2.2f sec' % \
(method.__name__, te - ts)
return result
return timed
CYCLES = 8
class Benchmark(object):
@timeit
def test_creation(self):
self._create_tree()
def test_delete(self):
self._create_tree(cycles=7)
@time
|
it
def test_deletion():
for _ in xrange(pow(2, CYCLES) / 2):
self._delete_last()
test_deletion()
def test_get(self):
self._create_tree(cycles=7)
@timeit
def test_get_tree():
root = self._get_root()
for _ in xrange(100):
self._get_tree(root)
test_get_tree()
def _create_tree(self, cycles=CYCLES):
root = self._create_root(title='root1')
nodes = [root]
        for _ in xrange(cycles):
new_nodes = []
for node in nodes:
new_nodes.append(self._create_child(parent=node))
new_nodes.append(self._create_child(parent=node))
nodes = new_nodes
return nodes
def _create_root(self, **params):
pass
def _create_child(self, parent, **params):
pass
def _delete_last(self):
pass
def _get_root(self):
pass
def _get_tree(self, parent):
pass
class SimpleTest(TestCase, Benchmark):
def setUp(self):
print "\nSimpleTree benchmark"
def _create_root(self, **params):
return SimpleTree.objects.create(**params)
def _create_child(self, parent, **params):
return SimpleTree.objects.create(parent=parent, **params)
def _delete_last(self):
SimpleTree.objects.order_by('-id')[0].delete()
def _get_root(self):
return SimpleTree.objects.get(parent=None)
def _get_tree(self, parent):
return parent.get_tree()
class MPTTTest(TestCase, Benchmark):
def setUp(self):
print "\nMPTT benchmark"
def _create_root(self, **params):
return MPTTTree.objects.create(**params)
def _create_child(self, parent, **params):
return MPTTTree.objects.create(parent=parent, **params)
def _delete_last(self):
MPTTTree.objects.order_by('-id')[0].delete()
def _get_root(self):
return MPTTTree.objects.get(parent=None)
def _get_tree(self, parent):
return list(parent.get_ancestors()) + list(parent.get_descendants(include_self=False))
class TreeBeardMP(TestCase, Benchmark):
def setUp(self):
print "\nTreebeard MP benchmark"
def _create_root(self, **params):
return TBMP.add_root(**params)
def _create_child(self, parent, **params):
return parent.add_child(**params)
def _delete_last(self):
TBMP.objects.order_by('-id')[0].delete()
def _get_root(self):
return TBMP.get_root_nodes()[0]
def _get_tree(self, parent):
TBMP.get_tree(parent=parent)
class TreeBeardNS(TreeBeardMP):
def setUp(self):
print "\nTreebeard NS benchmark"
def _create_root(self, **params):
return TBNS.add_root(**params)
def _delete_last(self):
TBNS.objects.order_by('-id')[0].delete()
def _get_root(self):
return TBNS.get_root_nodes()[0]
def _get_tree(self, parent):
TBNS.get_tree(parent=parent)
|
hellotomfan/v8-coroutine
|
deps/v8/tools/push-to-trunk/releases.py
|
Python
|
gpl-2.0
| 18,090
| 0.007573
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script retrieves the history of all V8 branches and trunk revisions and
# their corresponding Chromium revisions.
# Requires a chromium checkout with branch heads:
# gclient sync --with_branch_heads
# gclient fetch
import argparse
import csv
import itertools
import json
import os
import re
import sys
from common_includes import *
CONFIG = {
"BRANCHNAME": "retrieve-v8-releases",
"PERSISTFILE_BASENAME": "/tmp/v8-re
|
leases-tempfile",
}
# Expression for retrieving the bleeding edge revision from a commit message.
PUSH_MSG_SVN_RE = re.compile(r".* \(based
|
on bleeding_edge revision r(\d+)\)$")
PUSH_MSG_GIT_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")
# Expression for retrieving the merged patches from a merge commit message
# (old and new format).
MERGE_MESSAGE_RE = re.compile(r"^.*[M|m]erged (.+)(\)| into).*$", re.M)
CHERRY_PICK_TITLE_GIT_RE = re.compile(r"^.* \(cherry\-pick\)\.?$")
# New git message for cherry-picked CLs. One message per line.
MERGE_MESSAGE_GIT_RE = re.compile(r"^Merged ([a-fA-F0-9]+)\.?$")
# Expression for retrieving reverted patches from a commit message (old and
# new format).
ROLLBACK_MESSAGE_RE = re.compile(r"^.*[R|r]ollback of (.+)(\)| in).*$", re.M)
# New git message for reverted CLs. One message per line.
ROLLBACK_MESSAGE_GIT_RE = re.compile(r"^Rollback of ([a-fA-F0-9]+)\.?$")
# Expression for retrieving the code review link.
REVIEW_LINK_RE = re.compile(r"^Review URL: (.+)$", re.M)
# Expression with three versions (historical) for extracting the v8 revision
# from the chromium DEPS file.
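# Illustrative (hypothetical) DEPS line matched by the first alternative below;
# the captured group would be 3.26.3.12:
#   "v8_revision": "3.26.3.12",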
DEPS_RE = re.compile(r"""^\s*(?:["']v8_revision["']: ["']"""
"""|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@"""
"""|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)"""
"""([^"']+)["'].*$""", re.M)
# Expression to pick tag and revision for bleeding edge tags. To be used with
# output of 'svn log'.
BLEEDING_EDGE_TAGS_RE = re.compile(
r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
def SortBranches(branches):
"""Sort branches with version number names."""
return sorted(branches, key=SortingKey, reverse=True)
def FilterDuplicatesAndReverse(cr_releases):
"""Returns the chromium releases in reverse order filtered by v8 revision
duplicates.
cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
"""
last = ""
result = []
for release in reversed(cr_releases):
if last == release[1]:
continue
last = release[1]
result.append(release)
return result
def BuildRevisionRanges(cr_releases):
"""Returns a mapping of v8 revision -> chromium ranges.
The ranges are comma-separated, each range has the form R1:R2. The newest
entry is the only one of the form R1, as there is no end range.
cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
cr_rev either refers to a chromium svn revision or a chromium branch number.
"""
range_lists = {}
cr_releases = FilterDuplicatesAndReverse(cr_releases)
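  # Worked example with hypothetical data: if the original input was
  #   [["10", "v8_B"], ["7", "v8_A"], ["5", "v8_A"]]
  # the filtered list here is [["5", "v8_A"], ["10", "v8_B"]], and the function
  # returns {"v8_A": "5:9", "v8_B": "10"}.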
# Visit pairs of cr releases from oldest to newest.
for cr_from, cr_to in itertools.izip(
cr_releases, itertools.islice(cr_releases, 1, None)):
# Assume the chromium revisions are all different.
assert cr_from[0] != cr_to[0]
# TODO(machenbach): Subtraction is not git friendly.
ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1)
# Collect the ranges in lists per revision.
range_lists.setdefault(cr_from[1], []).append(ran)
# Add the newest revision.
if cr_releases:
range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0])
# Stringify and comma-separate the range lists.
return dict((rev, ", ".join(ran)) for rev, ran in range_lists.iteritems())
def MatchSafe(match):
if match:
return match.group(1)
else:
return ""
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
self.CommonPrepare()
self.PrepareBranch()
class RetrieveV8Releases(Step):
MESSAGE = "Retrieve all V8 releases."
def ExceedsMax(self, releases):
return (self._options.max_releases > 0
and len(releases) > self._options.max_releases)
def GetBleedingEdgeFromPush(self, title):
return MatchSafe(PUSH_MSG_SVN_RE.match(title))
def GetBleedingEdgeGitFromPush(self, title):
return MatchSafe(PUSH_MSG_GIT_RE.match(title))
def GetMergedPatches(self, body):
patches = MatchSafe(MERGE_MESSAGE_RE.search(body))
if not patches:
patches = MatchSafe(ROLLBACK_MESSAGE_RE.search(body))
if patches:
# Indicate reverted patches with a "-".
patches = "-%s" % patches
return patches
def GetMergedPatchesGit(self, body):
patches = []
for line in body.splitlines():
patch = MatchSafe(MERGE_MESSAGE_GIT_RE.match(line))
if patch:
patches.append(patch)
patch = MatchSafe(ROLLBACK_MESSAGE_GIT_RE.match(line))
if patch:
patches.append("-%s" % patch)
return ", ".join(patches)
def GetReleaseDict(
self, git_hash, bleeding_edge_rev, bleeding_edge_git, branch, version,
patches, cl_body):
revision = self.vc.GitSvn(git_hash)
return {
# The SVN revision on the branch.
"revision": revision,
# The git revision on the branch.
"revision_git": git_hash,
# The SVN revision on bleeding edge (only for newer trunk pushes).
"bleeding_edge": bleeding_edge_rev,
# The same for git.
"bleeding_edge_git": bleeding_edge_git,
# The branch name.
"branch": branch,
# The version for displaying in the form 3.26.3 or 3.26.3.12.
"version": version,
# The date of the commit.
"date": self.GitLog(n=1, format="%ci", git_hash=git_hash),
# Merged patches if available in the form 'r1234, r2345'.
"patches_merged": patches,
# Default for easier output formatting.
"chromium_revision": "",
# Default for easier output formatting.
"chromium_branch": "",
# Link to the CL on code review. Trunk pushes are not uploaded, so this
# field will be populated below with the recent roll CL link.
"review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
# Link to the commit message on google code.
"revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
% revision),
}
def GetRelease(self, git_hash, branch):
self.ReadAndPersistVersion()
base_version = [self["major"], self["minor"], self["build"]]
version = ".".join(base_version)
body = self.GitLog(n=1, format="%B", git_hash=git_hash)
patches = ""
if self["patch"] != "0":
version += ".%s" % self["patch"]
if CHERRY_PICK_TITLE_GIT_RE.match(body.splitlines()[0]):
patches = self.GetMergedPatchesGit(body)
else:
patches = self.GetMergedPatches(body)
title = self.GitLog(n=1, format="%s", git_hash=git_hash)
bleeding_edge_revision = self.GetBleedingEdgeFromPush(title)
bleeding_edge_git = ""
if bleeding_edge_revision:
bleeding_edge_git = self.vc.SvnGit(bleeding_edge_revision,
self.vc.RemoteMasterBranch())
else:
bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
return self.GetReleaseDict(
git_hash, bleeding_edge_revision, bleeding_edge_git, branch, version,
patches, body), self["patch"]
def GetReleasesFromMaster(self):
tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20")
releases = []
for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
git_hash = self.vc.SvnGit(revision)
# Add bleeding edge release. It does not contain patches or a code
# review link, as tags are not uploaded.
releases.append(self.GetReleaseDict(
git_hash, revision, git_hash, self.vc.MasterBranch(), tag, "", ""))
return releases
def GetReleasesFromBranch(self, branch):
self.GitReset(self.vc.RemoteBranch(branch
|
bmi-forum/bmi-pyre
|
pythia-0.8/examples/tabulator/tabulator/Quadratic.py
|
Python
|
gpl-2.0
| 1,399
| 0.005004
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
class Quadratic(Component):
class Inventory(Component.Inventory):
impo
|
rt pyre.inventory
a = pyre.inventory.float("a", default=0.0)
b = pyre.inventory.float("b", default=0.0)
c = pyre.inventory.float("c", default=0.0)
def initialize(self):
self.a = self.inventory.a
self.b = self.inventory.b
self.c = self.inventory.c
import tabulator._tabulator
tabulator._tabulator.quadraticSet(self.a, self.b, self.c)
return
|
def __init__(self):
Component.__init__(self, "quadratic", "functor")
self.a = 0.0
self.b = 0.0
self.c = 0.0
import tabulator._tabulator
self.handle = tabulator._tabulator.quadratic()
return
def _init(self):
Component._init(self)
self.initialize()
return
# version
__id__ = "$Id: Quadratic.py,v 1.1.1.1 2005/03/17 20:03:02 aivazis Exp $"
# End of file
|
rtx3/saltstack-deyunio
|
srv/salt/modules/xmpp.py
|
Python
|
apache-2.0
| 5,646
| 0
|
# -*- coding: utf-8 -*-
'''
Module for Sending Messages via XMPP (a.k.a. Jabber)
.. versionadded:: 2014.1.0
:depends: - sleekxmpp python module
:configuration: This module can be used by either passing a jid and password
directly to send_message, or by specifying the name of a configuration
profile in the minion config, minion pillar, or master config.
For example:
.. code-block:: yaml
my-xmpp-login:
xmpp.jid: myuser@jabber.example.org/resourcename
xmpp.password: verybadpass
The resourcename refers to the resource that is using this account. It is
user-definable, and optional. The following configurations are both valid:
.. code-block:: yaml
my-xmpp-login:
xmpp.jid: myuser@jabber.example.org/salt
xmpp.password: verybadpass
my-xmpp-login:
xmpp.jid: myuser@jabber.example.org
xmpp.password: verybadpass
'''
# Import Python Libs
from __future__ import absolute_import
import logging
HAS_LIBS = False
try:
from sleekxmpp import ClientXMPP as _ClientXMPP
from sleekxmpp.exceptions import XMPPError
HAS_LIBS = True
except ImportError:
class _ClientXMPP(object):
'''
Fake class in order not to raise errors
'''
log = logging.getLogger(__name__)
__virtualname__ = 'xmpp'
MUC_DEPRECATED = "Use of send mask waiters is deprecated."
def __virtual__():
'''
Only load this module if sleekxmpp is installed on this minion.
'''
if HAS_LIBS:
return __virtualname__
return False
class SleekXMPPMUC(logging.Filter):
def filter(self, record):
return not record.getMessage() == MUC_DEPRECATED
class SendMsgBot(_ClientXMPP):
def __init__(self, jid, password, recipient, msg): # pylint: disable=E1002
# PyLint wrongly reports an error when calling super, hence the above
# disable call
super(SendMsgBot, self).__init__(jid, password)
self.recipients = [] if recipient is None else [recipient]
self.rooms = []
self.msg = msg
self.add_event_handler('session_start', self.start)
@classmethod
def create_multi(cls, jid, password, msg, recipients=None, rooms=None,
nick="SaltStack Bot"):
'''
        Alternate constructor that accepts multiple recipients and rooms
'''
obj = SendMsgBot(jid, password, None, msg)
obj.recipients = [] if recipients is None else recipients
obj.rooms = [] if rooms is None else rooms
obj.nick = nick
return obj
def start(self, event):
self.send_presence()
self.get_roster()
for recipient in self.recipients:
self.send_message(mto=recipient,
mbody=self.msg,
mtype='chat')
for room in self.rooms:
self.plugin['xep_0045'].joinMUC(room,
self.nick,
wait=True)
self.send_message(mto=room,
mbody=self.msg,
mtype='groupchat')
self.disconnect(wait=True)
def send_msg(recipient, message, jid=None, password=None, profile=None):
'''
Send a message to an XMPP recipient. Designed for use in states.
CLI Examples::
xmpp.send_msg 'admins@xmpp.example.com' 'This is a salt module test' \
profile='my-xmpp-account'
xmpp.send_msg 'admins@xmpp.example.com' 'This is a salt module test' \
jid='myuser@xmpp.example.com/salt' password='verybadpass'
'''
if profile:
creds = __salt__['config.option'](profile)
jid = creds.get('xmpp.jid')
password = creds.get('xmpp.password')
xmpp = SendMsgBot(jid, password, recipient, message)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0199') # XMPP Ping
if xmpp.connect():
xmpp.process(block=True)
return True
return False
def send_msg_multi(message,
recipients=None,
rooms=None,
jid=None,
password=None,
nick="SaltStack Bot",
profile=None):
'''
    Send a message to an XMPP recipient; supports sending the message to
    multiple recipients or chat rooms.
CLI Examples::
xmpp.send_msg recipients=['admins@xmpp.example.com'] \
rooms=['secret@conference.xmpp.example.com'] \
'This is a salt module test' \
profile='my-xmpp-account'
xmpp.send_msg recipients=['admins@xmpp.example.com'] \
rooms=['secret@conference.xmpp.example.com'] \
'This is a salt module test' \
jid='myuser@xmpp.example.com/salt' password='v
|
erybadpass'
'''
# Remove: [WARNING ] Use of send mask waiters is deprecated.
for handler in logging.root.handlers:
handler.addFilter(SleekXMPPMUC())
if profile:
creds = __salt__['config.option'](profile)
jid = creds.get('xmpp.jid')
password = creds.get('xmpp.password')
xmpp =
|
SendMsgBot.create_multi(
jid, password, message, recipients=recipients, rooms=rooms)
if rooms:
xmpp.register_plugin('xep_0045') # MUC plugin
if xmpp.connect():
try:
xmpp.process(block=True)
return True
except XMPPError as err:
log.error("Could not send message, error: %s", err)
else:
log.error("Could not connect to XMPP server")
return False
|
endlessm/chromium-browser
|
third_party/llvm/llvm/test/MC/COFF/bigobj.py
|
Python
|
bsd-3-clause
| 935
| 0.004278
|
# RUN: python %s | llvm-mc -filetype=obj -triple i686-pc-win32 - | llvm-readobj -h | FileCheck %s
from __future__ import print_function
# This test checks that the COFF object emitter can produce objects with
# more than 65279 sections.
# While we only generate 65277 sections, an implicit .text, .data and .bss will
# also be emitted. This brings the total to 65280.
num_sections = 65277
# CHECK: ImageFileHeader {
# CHECK-NEXT: Machine: IMAGE_FILE_MACHINE_I386
# CHECK
|
-NEXT: SectionCount: 65280
# CHECK-NEXT: TimeDateStamp: {{[0-9]+}}
# CHECK-NEXT: PointerToSymbolTable: 0x{{[0-9A-F]+}}
# CHECK-NEXT: SymbolCount: 195837
# CHECK-NEXT: OptionalHeaderSize: 0
# CHECK-NEXT: Characteristics [ (0x0)
# CHECK-NEXT: ]
# CHECK-NEXT: }
for i in range(0, num_sections):
print(""" .section .bss,"bw",discard,
|
_b%d
.globl _b%d # @b%d
_b%d:
.byte 0 # 0x0
""" % (i, i, i, i))
|
poranmeloge/test-github
|
stm32_rtt_wifi/bsp/efm32/rtconfig.py
|
Python
|
gpl-2.0
| 2,289
| 0.009611
|
import os
# toolchains options
ARCH = 'arm'
CPU = 'cortex-m3'
CROSS_TOOL = 'gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'C:\Program Files (x86)\CodeSourcery\Sourcery G++ Lite\bin'
#EXEC_PATH = 'C:\Program Files (x86)\yagarto\bin'
elif CROSS_TOOL == 'keil':
print '================ERROR============================'
print 'Not support keil yet!'
print '================================================='
exit(0)
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
print 'Not support iar yet!'
print '====================================
|
============='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
# EFM32_BOARD = 'EFM32_G8XX_STK'
# EFM32_BOARD = 'EFM32_GXXX_DK'
EFM32_BOARD = 'EFM32GG_DK3750'
if EFM32_BOARD == 'EFM32_G8XX_STK':
EFM32_FAMILY = 'Gecko'
EFM32_TYPE = 'EFM32G890F128'
EFM32_LCD = 'none'
elif EFM32_BOARD == 'EFM32_GXXX_DK':
EFM32_FAMILY = 'Gecko'
EFM32_TYPE = 'EFM32G290F128'
EFM32_LCD = 'none'
elif EFM32_BOARD == 'EFM32GG_DK3750
|
':
EFM32_FAMILY = 'Giant Gecko'
EFM32_TYPE = 'EFM32GG990F1024'
# EFM32_LCD = 'LCD_MAPPED'
EFM32_LCD = 'LCD_DIRECT'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-efm32.map,-cref,-u,__cs3_reset -T'
if EFM32_BOARD == 'EFM32_G8XX_STK' or EFM32_BOARD == 'EFM32_GXXX_DK':
LFLAGS += ' efm32g_rom.ld'
elif EFM32_BOARD == 'EFM32GG_DK3750':
LFLAGS += ' efm32gg_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
|
mitre/multiscanner
|
multiscanner/modules/MachineLearning/EndgameEmber.py
|
Python
|
mpl-2.0
| 2,656
| 0.001506
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
By default, this module uses the pre-built Ember model from
https://pubdata.endgame.com/ember/ember_dataset.tar.bz2.
Documentation about training a new model can be found on the Ember GitHub page
(https://github.com/endgameinc/ember).
After training a new model, place the resulting txt file in
`multiscanner/etc` and update `config.ini` with the new filename.
"""
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
from pathlib import Path
from multiscanner import CONFIG
__authors__ = "Patrick Copeland"
__license__ = "MPL 2.0"
TYPE = "MachineLearning"
NAME = "EndgameEmber"
REQUIRES = ['libmagic']
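# A hypothetical config.ini entry for a retrained model (the section name
# mirrors NAME above and the keys mirror DEFAULTCONF below; the path is an
# example only):
#
#   [EndgameEmber]
#   ENABLED = True
#   path-to-model = /opt/multiscanner/etc/ember/my_retrained_model.txt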
DEFAULTCONF = {
'ENABLED': False,
'path-to-model': os.path.join(os.path.split(CONFIG
|
)[0], 'etc', 'ember', 'ember_model_2017.txt'),
}
LGBM_MODEL = None
try:
import ember
has_ember = True
except ImportError as e:
print("ember module not installed...")
has_ember = False
try:
import lightgbm as lgb
except ImportError as e:
print("lightgbm module needed for ember. Not installed...")
has_ember = False
def check(conf=DEFAULTCONF):
if not conf['EN
|
ABLED']:
return False
if not has_ember:
return False
if not Path(conf['path-to-model']).is_file():
print("'{}' does not exist. Check config.ini for model location.".format(conf['path-to-model']))
return False
try:
global LGBM_MODEL
LGBM_MODEL = lgb.Booster(model_file=conf['path-to-model'])
except lgb.LightGBMError as e:
print("Unable to load model, {}. ({})".format(conf['path-to-model'], e))
return False
return True
def scan(filelist, conf=DEFAULTCONF):
results = []
for fname in filelist:
# Ensure libmagic returns results
if REQUIRES[0] is not None:
            # only run the analytic if libmagic identifies a PE32 executable
file_type = _get_libmagicresults(REQUIRES[0][0], fname)
if file_type.startswith('PE32'):
with open(fname, 'rb') as fh:
ember_result = ember.predict_sample(LGBM_MODEL, fh.read())
results.append(
(fname, {'Prediction': ember_result})
)
metadata = {}
metadata["Name"] = NAME
metadata["Type"] = TYPE
return (results, metadata)
def _get_libmagicresults(results, fname):
libmagicdict = dict(results)
return libmagicdict.get(fname)
|