| repo_name stringlengths 5–92 | path stringlengths 4–221 | copies stringclasses 19 values | size stringlengths 4–6 | content stringlengths 766–896k | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51–99.9 | line_max int64 32–997 | alpha_frac float64 0.25–0.96 | autogenerated bool 1 class | ratio float64 1.5–13.6 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
SahilTikale/haas
|
examples/dbinit.py
|
2
|
1653
|
#!/usr/bin/python
"""
Register nodes with HIL.
This is intended to be used as a template, either for creating a mock HIL setup
for development or for registering real-life nodes that follow a particular
pattern.
In the example environment for which this module is written, there are N_NODES
nodes whose IPMI interfaces are sequentially numbered starting with 10.0.0.1,
with a username of "ADMIN_USER" and a password of "ADMIN_PASSWORD".
The ports are also numbered sequentially and are named following a Dell switch
scheme, with ports that look like "gi1/0/5".
It could be used in an environment similar to the one to which
``hil.cfg`` corresponds, though it could also be used for development with the
``hil.cfg.dev*``
"""
from subprocess import check_call
N_NODES = 6
ipmi_user = "ADMIN_USER"
ipmi_pass = "ADMIN_PASSWORD"
switch = "mock01"
obmd_base_uri = 'http://obmd.example.com/nodes/'
obmd_admin_token = 'secret'
def hil(*args):
"""Convenience function that calls the hil command line tool with
the given arguments.
"""
args = map(str, args)
print args
check_call(['hil'] + args)
hil('switch', 'register', switch, 'mock', 'ip', 'user', 'pass')
for node in range(N_NODES):
ipmi_ip = "10.0.0." + str(node + 1)
nic_port = "gi1/0/%d" % (node)
nic_name = 'nic1'
hil('node', 'register',
node,
obmd_base_uri + str(node),
obmd_admin_token,
"mock", ipmi_ip, ipmi_user, ipmi_pass)
hil('node', 'nic', 'register', node, nic_name, 'FillThisInLater')
hil('port', 'register', switch, nic_port)
hil('port', 'nic', 'add', switch, nic_port, node, nic_name)
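# Hedged sketch (not part of the original script): the per-node values produced
# by the loop above can be previewed without issuing any hil calls, e.g.:
#
#   for node in range(N_NODES):
#       print "node %d -> ipmi 10.0.0.%d, port gi1/0/%d" % (node, node + 1, node)
#
# Kept as a comment so the module still only performs the registration calls.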
|
apache-2.0
| 1,030,338,861,592,609,900
| 29.611111
| 78
| 0.678766
| false
| 3.19112
| false
| false
| false
|
tobast/sysres-pikern
|
snake/snake_common.py
|
1
|
1643
|
SERVER_PORT = 31412
PSIZE = 20
WIDTH = 30
HEIGHT = 30
PERIOD = 100
def p2add(u, v):
return (u[0] + v[0], u[1] + v[1])
DIRS = [(0, 1), (1, 0), (-1, 0), (0, -1)]
NB_APPLES = 3
class Packet:
def __init__(self, data = b''):
self.start_index = 0
self.data = data
def add_position(self, p):
self.data += bytes((p[0], p[1]))
def add_uint16(self, n):
self.data += bytes(((n >> 8) & 0xff, n & 0xff))
def add_uint8(self, n):
self.data += bytes((n,))
def add_color(self, c):
self.add_uint16(c[0])
self.add_uint16(c[1])
self.add_uint16(c[2])
def add_position_list(self, l):
self.add_uint16(len(l))
for p in l:
self.add_position(p)
def read_position(self):
r = self.data[self.start_index]
s = self.data[self.start_index + 1]
self.start_index += 2
return (r, s)
def read_uint16(self):
r = self.data[self.start_index]
s = self.data[self.start_index + 1]
self.start_index += 2
return (r << 8) | s
def read_uint8(self):
r = self.data[self.start_index]
self.start_index += 1
return r
def read_position_list(self):
l = []
n = self.read_uint16()
for i in range(n):
l.append(self.read_position())
return l
def read_color(self):
r = self.read_uint16()
g = self.read_uint16()
b = self.read_uint16()
return (r, g, b)
TOSERVER_INIT = 0
TOCLIENT_INIT = 1
SET_SNAKE = 2
SET_APPLES = 3
SET_DIRECTION = 4
SET_SNAKE_COLOR = 5
TOCLIENT_ACCESS_DENIED = 6
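# Hedged usage sketch (not part of the original module): a quick round-trip
# check of the Packet encoding defined above. Assumes Python 3, where
# bytes((n,)) yields a single byte, and positions that fit in one byte each.
if __name__ == "__main__":
    p = Packet()
    p.add_uint8(TOCLIENT_INIT)
    p.add_position_list([(1, 2), (3, 4)])
    p.add_color((65535, 0, 32768))
    q = Packet(p.data)
    assert q.read_uint8() == TOCLIENT_INIT
    assert q.read_position_list() == [(1, 2), (3, 4)]
    assert q.read_color() == (65535, 0, 32768)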
|
gpl-3.0
| 71,077,579,929,375,590
| 20.906667
| 55
| 0.522215
| false
| 2.872378
| false
| false
| false
|
jolyonb/edx-platform
|
lms/djangoapps/course_api/blocks/tests/test_forms.py
|
1
|
8460
|
"""
Tests for Course Blocks forms
"""
from urllib import urlencode
import ddt
from django.http import Http404, QueryDict
from opaque_keys.edx.locator import CourseLocator
from rest_framework.exceptions import PermissionDenied
from openedx.core.djangoapps.util.test_forms import FormTestMixin
from student.models import CourseEnrollment
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..forms import BlockListGetForm
@ddt.ddt
class TestBlockListGetForm(FormTestMixin, SharedModuleStoreTestCase):
"""
Tests for BlockListGetForm
"""
FORM_CLASS = BlockListGetForm
@classmethod
def setUpClass(cls):
super(TestBlockListGetForm, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestBlockListGetForm, self).setUp()
self.student = UserFactory.create()
self.student2 = UserFactory.create()
self.staff = UserFactory.create(is_staff=True)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.student2, course_id=self.course.id)
usage_key = self.course.location
self.initial = {'requesting_user': self.student}
self.form_data = QueryDict(
urlencode({
'username': self.student.username,
'usage_key': unicode(usage_key),
}),
mutable=True,
)
self.cleaned_data = {
'all_blocks': None,
'block_counts': set(),
'depth': 0,
'nav_depth': None,
'return_type': 'dict',
'requested_fields': {'display_name', 'type'},
'student_view_data': set(),
'usage_key': usage_key,
'username': self.student.username,
'user': self.student,
'block_types_filter': set(),
}
def assert_raises_permission_denied(self):
"""
Fail unless permission is denied to the form
"""
with self.assertRaises(PermissionDenied):
self.get_form(expected_valid=False)
def assert_raises_not_found(self):
"""
Fail unless a 404 occurs
"""
with self.assertRaises(Http404):
self.get_form(expected_valid=False)
def assert_equals_cleaned_data(self):
"""
Check that the form returns the expected data
"""
form = self.get_form(expected_valid=True)
self.assertDictEqual(form.cleaned_data, self.cleaned_data)
def test_basic(self):
self.assert_equals_cleaned_data()
#-- usage key
def test_no_usage_key_param(self):
self.form_data.pop('usage_key')
self.assert_error('usage_key', "This field is required.")
def test_invalid_usage_key(self):
self.form_data['usage_key'] = 'invalid_usage_key'
self.assert_error('usage_key', "'invalid_usage_key' is not a valid usage key.")
def test_non_existent_usage_key(self):
self.form_data['usage_key'] = self.store.make_course_usage_key(CourseLocator('non', 'existent', 'course'))
self.assert_raises_permission_denied()
#-- user
@ddt.data("True", "true", True)
def test_no_user_all_blocks_true(self, all_blocks_value):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.form_data['all_blocks'] = all_blocks_value
self.get_form(expected_valid=True)
@ddt.data("False", "false", False)
def test_no_user_all_blocks_false(self, all_blocks_value):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.form_data['all_blocks'] = all_blocks_value
self.assert_error('username', "This field is required unless all_blocks is requested.")
def test_no_user_all_blocks_none(self):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.assert_error('username', "This field is required unless all_blocks is requested.")
def test_no_user_non_staff(self):
self.form_data.pop('username')
self.form_data['all_blocks'] = True
self.assert_raises_permission_denied()
def test_nonexistent_user_by_student(self):
self.form_data['username'] = 'non_existent_user'
self.assert_raises_permission_denied()
def test_nonexistent_user_by_staff(self):
self.initial = {'requesting_user': self.staff}
self.form_data['username'] = 'non_existent_user'
self.assert_raises_not_found()
def test_other_user_by_student(self):
self.form_data['username'] = self.student2.username
self.assert_raises_permission_denied()
def test_other_user_by_staff(self):
self.initial = {'requesting_user': self.staff}
self.get_form(expected_valid=True)
def test_unenrolled_student(self):
CourseEnrollment.unenroll(self.student, self.course.id)
self.assert_raises_permission_denied()
def test_unenrolled_staff(self):
CourseEnrollment.unenroll(self.staff, self.course.id)
self.initial = {'requesting_user': self.staff}
self.form_data['username'] = self.staff.username
self.get_form(expected_valid=True)
def test_unenrolled_student_by_staff(self):
CourseEnrollment.unenroll(self.student, self.course.id)
self.initial = {'requesting_user': self.staff}
self.get_form(expected_valid=True)
#-- depth
def test_depth_integer(self):
self.form_data['depth'] = 3
self.cleaned_data['depth'] = 3
self.assert_equals_cleaned_data()
def test_depth_all(self):
self.form_data['depth'] = 'all'
self.cleaned_data['depth'] = None
self.assert_equals_cleaned_data()
def test_depth_invalid(self):
self.form_data['depth'] = 'not_an_integer'
self.assert_error('depth', "'not_an_integer' is not a valid depth value.")
#-- nav depth
def test_nav_depth(self):
self.form_data['nav_depth'] = 3
self.cleaned_data['nav_depth'] = 3
self.cleaned_data['requested_fields'] |= {'nav_depth'}
self.assert_equals_cleaned_data()
def test_nav_depth_invalid(self):
self.form_data['nav_depth'] = 'not_an_integer'
self.assert_error('nav_depth', "Enter a whole number.")
def test_nav_depth_negative(self):
self.form_data['nav_depth'] = -1
self.assert_error('nav_depth', "Ensure this value is greater than or equal to 0.")
#-- return_type
def test_return_type(self):
self.form_data['return_type'] = 'list'
self.cleaned_data['return_type'] = 'list'
self.assert_equals_cleaned_data()
def test_return_type_invalid(self):
self.form_data['return_type'] = 'invalid_return_type'
self.assert_error(
'return_type',
"Select a valid choice. invalid_return_type is not one of the available choices."
)
#-- requested fields
def test_requested_fields(self):
self.form_data.setlist('requested_fields', ['graded', 'nav_depth', 'some_other_field'])
self.cleaned_data['requested_fields'] |= {'graded', 'nav_depth', 'some_other_field'}
self.assert_equals_cleaned_data()
@ddt.data('block_counts', 'student_view_data')
def test_higher_order_field(self, field_name):
field_value = {'block_type1', 'block_type2'}
self.form_data.setlist(field_name, field_value)
self.cleaned_data[field_name] = field_value
self.cleaned_data['requested_fields'].add(field_name)
self.assert_equals_cleaned_data()
def test_combined_fields(self):
# add requested fields
self.form_data.setlist('requested_fields', ['field1', 'field2'])
# add higher order fields
block_types_list = {'block_type1', 'block_type2'}
for field_name in ['block_counts', 'student_view_data']:
self.form_data.setlist(field_name, block_types_list)
self.cleaned_data[field_name] = block_types_list
# verify the requested_fields in cleaned_data includes all fields
self.cleaned_data['requested_fields'] |= {'field1', 'field2', 'student_view_data', 'block_counts'}
self.assert_equals_cleaned_data()
|
agpl-3.0
| 6,443,629,654,869,371,000
| 34.39749
| 114
| 0.635697
| false
| 3.748339
| true
| false
| false
|
famish99/pyvisa-sim
|
pyvisa_sim/tcpip.py
|
1
|
2280
|
# -*- coding: utf-8 -*-
"""
pyvisa-sim.tcpip
~~~~~~~~~~~~~~~~
TCPIP simulated session class.
:copyright: 2014 by PyVISA-sim Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import time
from pyvisa import constants
from . import sessions
class BaseTCPIPSession(sessions.Session):
"""Base class for TCPIP sessions."""
def read(self, count):
end_char, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR)
enabled, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR_EN)
timeout, _ = self.get_attribute(constants.VI_ATTR_TMO_VALUE)
timeout /= 1000
start = time.time()
out = b""
while time.time() - start <= timeout:
last = self.device.read()
if not last:
time.sleep(0.01)
continue
out += last
if enabled:
if len(out) > 0 and out[-1] == end_char:
return out, constants.StatusCode.success_termination_character_read
if len(out) == count:
return out, constants.StatusCode.success_max_count_read
else:
return out, constants.StatusCode.error_timeout
def write(self, data):
send_end = self.get_attribute(constants.VI_ATTR_SEND_END_EN)
for i in range(len(data)):
self.device.write(data[i : i + 1])
if send_end:
# EOM 4882
pass
return len(data), constants.StatusCode.success
@sessions.Session.register(constants.InterfaceType.tcpip, "INSTR")
class TCPIPInstrumentSession(BaseTCPIPSession):
def after_parsing(self):
self.attrs[constants.VI_ATTR_INTF_NUM] = int(self.parsed.board)
self.attrs[constants.VI_ATTR_TCPIP_ADDR] = self.parsed.host_address
self.attrs[constants.VI_ATTR_TCPIP_DEVICE_NAME] = self.parsed.lan_device_name
@sessions.Session.register(constants.InterfaceType.tcpip, "SOCKET")
class TCPIPSocketSession(BaseTCPIPSession):
def after_parsing(self):
self.attrs[constants.VI_ATTR_INTF_NUM] = int(self.parsed.board)
self.attrs[constants.VI_ATTR_TCPIP_ADDR] = self.parsed.host_address
self.attrs[constants.VI_ATTR_TCPIP_PORT] = int(self.parsed.port)
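# Hedged usage sketch (not part of this module), assuming the pyvisa-sim
# backend is installed and its bundled default.yaml defines the simulated
# TCPIP instrument resource used below:
#
#   import pyvisa
#   rm = pyvisa.ResourceManager("@sim")
#   inst = rm.open_resource("TCPIP0::localhost::inst0::INSTR",
#                           read_termination="\n", write_termination="\n")
#   print(inst.query("?IDN"))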
|
mit
| -8,203,132,260,625,480,000
| 29.4
| 87
| 0.623684
| false
| 3.68932
| false
| false
| false
|
waynewolf/abucket
|
from-tf-web/quickstart/1-get-started-tf-contrib-learn-customize.py
|
1
|
1494
|
import numpy as np
import tensorflow as tf
# Declare list of features, we only have one real-valued feature
def model(features, labels, mode):
# Build a linear model and predict values
W = tf.get_variable("W", [1], dtype=tf.float64)
b = tf.get_variable("b", [1], dtype=tf.float64)
y = W*features['x'] + b
# Loss sub-graph
loss = tf.reduce_sum(tf.square(y - labels))
# Training sub-graph
global_step = tf.train.get_global_step()
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = tf.group(optimizer.minimize(loss),
tf.assign_add(global_step, 1))
# ModelFnOps connects subgraphs we built to the
# appropriate functionality.
return tf.contrib.learn.ModelFnOps(
mode=mode, predictions=y,
loss=loss,
train_op=train)
estimator = tf.contrib.learn.Estimator(model_fn=model)
# define our data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x_train}, y_train, 4, num_epochs=1000)
eval_input_fn = tf.contrib.learn.io.numpy_input_fn(
{"x":x_eval}, y_eval, batch_size=4, num_epochs=1000)
# train
estimator.fit(input_fn=input_fn, steps=1000)
# Here we evaluate how well our model did.
train_loss = estimator.evaluate(input_fn=input_fn)
eval_loss = estimator.evaluate(input_fn=eval_input_fn)
print("train loss: %r"% train_loss)
print("eval loss: %r"% eval_loss)
|
mit
| -8,651,862,507,716,371,000
| 37.307692
| 90
| 0.670683
| false
| 2.912281
| false
| false
| false
|
MadsJensen/agency_connectivity
|
tf_functions.py
|
1
|
5293
|
"""
Functions for TF analysis.
@author: mje
@email: mads [] cnru.dk
"""
import mne
from mne.time_frequency import (psd_multitaper, tfr_multitaper, tfr_morlet,
cwt_morlet)
from mne.viz import iter_topography
import matplotlib.pyplot as plt
import numpy as np
def calc_psd_epochs(epochs, plot=False):
"""Calculate PSD for epoch.
Parameters
----------
epochs : list of epochs
plot : bool
Whether to show a plot of the PSDs.
It will be the average for each condition that is shown.
Returns
-------
psds_vol : numpy array
The psds for the voluntary condition.
psds_invol : numpy array
The psds for the involuntary condition.
"""
tmin, tmax = -0.5, 0.5
fmin, fmax = 2, 90
# n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds_vol, freqs = psd_multitaper(epochs["voluntary"],
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds_inv, freqs = psd_multitaper(epochs["involuntary"],
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds_vol = 20 * np.log10(psds_vol) # scale to dB
psds_inv = 20 * np.log10(psds_inv) # scale to dB
if plot:
def my_callback(ax, ch_idx):
"""Executed once you click on one of the channels in the plot."""
ax.plot(freqs, psds_vol_plot[ch_idx], color='red',
label="voluntary")
ax.plot(freqs, psds_inv_plot[ch_idx], color='blue',
label="involuntary")
ax.set_xlabel = 'Frequency (Hz)'
ax.set_ylabel = 'Power (dB)'
ax.legend()
psds_vol_plot = psds_vol.copy().mean(axis=0)
psds_inv_plot = psds_inv.copy().mean(axis=0)
for ax, idx in iter_topography(epochs.info,
fig_facecolor='k',
axis_facecolor='k',
axis_spinecolor='k',
on_pick=my_callback):
ax.plot(psds_vol_plot[idx], color='red', label="voluntary")
ax.plot(psds_inv_plot[idx], color='blue', label="involuntary")
plt.legend()
plt.gcf().suptitle('Power spectral densities')
plt.show()
return psds_vol, psds_inv, freqs
def multitaper_analysis(epochs):
"""
Parameters
----------
epochs : list of epochs
Returns
-------
result : numpy array
The result of the multitaper analysis.
"""
frequencies = np.arange(6., 90., 2.)
n_cycles = frequencies / 2.
time_bandwidth = 4 # Same time-smoothing as (1), 7 tapers.
power, plv = tfr_multitaper(epochs, freqs=frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=True)
return power, plv
def morlet_analysis(epochs, n_cycles=4):
"""
Parameters
----------
epochs : list of epochs
Returns
-------
result : numpy array
The result of the Morlet analysis.
"""
frequencies = np.arange(6., 30., 2.)
# n_cycles = frequencies / 2.
power, plv = tfr_morlet(epochs, freqs=frequencies, n_cycles=n_cycles,
return_itc=True,
verbose=True)
return power, plv
def single_trial_tf(epochs, frequencies, n_cycles=4.):
"""
Parameters
----------
epochs : Epochs object
The epochs to calculate TF analysis on.
frequencies : numpy array
n_cycles : int
The number of cycles for the Morlet wavelets.
Returns
-------
results : numpy array
"""
results = []
for j in range(len(epochs)):
tfr = cwt_morlet(epochs.get_data()[j],
sfreq=epochs.info["sfreq"],
freqs=frequencies,
use_fft=True,
n_cycles=n_cycles,
# decim=2,
zero_mean=False)
results.append(tfr)
return results
def calc_spatial_resolution(freqs, n_cycles):
"""Calculate the spatial resolution for a Morlet wavelet.
The formula is: (freqs / n_cycles) * 2.
Parameters
----------
freqs : numpy array
The frequencies to be calculated.
n_cycles : int or numpy array
The number of cycles used. Can be integer for the same cycle for all
frequencies, or a numpy array for individual cycles per frequency.
Returns
-------
result : numpy array
The results
"""
return (freqs / float(n_cycles)) * 2
def calc_wavelet_duration(freqs, n_cycles):
"""Calculate the wavelet duration for a Morlet wavelet in ms.
The formula is: (cycle / frequencies / pi)*1000
Parameters
----------
freqs : numpy array
The frequencies to be calculated.
n_cycles : int or numpy array
The number of cycles used. Can be integer for the same cycle for all
frequencies, or a numpy array for individual cycles per frequency.
Returns
-------
result : numpy array
The results
"""
return (float(n_cycles) / freqs / np.pi) * 1000
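# Hedged usage sketch (not part of the original module): the two helpers above
# only need numpy, so their output can be inspected for the frequency grid used
# in morlet_analysis, assuming n_cycles=4 as in that function's default.
if __name__ == "__main__":
    example_freqs = np.arange(6., 30., 2.)
    print(calc_spatial_resolution(example_freqs, n_cycles=4))  # bandwidth in Hz
    print(calc_wavelet_duration(example_freqs, n_cycles=4))    # duration in ms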
|
bsd-3-clause
| 6,559,354,453,207,024,000
| 27.005291
| 79
| 0.543737
| false
| 3.871982
| false
| false
| false
|
termNinja/Conversion-of-Regex-into-Automatons
|
pyscripts/classes/resources.py
|
1
|
18835
|
import re, os, sys
from Queue import Queue
# -----------------------------------------------------------------------------
class term_colors:
""" Usage: print term_colors.WARNING + "This is a msg" + term_colors.ENDC """
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# -----------------------------------------------------------------------------
class xlogger:
@staticmethod
def dbg(msg):
""" Prints a debugging msg onto stderr """
print >> sys.stderr, term_colors.FAIL + str(msg) + term_colors.ENDC
@staticmethod
def warn(msg):
""" Prints a warning msg onto stderr """
print >> sys.stderr, term_colors.WARNING + str(msg) + term_colors.ENDC
@staticmethod
def info(msg):
""" Prints an info msg onto stderr """
print >> sys.stderr, term_colors.OKBLUE + str(msg) + term_colors.ENDC
@staticmethod
def fine(msg):
""" Prints an ok msg onto stderr """
print >> sys.stderr, term_colors.OKGREEN + str(msg) + term_colors.ENDC
# -----------------------------------------------------------------------------
# handy macro
class algo_step:
thompson = "_01_thompson"
elimeps = "_02_elimeps"
determ = "_03_determ"
minim = "_04_minim"
# -----------------------------------------------------------------------------
# VERY IMPORTANT:
# -----------------------------------------------------------------------------
# I changed type of end_node into STRING type, if error occurs BEFORE determinisation,
# make sure to check it wasn't caused by this
# -----------------------------------------------------------------------------
class Edge:
def __init__(self, end_node, weight):
"""
Initializes edge object.
end_node -> string
weight -> string
"""
self.end_node = str(end_node)
self.weight = str(weight)
def __str__(self):
return "(" + str(self.end_node) + ", " + str(self.weight) + ")"
def __eq__(self, other):
if self.end_node == other.end_node and self.weight == other.weight:
return True
else:
return False
def __hash__(self):
return hash(self.end_node) ^ hash(self.weight)
# -----------------------------------------------------------------------------
class Node:
def __init__(self, node_val, is_ending):
self.node_val = int(node_val)
self.is_ending = bool(is_ending)
def __str__(self):
if self.is_ending:
return "(" + str(self.node_val) + ")"
else:
return str(self.node_val)
# When reading Thompson's graph from the .gv file, we KNOW that
# node 1 is ENDING state, because that's how Thompson's algorithm was implemented
# for this particular project.
# -----------------------------------------------------------------------------
class Graph:
# -------------------------------------------------------------------------
def __init__(self, graph_map, graph_name):
self.graph_map = {}
self.graph_name = graph_name
self.ending_nodes = [int(1)]
for ending_node in self.ending_nodes:
self.graph_map[ending_node] = []
# -------------------------------------------------------------------------
def __str__(self):
output = str(self.graph_name) + "\n-----------------------------\n"
output += str(self.graph_map)
return output
# -------------------------------------------------------------------------
def form_graph_from_gv(self):
"""
Reads the .gv file that represent the graph
and maps it onto Graph object.
"""
print "reading graph: " + self.graph_name
# algo_step.thompson because python continues where C stopped with work
# => Thompson algorithm has been performed
f = open("../graphs/" + self.graph_name + algo_step.thompson + ".gv", "r")
data = f.read()
f.close()
print "Graph data:"
print data
print
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# Forming graph
regex = r"\"([a-zA-Z0-9]+)\"\s*->\s*\"([a-zA-Z0-9]+)\"\s*"
regex += r"(\[label\s*[=]\s*\"([a-zA-Z0-9]+)\"\])?"
regex = re.compile(regex)
for iter in regex.finditer(data):
node_val = iter.group(1)
into_node = iter.group(2)
if iter.group(4) == None:
graph_weight = "eps"
else:
graph_weight = iter.group(4)
# Creating node
# NOTICE TODO: Node objects aren't actually needed. It can be removed...later though
if int(node_val) in self.ending_nodes:
node = Node(node_val, True)
print "making " + str(node_val) + "into ending node!"
else:
node = Node(int(node_val), False)
# Creating edge
edge = Edge(into_node, graph_weight)
print node, edge
if node.node_val in self.graph_map.keys():
self.graph_map[node.node_val].append(edge)
else:
self.graph_map[node.node_val] = []
self.graph_map[node.node_val].append(edge)
## TODO remove this, i've put it for testing purposes
self.elim_eps()
self.determinize()
# -------------------------------------------------------------------------
def export_as_gv(self, algstep):
"""
Maps Graph object as gv file.
"""
output_text = []
output_text.append("digraph finite_state_machine {\n")
output_text.append("graph [fontname = \"lmroman12\"];\n")
output_text.append("node [fontname = \"lmroman12\"];\n")
output_text.append("edge [fontname = \"lmroman12\"];\n")
output_text.append("\trankdir=LR;\n")
output_text.append("\tsize=\"8,5\"\n")
output_text.append("\tnode [shape = doublecircle]; ")
for node in self.ending_nodes:
output_text.append("\"")
output_text.append(str(node))
output_text.append("\"")
output_text.append(",")
output_text[-1] = ";\n"
output_text.append("\tnode [shape = circle];\n")
# lets fill in the elements
nodes = self.graph_map.keys()
for node in nodes:
edges = self.graph_map[node]
for edge in edges:
output_text.append("\t\"" + str(node) + "\" -> \"" + str(edge.end_node) + "\"")
# check if it was epsilon
if edge.weight != "eps":
output_text.append(" [label=\"" + str(edge.weight) + "\"]")
output_text.append("\n")
output_text.append("}")
# writing into file
f = open(self.graph_name + str(algstep) + ".gv", "w")
# f = open("tester.gv", "w")
f.write("".join(output_text))
f.close()
# -------------------------------------------------------------------------
# Export graph structure as pdf
# command is:
# dot -Tpdf ../../graphs/source_file.gv -o ../../graphs/output.pdf
def export_as_pdf(self, algstep):
"""
Draws a vector image of the graph that it reads
from a gv file (make sure you have it created).
Uses dot from graphviz to accomplish this amazing task.
"""
graph_id = self.graph_name.split("_")[0]
output_name = self.graph_name + str(algstep)
os.system("dot -Tpdf " + output_name + ".gv -o " + output_name + ".pdf")
return 1
# -------------------------------------------------------------------------
def elim_eps(self):
"""
Performs algorithm that eliminates epsilon edges in graph.
Wrapper for solve_eps_prob.
"""
new_map = {0: []}
new_ending_nodes = []
visited_nodes = {0: False}
visited = {}
for node in self.graph_map.keys():
visited[node] = {}
for tmp_node in self.graph_map.keys():
visited[node][tmp_node] = False
self.solve_eps_prob(0, 0, new_map, visited, new_ending_nodes)
self.graph_map = new_map
self.ending_nodes = new_ending_nodes
self.export_as_gv(algo_step.elimeps)
self.export_as_pdf(algo_step.elimeps)
xlogger.fine("Exported: " + self.graph_name + algo_step.elimeps + ".gv")
xlogger.fine("Exported: " + self.graph_name + algo_step.elimeps + ".pdf")
# -------------------------------------------------------------------------
def solve_eps_prob(self, root_node, current_node, new_map, visited, ending_nodes):
"""
Recursive method that performs a DFS search and eliminates epsilon edges.
"""
visited[root_node][current_node] = True
if current_node in self.ending_nodes:
ending_nodes.append(root_node)
return
for adj in self.graph_map[current_node]:
if adj.weight == "eps" and not visited[root_node][int(adj.end_node)]:
self.solve_eps_prob(root_node, int(adj.end_node), new_map, visited, ending_nodes)
elif adj.weight == "eps":
return
else:
if not root_node in new_map.keys():
new_map[root_node] = []
new_map[root_node].append(adj)
if not visited[root_node][int(adj.end_node)]:
self.solve_eps_prob(int(adj.end_node), int(adj.end_node), new_map, visited, ending_nodes)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def determinize(self):
"""
Performs the determinisation algorithm.
"""
# we switch to string keys because of new states
queue = Queue() # queue.get() queue.put(item)
queue.put("0") # 0 is always the starting node
new_map = {}
new_map["0"] = set()
while queue.qsize() > 0:
print
print "----------------------------------------------------------"
xlogger.info("Queue state: " + str([item for item in queue.queue]))
print "----------------------------------------------------------"
current_node = queue.get()
xlogger.info("Took " + str(current_node) + " from queue.")
# find all adjacent vertices
# gives something like: "1,2,3"
# gives a hash map like:
# str(a) -> set(int(1), ...) str(b) -> set(int(5), int(6), int(7))
xlogger.info("Calling find_adjacent_nodes with " + str(current_node))
adjacent_nodes = self.find_adjacent_nodes(current_node)
xlogger.info("Adjacent nodes: " + str(adjacent_nodes))
# update a map row if required for new deterministic nodes
self.update_new_map_row(current_node, adjacent_nodes, new_map, queue)
xlogger.fine("Determinized graph:")
for key in new_map.keys():
print str(key) + "->"
for elem in new_map[key]:
print "---->" + str(elem)
self.convert_into_object_map(new_map)
self.export_as_gv(algo_step.determ)
self.export_as_pdf(algo_step.determ)
# ----------------------------------------------------------------------
# Used by method: determinize
# ----------------------------------------------------------------------
def update_new_map_row(self, current_node, adjacent_nodes, new_map, queue):
"""
Used as a helper function in the determinisation algorithm.
It updates rows of the new map and enqueues newly formed composite nodes.
"""
# For each weight in array
for weight in adjacent_nodes.keys():
# --------------------------------------------------------------
# We iterate over set of ints and form a string
# --------------------------------------------------------------
new_node = []
new_edges = []
for elem in adjacent_nodes[weight]:
# forming a string
new_node.append(str(elem))
new_node.append(",")
new_node = "".join(new_node)[0:-1] # cut , at the end
xlogger.info("formed string: " + new_node)
# --------------------------------------------------------------
elem = self.list_to_string(adjacent_nodes[weight])
xlogger.info("result from [a] -> str: " + str(elem))
xlogger.info("type(" + str(elem) + " is " + str(type(elem)))
# new_map[current_node] = elem
if not current_node in new_map:
new_map[current_node] = set()
new_map[current_node].add((weight, elem))
## now we check if new_node is in new_map.keys(),
## if so, we ignore it, if not, we add it into queue and update
## its adjacent nodes
print type(new_node)
if not new_node in new_map.keys():
## adding into queue
xlogger.info("adding into queue: " + str(new_node))
queue.put(new_node)
## updating
# new_map[new_node] = []
# ----------------------------------------------------------------------
def list_to_string(self, nodelist):
"""
Converts a list of elements into a string with the character ',' as separator.
[1, 2, 3] => "1,2,3"
"""
print
res = []
for elem in nodelist:
res.append(str(elem))
res.append(",")
res = "".join(res)[0:-1] # cut , at the end
xlogger.dbg("Done conversion: " + str(res))
print
return res
# ----------------------------------------------------------------------
def string_to_list(self, nodestr):
"""
Converts a comma-separated string into a list of strings.
It also sorts the list.
"1,2,3" => [1, 2, 3]
"ab,cd" => ["ab", "cd"]
"""
if nodestr[-1] == ",":
nodestr = nodestr.split(",")[0:-1]
else:
nodestr = nodestr.split(",")
tmp = []
xlogger.dbg("string_to_list: ")
xlogger.dbg("nodestr: " + str(nodestr))
for elem in nodestr:
tmp.append(int(elem))
tmp.sort()
nodestr = []
for elem in tmp:
nodestr.append(str(elem))
xlogger.dbg("nodestr: " + str(nodestr))
return nodestr
# ----------------------------------------------------------------------
# Used by method: determinize
# ----------------------------------------------------------------------
def find_adjacent_nodes(self, current_node):
"""
Used as a helper function in the determinisation algorithm.
It finds adjacent nodes for a given node.
"""
xlogger.info("Entered find_adjacent_nodes with current_node = " + str(current_node))
# current node can be something like: "0,3,5"
adjacent_nodes = {} # example: a -> "1,2,3" b -> "3,4,5"
# [1, 2, 3] -> "1,2,3"
xlogger.dbg("calling conversion for: " + str(current_node))
current_node = self.string_to_list(current_node)
xlogger.info("updated current_node, current_node = " + str(current_node))
# ['0', '3', '5'] -> '0', '3', '5'
xlogger.dbg("current node: " + str(current_node))
for node in current_node:
xlogger.dbg("node: " + str(node))
if int(node) in self.graph_map.keys():
for edge in self.graph_map[int(node)]:
if edge.weight not in adjacent_nodes:
adjacent_nodes[edge.weight] = set()
adjacent_nodes[edge.weight].add(int(edge.end_node))
return adjacent_nodes
# ----------------------------------------------------------------------
def convert_into_object_map(self, new_map):
"""
Converts a temporary hash map created during the determinisation algorithm
into the main graph map used for storing the graph.
It also sets ending nodes.
"""
ending_nodes = []
self.graph_map.clear()
graph_nodes = new_map.keys()
for node in graph_nodes:
self.graph_map[node] = []
for edge in new_map[node]:
# ('1,2,3', 'a')
self.graph_map[node].append(Edge(edge[1], edge[0]))
if not edge[1] in graph_nodes:
self.graph_map[edge[1]] = []
# finding ending nodes
# node => "11,3" for example
for node in self.graph_map.keys():
nodez = self.string_to_list(node)
for elem in nodez:
xlogger.dbg("elem: " + str(elem))
if int(elem) in self.ending_nodes:
ending_nodes.append(str(node))
break
xlogger.info("old ending nodes: " + str(self.ending_nodes))
xlogger.info("new ending nodes: " + str(ending_nodes))
# adding nodes that don't have an output edge
# currently, they are implicitly given in our graph structure
# they appear only in edges in map (example: 3 has no output edge)
# For example, "1,2" -> ("ab", "3")
# Lets find nodes like this and add them into main map
for node in graph_nodes:
for edge in new_map[node]:
if not edge[1] in graph_nodes:
self.graph_map[edge[1]] = []
# Finally, we form the ending nodes in Graph object
self.ending_nodes = ending_nodes
print
self.show_graph()
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def show_graph(self):
"""
Prints graph to stdout.
"""
for node in self.graph_map.keys():
print node
for edge in self.graph_map[node]:
print " -> " + str(edge)
# ----------------------------------------------------------------------
# TODO: Next to implement
# ----------------------------------------------------------------------
def minimize(self):
"""
Performs minimization algorithm.
"""
return 1
# -----------------------------------------------------------------------------
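# Hedged usage sketch (not part of the original module): driving the pipeline
# for a graph named "g0", assuming ../graphs/g0_01_thompson.gv was already
# produced by the C part of the project:
#
#   g = Graph({}, "g0")
#   g.form_graph_from_gv()  # currently also runs elim_eps() and determinize(),
#                           # see the TODO inside that method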
|
gpl-3.0
| -8,287,676,522,294,573,000
| 38.239583
| 109
| 0.45219
| false
| 4.267105
| false
| false
| false
|
saurabh6790/frappe
|
frappe/core/doctype/user/user.py
|
1
|
39931
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from bs4 import BeautifulSoup
import frappe
import frappe.share
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
from frappe.utils import (cint, flt, has_gravatar, escape_html, format_datetime,
now_datetime, get_formatted_email, today)
from frappe import throw, msgprint, _
from frappe.utils.password import update_password as _update_password, check_password, get_password_reset_limit
from frappe.desk.notifications import clear_notifications
from frappe.desk.doctype.notification_settings.notification_settings import create_notification_settings, toggle_notifications
from frappe.utils.user import get_system_managers
from frappe.website.utils import is_signup_enabled
from frappe.rate_limiter import rate_limit
from frappe.utils.background_jobs import enqueue
from frappe.core.doctype.user_type.user_type import user_linked_with_permission_on_doctype
STANDARD_USERS = ("Guest", "Administrator")
class MaxUsersReachedError(frappe.ValidationError):
pass
class User(Document):
__new_password = None
def __setup__(self):
# because it is handled separately
self.flags.ignore_save_passwords = ['new_password']
def autoname(self):
"""set name as Email Address"""
if self.get("is_admin") or self.get("is_guest"):
self.name = self.first_name
else:
self.email = self.email.strip().lower()
self.name = self.email
def onload(self):
from frappe.config import get_modules_from_all_apps
self.set_onload('all_modules',
[m.get("module_name") for m in get_modules_from_all_apps()])
def before_insert(self):
self.flags.in_insert = True
throttle_user_creation()
def after_insert(self):
create_notification_settings(self.name)
frappe.cache().delete_key('users_for_mentions')
def validate(self):
self.check_demo()
# clear new password
self.__new_password = self.new_password
self.new_password = ""
if not frappe.flags.in_test:
self.password_strength_test()
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.validate_email_type(self.name)
self.add_system_manager_role()
self.set_system_user()
self.set_full_name()
self.check_enable_disable()
self.ensure_unique_roles()
self.remove_all_roles_for_guest()
self.validate_username()
self.remove_disabled_roles()
self.validate_user_email_inbox()
ask_pass_update()
self.validate_roles()
self.validate_allowed_modules()
self.validate_user_image()
if self.language == "Loading...":
self.language = None
if (self.name not in ["Administrator", "Guest"]) and (not self.get_social_login_userid("frappe")):
self.set_social_login_userid("frappe", frappe.generate_hash(length=39))
def validate_roles(self):
if self.role_profile_name:
role_profile = frappe.get_doc('Role Profile', self.role_profile_name)
self.set('roles', [])
self.append_roles(*[role.role for role in role_profile.roles])
def validate_allowed_modules(self):
if self.module_profile:
module_profile = frappe.get_doc('Module Profile', self.module_profile)
self.set('block_modules', [])
for d in module_profile.get('block_modules'):
self.append('block_modules', {
'module': d.module
})
def validate_user_image(self):
if self.user_image and len(self.user_image) > 2000:
frappe.throw(_("Not a valid User Image."))
def on_update(self):
# clear new password
self.share_with_self()
clear_notifications(user=self.name)
frappe.clear_cache(user=self.name)
now=frappe.flags.in_test or frappe.flags.in_install
self.send_password_notification(self.__new_password)
frappe.enqueue(
'frappe.core.doctype.user.user.create_contact',
user=self,
ignore_mandatory=True,
now=now
)
if self.name not in ('Administrator', 'Guest') and not self.user_image:
frappe.enqueue('frappe.core.doctype.user.user.update_gravatar', name=self.name, now=now)
# Set user selected timezone
if self.time_zone:
frappe.defaults.set_default("time_zone", self.time_zone, self.name)
if self.has_value_changed('allow_in_mentions') or self.has_value_changed('user_type'):
frappe.cache().delete_key('users_for_mentions')
def has_website_permission(self, ptype, user, verbose=False):
"""Returns true if current user is the session user"""
return self.name == frappe.session.user
def check_demo(self):
if frappe.session.user == 'demo@erpnext.com':
frappe.throw(_('Cannot change user details in demo. Please signup for a new account at https://erpnext.com'), title=_('Not Allowed'))
def set_full_name(self):
self.full_name = " ".join(filter(None, [self.first_name, self.last_name]))
def check_enable_disable(self):
# do not allow disabling administrator/guest
if not cint(self.enabled) and self.name in STANDARD_USERS:
frappe.throw(_("User {0} cannot be disabled").format(self.name))
if not cint(self.enabled):
self.a_system_manager_should_exist()
# clear sessions if disabled
if not cint(self.enabled) and getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# toggle notifications based on the user's status
toggle_notifications(self.name, enable=cint(self.enabled))
def add_system_manager_role(self):
# if adding system manager, do nothing
if not cint(self.enabled) or ("System Manager" in [user_role.role for user_role in
self.get("roles")]):
return
if (self.name not in STANDARD_USERS and self.user_type == "System User" and not self.get_other_system_managers()
and cint(frappe.db.get_single_value('System Settings', 'setup_complete'))):
msgprint(_("Adding System Manager to this User as there must be atleast one System Manager"))
self.append("roles", {
"doctype": "Has Role",
"role": "System Manager"
})
if self.name == 'Administrator':
# Administrator should always have System Manager Role
self.extend("roles", [
{
"doctype": "Has Role",
"role": "System Manager"
},
{
"doctype": "Has Role",
"role": "Administrator"
}
])
def email_new_password(self, new_password=None):
if new_password and not self.flags.in_insert:
_update_password(user=self.name, pwd=new_password, logout_all_sessions=self.logout_all_sessions)
def set_system_user(self):
'''For the standard users like admin and guest, the user type is fixed.'''
user_type_mapper = {
'Administrator': 'System User',
'Guest': 'Website User'
}
if self.user_type and not frappe.get_cached_value('User Type', self.user_type, 'is_standard'):
if user_type_mapper.get(self.name):
self.user_type = user_type_mapper.get(self.name)
else:
self.set_roles_and_modules_based_on_user_type()
else:
'''Set as System User if any of the given roles has desk_access'''
self.user_type = 'System User' if self.has_desk_access() else 'Website User'
def set_roles_and_modules_based_on_user_type(self):
user_type_doc = frappe.get_cached_doc('User Type', self.user_type)
if user_type_doc.role:
self.roles = []
# Check whether User has linked with the 'Apply User Permission On' doctype or not
if user_linked_with_permission_on_doctype(user_type_doc, self.name):
self.append('roles', {
'role': user_type_doc.role
})
frappe.msgprint(_('Role has been set as per the user type {0}')
.format(self.user_type), alert=True)
user_type_doc.update_modules_in_user(self)
def has_desk_access(self):
'''Return true if any of the set roles has desk access'''
if not self.roles:
return False
return len(frappe.db.sql("""select name
from `tabRole` where desk_access=1
and name in ({0}) limit 1""".format(', '.join(['%s'] * len(self.roles))),
[d.role for d in self.roles]))
def share_with_self(self):
frappe.share.add(self.doctype, self.name, self.name, write=1, share=1,
flags={"ignore_share_permission": True})
def validate_share(self, docshare):
pass
# if docshare.user == self.name:
# if self.user_type=="System User":
# if docshare.share != 1:
# frappe.throw(_("Sorry! User should have complete access to their own record."))
# else:
# frappe.throw(_("Sorry! Sharing with Website User is prohibited."))
def send_password_notification(self, new_password):
try:
if self.flags.in_insert:
if self.name not in STANDARD_USERS:
if new_password:
# new password given, no email required
_update_password(user=self.name, pwd=new_password,
logout_all_sessions=self.logout_all_sessions)
if not self.flags.no_welcome_mail and cint(self.send_welcome_email):
self.send_welcome_mail_to_user()
self.flags.email_sent = 1
if frappe.session.user != 'Guest':
msgprint(_("Welcome email sent"))
return
else:
self.email_new_password(new_password)
except frappe.OutgoingEmailError:
print(frappe.get_traceback())
pass # email server not set, don't send email
@Document.hook
def validate_reset_password(self):
pass
def reset_password(self, send_email=False, password_expired=False):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
url = "/update-password?key=" + key
if password_expired:
url = "/update-password?key=" + key + '&password_expired=true'
link = get_url(url)
if send_email:
self.password_reset_mail(link)
return link
def get_other_system_managers(self):
return frappe.db.sql("""select distinct `user`.`name` from `tabHas Role` as `user_role`, `tabUser` as `user`
where user_role.role='System Manager'
and `user`.docstatus<2
and `user`.enabled=1
and `user_role`.parent = `user`.name
and `user_role`.parent not in ('Administrator', %s) limit 1""", (self.name,))
def get_fullname(self):
"""get first_name space last_name"""
return (self.first_name or '') + \
(self.first_name and " " or '') + (self.last_name or '')
def password_reset_mail(self, link):
self.send_login_mail(_("Password Reset"),
"password_reset", {"link": link}, now=True)
def send_welcome_mail_to_user(self):
from frappe.utils import get_url
link = self.reset_password()
subject = None
method = frappe.get_hooks("welcome_email")
if method:
subject = frappe.get_attr(method[-1])()
if not subject:
site_name = frappe.db.get_default('site_name') or frappe.get_conf().get("site_name")
if site_name:
subject = _("Welcome to {0}").format(site_name)
else:
subject = _("Complete Registration")
self.send_login_mail(subject, "new_user",
dict(
link=link,
site_url=get_url(),
))
def send_login_mail(self, subject, template, add_args, now=None):
"""send mail with login details"""
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
created_by = get_user_fullname(frappe.session['user'])
if created_by == "Guest":
created_by = "Administrator"
args = {
'first_name': self.first_name or self.last_name or "user",
'user': self.name,
'title': subject,
'login_url': get_url(),
'created_by': created_by
}
args.update(add_args)
sender = frappe.session.user not in STANDARD_USERS and get_formatted_email(frappe.session.user) or None
frappe.sendmail(recipients=self.email, sender=sender, subject=subject,
template=template, args=args, header=[subject, "green"],
delayed=(not now) if now!=None else self.flags.delay_emails, retry=3)
def a_system_manager_should_exist(self):
if not self.get_other_system_managers():
throw(_("There should remain at least one System Manager"))
def on_trash(self):
frappe.clear_cache(user=self.name)
if self.name in STANDARD_USERS:
throw(_("User {0} cannot be deleted").format(self.name))
self.a_system_manager_should_exist()
# disable the user and log him/her out
self.enabled = 0
if getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# delete todos
frappe.db.sql("""DELETE FROM `tabToDo` WHERE `owner`=%s""", (self.name,))
frappe.db.sql("""UPDATE `tabToDo` SET `assigned_by`=NULL WHERE `assigned_by`=%s""",
(self.name,))
# delete events
frappe.db.sql("""delete from `tabEvent` where owner=%s
and event_type='Private'""", (self.name,))
# delete shares
frappe.db.sql("""delete from `tabDocShare` where user=%s""", self.name)
# delete messages
frappe.db.sql("""delete from `tabCommunication`
where communication_type in ('Chat', 'Notification')
and reference_doctype='User'
and (reference_name=%s or owner=%s)""", (self.name, self.name))
# unlink contact
frappe.db.sql("""update `tabContact`
set `user`=null
where `user`=%s""", (self.name))
# delete notification settings
frappe.delete_doc("Notification Settings", self.name, ignore_permissions=True)
if self.get('allow_in_mentions'):
frappe.cache().delete_key('users_for_mentions')
def before_rename(self, old_name, new_name, merge=False):
self.check_demo()
frappe.clear_cache(user=old_name)
self.validate_rename(old_name, new_name)
def validate_rename(self, old_name, new_name):
# do not allow renaming administrator and guest
if old_name in STANDARD_USERS:
throw(_("User {0} cannot be renamed").format(self.name))
self.validate_email_type(new_name)
def validate_email_type(self, email):
from frappe.utils import validate_email_address
validate_email_address(email.strip(), True)
def after_rename(self, old_name, new_name, merge=False):
tables = frappe.db.get_tables()
for tab in tables:
desc = frappe.db.get_table_columns_description(tab)
has_fields = []
for d in desc:
if d.get('name') in ['owner', 'modified_by']:
has_fields.append(d.get('name'))
for field in has_fields:
frappe.db.sql("""UPDATE `%s`
SET `%s` = %s
WHERE `%s` = %s""" %
(tab, field, '%s', field, '%s'), (new_name, old_name))
if frappe.db.exists("Chat Profile", old_name):
frappe.rename_doc("Chat Profile", old_name, new_name, force=True, show_alert=False)
if frappe.db.exists("Notification Settings", old_name):
frappe.rename_doc("Notification Settings", old_name, new_name, force=True, show_alert=False)
# set email
frappe.db.sql("""UPDATE `tabUser`
SET email = %s
WHERE name = %s""", (new_name, new_name))
def append_roles(self, *roles):
"""Add roles to user"""
current_roles = [d.role for d in self.get("roles")]
for role in roles:
if role in current_roles:
continue
self.append("roles", {"role": role})
def add_roles(self, *roles):
"""Add roles to user and save"""
self.append_roles(*roles)
self.save()
def remove_roles(self, *roles):
existing_roles = dict((d.role, d) for d in self.get("roles"))
for role in roles:
if role in existing_roles:
self.get("roles").remove(existing_roles[role])
self.save()
def remove_all_roles_for_guest(self):
if self.name == "Guest":
self.set("roles", list(set(d for d in self.get("roles") if d.role == "Guest")))
def remove_disabled_roles(self):
disabled_roles = [d.name for d in frappe.get_all("Role", filters={"disabled":1})]
for role in list(self.get('roles')):
if role.role in disabled_roles:
self.get('roles').remove(role)
def ensure_unique_roles(self):
exists = []
for i, d in enumerate(self.get("roles")):
if (not d.role) or (d.role in exists):
self.get("roles").remove(d)
else:
exists.append(d.role)
def validate_username(self):
if not self.username and self.is_new() and self.first_name:
self.username = frappe.scrub(self.first_name)
if not self.username:
return
# strip space and @
self.username = self.username.strip(" @")
if self.username_exists():
if self.user_type == 'System User':
frappe.msgprint(_("Username {0} already exists").format(self.username))
self.suggest_username()
self.username = ""
def password_strength_test(self):
""" test password strength """
if self.flags.ignore_password_policy:
return
if self.__new_password:
user_data = (self.first_name, self.middle_name, self.last_name, self.email, self.birth_date)
result = test_password_strength(self.__new_password, '', None, user_data)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
def suggest_username(self):
def _check_suggestion(suggestion):
if self.username != suggestion and not self.username_exists(suggestion):
return suggestion
return None
# @firstname
username = _check_suggestion(frappe.scrub(self.first_name))
if not username:
# @firstname_last_name
username = _check_suggestion(frappe.scrub("{0} {1}".format(self.first_name, self.last_name or "")))
if username:
frappe.msgprint(_("Suggested Username: {0}").format(username))
return username
def username_exists(self, username=None):
return frappe.db.get_value("User", {"username": username or self.username, "name": ("!=", self.name)})
def get_blocked_modules(self):
"""Returns list of modules blocked for that user"""
return [d.module for d in self.block_modules] if self.block_modules else []
def validate_user_email_inbox(self):
""" check if same email account added in User Emails twice """
email_accounts = [ user_email.email_account for user_email in self.user_emails ]
if len(email_accounts) != len(set(email_accounts)):
frappe.throw(_("Email Account added multiple times"))
def get_social_login_userid(self, provider):
try:
for p in self.social_logins:
if p.provider == provider:
return p.userid
except:
return None
def set_social_login_userid(self, provider, userid, username=None):
social_logins = {
"provider": provider,
"userid": userid
}
if username:
social_logins["username"] = username
self.append("social_logins", social_logins)
def get_restricted_ip_list(self):
if not self.restrict_ip:
return
return [i.strip() for i in self.restrict_ip.split(",")]
@classmethod
def find_by_credentials(cls, user_name: str, password: str, validate_password: bool = True):
"""Find the user by credentials.
This is a login utility that needs to check login related system settings while finding the user.
1. Find user by email ID by default
2. If allow_login_using_mobile_number is set, you can use mobile number while finding the user.
3. If allow_login_using_user_name is set, you can use username while finding the user.
"""
login_with_mobile = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number"))
login_with_username = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_user_name"))
or_filters = [{"name": user_name}]
if login_with_mobile:
or_filters.append({"mobile_no": user_name})
if login_with_username:
or_filters.append({"username": user_name})
users = frappe.db.get_all('User', fields=['name', 'enabled'], or_filters=or_filters, limit=1)
if not users:
return
user = users[0]
user['is_authenticated'] = True
if validate_password:
try:
check_password(user['name'], password, delete_tracker_cache=False)
except frappe.AuthenticationError:
user['is_authenticated'] = False
return user
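# Hedged usage sketch (not part of the original file): how find_by_credentials
# might be exercised from a bench console, assuming a configured Frappe site
# and an existing user "test@example.com" with password "secret":
#
#   user = User.find_by_credentials("test@example.com", "secret")
#   if user and user.is_authenticated:
#       print(user.name)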
@frappe.whitelist()
def get_timezones():
import pytz
return {
"timezones": pytz.all_timezones
}
@frappe.whitelist()
def get_all_roles(arg=None):
"""return all roles"""
active_domains = frappe.get_active_domains()
roles = frappe.get_all("Role", filters={
"name": ("not in", "Administrator,Guest,All"),
"disabled": 0
}, or_filters={
"ifnull(restrict_to_domain, '')": "",
"restrict_to_domain": ("in", active_domains)
}, order_by="name")
return [ role.get("name") for role in roles ]
@frappe.whitelist()
def get_roles(arg=None):
"""get roles for a user"""
return frappe.get_roles(frappe.form_dict['uid'])
@frappe.whitelist()
def get_perm_info(role):
"""get permission info"""
from frappe.permissions import get_all_perms
return get_all_perms(role)
@frappe.whitelist(allow_guest=True)
def update_password(new_password, logout_all_sessions=0, key=None, old_password=None):
#validate key to avoid key input like ['like', '%'], '', ['in', ['']]
if key and not isinstance(key, str):
frappe.throw(_('Invalid key type'))
result = test_password_strength(new_password, key, old_password)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
res = _get_user_for_update_password(key, old_password)
if res.get('message'):
frappe.local.response.http_status_code = 410
return res['message']
else:
user = res['user']
logout_all_sessions = cint(logout_all_sessions) or frappe.db.get_single_value("System Settings", "logout_on_password_reset")
_update_password(user, new_password, logout_all_sessions=cint(logout_all_sessions))
user_doc, redirect_url = reset_user_data(user)
# get redirect url from cache
redirect_to = frappe.cache().hget('redirect_after_login', user)
if redirect_to:
redirect_url = redirect_to
frappe.cache().hdel('redirect_after_login', user)
frappe.local.login_manager.login_as(user)
frappe.db.set_value("User", user, "last_password_reset_date", today())
frappe.db.set_value("User", user, "reset_password_key", "")
if user_doc.user_type == "System User":
return "/app"
else:
return redirect_url if redirect_url else "/"
@frappe.whitelist(allow_guest=True)
def test_password_strength(new_password, key=None, old_password=None, user_data=None):
from frappe.utils.password_strength import test_password_strength as _test_password_strength
password_policy = frappe.db.get_value("System Settings", None,
["enable_password_policy", "minimum_password_score"], as_dict=True) or {}
enable_password_policy = cint(password_policy.get("enable_password_policy", 0))
minimum_password_score = cint(password_policy.get("minimum_password_score", 0))
if not enable_password_policy:
return {}
if not user_data:
user_data = frappe.db.get_value('User', frappe.session.user,
['first_name', 'middle_name', 'last_name', 'email', 'birth_date'])
if new_password:
result = _test_password_strength(new_password, user_inputs=user_data)
password_policy_validation_passed = False
# score should be greater than 0 and minimum_password_score
if result.get('score') and result.get('score') >= minimum_password_score:
password_policy_validation_passed = True
result['feedback']['password_policy_validation_passed'] = password_policy_validation_passed
return result
#for login
@frappe.whitelist()
def has_email_account(email):
return frappe.get_list("Email Account", filters={"email_id": email})
@frappe.whitelist(allow_guest=False)
def get_email_awaiting(user):
waiting = frappe.db.sql("""select email_account,email_id
from `tabUser Email`
where awaiting_password = 1
and parent = %(user)s""", {"user":user}, as_dict=1)
if waiting:
return waiting
else:
frappe.db.sql("""update `tabUser Email`
set awaiting_password =0
where parent = %(user)s""",{"user":user})
return False
@frappe.whitelist(allow_guest=False)
def set_email_password(email_account, user, password):
account = frappe.get_doc("Email Account", email_account)
if account.awaiting_password:
account.awaiting_password = 0
account.password = password
try:
account.save(ignore_permissions=True)
except Exception:
frappe.db.rollback()
return False
return True
def setup_user_email_inbox(email_account, awaiting_password, email_id, enable_outgoing):
""" setup email inbox for user """
def add_user_email(user):
user = frappe.get_doc("User", user)
row = user.append("user_emails", {})
row.email_id = email_id
row.email_account = email_account
row.awaiting_password = awaiting_password or 0
row.enable_outgoing = enable_outgoing or 0
user.save(ignore_permissions=True)
udpate_user_email_settings = False
if not all([email_account, email_id]):
return
user_names = frappe.db.get_values("User", { "email": email_id }, as_dict=True)
if not user_names:
return
for user in user_names:
user_name = user.get("name")
		# check if inbox is already configured
user_inbox = frappe.db.get_value("User Email", {
"email_account": email_account,
"parent": user_name
}, ["name"]) or None
if not user_inbox:
add_user_email(user_name)
else:
# update awaiting password for email account
			update_user_email_settings = True
	if update_user_email_settings:
frappe.db.sql("""UPDATE `tabUser Email` SET awaiting_password = %(awaiting_password)s,
enable_outgoing = %(enable_outgoing)s WHERE email_account = %(email_account)s""", {
"email_account": email_account,
"enable_outgoing": enable_outgoing,
"awaiting_password": awaiting_password or 0
})
else:
users = " and ".join([frappe.bold(user.get("name")) for user in user_names])
frappe.msgprint(_("Enabled email inbox for user {0}").format(users))
ask_pass_update()
def remove_user_email_inbox(email_account):
""" remove user email inbox settings if email account is deleted """
if not email_account:
return
users = frappe.get_all("User Email", filters={
"email_account": email_account
}, fields=["parent as name"])
for user in users:
doc = frappe.get_doc("User", user.get("name"))
to_remove = [ row for row in doc.user_emails if row.email_account == email_account ]
[ doc.remove(row) for row in to_remove ]
doc.save(ignore_permissions=True)
def ask_pass_update():
	# update the system defaults with the list of users awaiting a password
from frappe.utils import set_default
users = frappe.db.sql("""SELECT DISTINCT(parent) as user FROM `tabUser Email`
WHERE awaiting_password = 1""", as_dict=True)
password_list = [ user.get("user") for user in users ]
set_default("email_user_password", u','.join(password_list))
def _get_user_for_update_password(key, old_password):
# verify old password
if key:
user = frappe.db.get_value("User", {"reset_password_key": key})
if not user:
return {
'message': _("The Link specified has either been used before or Invalid")
}
elif old_password:
# verify old password
frappe.local.login_manager.check_password(frappe.session.user, old_password)
user = frappe.session.user
else:
return
return {
'user': user
}
def reset_user_data(user):
user_doc = frappe.get_doc("User", user)
redirect_url = user_doc.redirect_url
user_doc.reset_password_key = ''
user_doc.redirect_url = ''
user_doc.save(ignore_permissions=True)
return user_doc, redirect_url
@frappe.whitelist()
def verify_password(password):
frappe.local.login_manager.check_password(frappe.session.user, password)
@frappe.whitelist(allow_guest=True)
def sign_up(email, full_name, redirect_to):
if not is_signup_enabled():
frappe.throw(_('Sign Up is disabled'), title='Not Allowed')
user = frappe.db.get("User", {"email": email})
if user:
if user.disabled:
return 0, _("Registered but disabled")
else:
return 0, _("Already Registered")
else:
if frappe.db.sql("""select count(*) from tabUser where
HOUR(TIMEDIFF(CURRENT_TIMESTAMP, TIMESTAMP(modified)))=1""")[0][0] > 300:
frappe.respond_as_web_page(_('Temporarily Disabled'),
_('Too many users signed up recently, so the registration is disabled. Please try back in an hour'),
http_status_code=429)
from frappe.utils import random_string
user = frappe.get_doc({
"doctype":"User",
"email": email,
"first_name": escape_html(full_name),
"enabled": 1,
"new_password": random_string(10),
"user_type": "Website User"
})
user.flags.ignore_permissions = True
user.flags.ignore_password_policy = True
user.insert()
# set default signup role as per Portal Settings
default_role = frappe.db.get_value("Portal Settings", None, "default_role")
if default_role:
user.add_roles(default_role)
if redirect_to:
frappe.cache().hset('redirect_after_login', user.name, redirect_to)
if user.flags.email_sent:
return 1, _("Please check your email for verification")
else:
return 2, _("Please ask your administrator to verify your sign-up")
@frappe.whitelist(allow_guest=True)
@rate_limit(key='user', limit=get_password_reset_limit, seconds = 24*60*60, methods=['POST'])
def reset_password(user):
if user=="Administrator":
return 'not allowed'
try:
user = frappe.get_doc("User", user)
if not user.enabled:
return 'disabled'
user.validate_reset_password()
user.reset_password(send_email=True)
return frappe.msgprint(_("Password reset instructions have been sent to your email"))
except frappe.DoesNotExistError:
frappe.clear_messages()
return 'not found'
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def user_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond, get_filters_cond
conditions=[]
user_type_condition = "and user_type != 'Website User'"
if filters and filters.get('ignore_user_type'):
user_type_condition = ''
filters.pop('ignore_user_type')
txt = "%{}%".format(txt)
return frappe.db.sql("""SELECT `name`, CONCAT_WS(' ', first_name, middle_name, last_name)
FROM `tabUser`
WHERE `enabled`=1
{user_type_condition}
AND `docstatus` < 2
AND `name` NOT IN ({standard_users})
AND ({key} LIKE %(txt)s
OR CONCAT_WS(' ', first_name, middle_name, last_name) LIKE %(txt)s)
{fcond} {mcond}
ORDER BY
CASE WHEN `name` LIKE %(txt)s THEN 0 ELSE 1 END,
CASE WHEN concat_ws(' ', first_name, middle_name, last_name) LIKE %(txt)s
THEN 0 ELSE 1 END,
NAME asc
LIMIT %(page_len)s OFFSET %(start)s
""".format(
user_type_condition = user_type_condition,
standard_users=", ".join([frappe.db.escape(u) for u in STANDARD_USERS]),
key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype)
),
dict(start=start, page_len=page_len, txt=txt)
)
def get_total_users():
"""Returns total no. of system users"""
return flt(frappe.db.sql('''SELECT SUM(`simultaneous_sessions`)
FROM `tabUser`
WHERE `enabled` = 1
AND `user_type` = 'System User'
AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0])
def get_system_users(exclude_users=None, limit=None):
if not exclude_users:
exclude_users = []
elif not isinstance(exclude_users, (list, tuple)):
exclude_users = [exclude_users]
limit_cond = ''
if limit:
limit_cond = 'limit {0}'.format(limit)
exclude_users += list(STANDARD_USERS)
system_users = frappe.db.sql_list("""select name from `tabUser`
where enabled=1 and user_type != 'Website User'
and name not in ({}) {}""".format(", ".join(["%s"]*len(exclude_users)), limit_cond),
exclude_users)
return system_users
def get_active_users():
"""Returns No. of system users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type != 'Website User'
and name not in ({})
and hour(timediff(now(), last_active)) < 72""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_website_users():
"""Returns total no. of website users"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'""")[0][0]
def get_active_website_users():
"""Returns No. of website users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'
and hour(timediff(now(), last_active)) < 72""")[0][0]
def get_permission_query_conditions(user):
if user=="Administrator":
return ""
else:
return """(`tabUser`.name not in ({standard_users}))""".format(
standard_users = ", ".join(frappe.db.escape(user) for user in STANDARD_USERS))
def has_permission(doc, user):
if (user != "Administrator") and (doc.name in STANDARD_USERS):
	# don't allow a non-Administrator user to view / edit the Administrator user
return False
def notify_admin_access_to_system_manager(login_manager=None):
if (login_manager
and login_manager.user == "Administrator"
and frappe.local.conf.notify_admin_access_to_system_manager):
site = '<a href="{0}" target="_blank">{0}</a>'.format(frappe.local.request.host_url)
date_and_time = '<b>{0}</b>'.format(format_datetime(now_datetime(), format_string="medium"))
ip_address = frappe.local.request_ip
access_message = _('Administrator accessed {0} on {1} via IP Address {2}.').format(
site, date_and_time, ip_address)
frappe.sendmail(
recipients=get_system_managers(),
subject=_("Administrator Logged In"),
template="administrator_logged_in",
args={'access_message': access_message},
header=['Access Notification', 'orange']
)
def extract_mentions(txt):
"""Find all instances of @mentions in the html."""
soup = BeautifulSoup(txt, 'html.parser')
emails = []
for mention in soup.find_all(class_='mention'):
if mention.get('data-is-group') == 'true':
try:
user_group = frappe.get_cached_doc('User Group', mention['data-id'])
emails += [d.user for d in user_group.user_group_members]
except frappe.DoesNotExistError:
pass
continue
email = mention['data-id']
emails.append(email)
return emails
def handle_password_test_fail(result):
suggestions = result['feedback']['suggestions'][0] if result['feedback']['suggestions'] else ''
warning = result['feedback']['warning'] if 'warning' in result['feedback'] else ''
suggestions += "<br>" + _("Hint: Include symbols, numbers and capital letters in the password") + '<br>'
frappe.throw(' '.join([_('Invalid Password:'), warning, suggestions]))
def update_gravatar(name):
gravatar = has_gravatar(name)
if gravatar:
frappe.db.set_value('User', name, 'user_image', gravatar)
@frappe.whitelist(allow_guest=True)
def send_token_via_sms(tmp_id,phone_no=None,user=None):
try:
from frappe.core.doctype.sms_settings.sms_settings import send_request
except:
return False
if not frappe.cache().ttl(tmp_id + '_token'):
return False
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
if not ss.sms_gateway_url:
return False
token = frappe.cache().get(tmp_id + '_token')
args = {ss.message_parameter: 'verification code is {}'.format(token)}
for d in ss.get("parameters"):
args[d.parameter] = d.value
if user:
user_phone = frappe.db.get_value('User', user, ['phone','mobile_no'], as_dict=1)
usr_phone = user_phone.mobile_no or user_phone.phone
if not usr_phone:
return False
else:
if phone_no:
usr_phone = phone_no
else:
return False
args[ss.receiver_parameter] = usr_phone
status = send_request(ss.sms_gateway_url, args, use_post=ss.use_post)
if 200 <= status < 300:
frappe.cache().delete(tmp_id + '_token')
return True
else:
return False
@frappe.whitelist(allow_guest=True)
def send_token_via_email(tmp_id,token=None):
import pyotp
user = frappe.cache().get(tmp_id + '_user')
count = token or frappe.cache().get(tmp_id + '_token')
if ((not user) or (user == 'None') or (not count)):
return False
user_email = frappe.db.get_value('User',user, 'email')
if not user_email:
return False
otpsecret = frappe.cache().get(tmp_id + '_otp_secret')
hotp = pyotp.HOTP(otpsecret)
frappe.sendmail(
recipients=user_email,
sender=None,
subject="Verification Code",
template="verification_code",
args=dict(code=hotp.at(int(count))),
delayed=False,
retry=3
)
return True
@frappe.whitelist(allow_guest=True)
def reset_otp_secret(user):
otp_issuer = frappe.db.get_value('System Settings', 'System Settings', 'otp_issuer_name')
user_email = frappe.db.get_value('User',user, 'email')
if frappe.session.user in ["Administrator", user] :
frappe.defaults.clear_default(user + '_otplogin')
frappe.defaults.clear_default(user + '_otpsecret')
email_args = {
'recipients':user_email, 'sender':None, 'subject':'OTP Secret Reset - {}'.format(otp_issuer or "Frappe Framework"),
'message':'<p>Your OTP secret on {} has been reset. If you did not perform this reset and did not request it, please contact your System Administrator immediately.</p>'.format(otp_issuer or "Frappe Framework"),
'delayed':False,
'retry':3
}
enqueue(method=frappe.sendmail, queue='short', timeout=300, event=None, is_async=True, job_name=None, now=False, **email_args)
return frappe.msgprint(_("OTP Secret has been reset. Re-registration will be required on next login."))
else:
return frappe.throw(_("OTP secret can only be reset by the Administrator."))
def throttle_user_creation():
if frappe.flags.in_import:
return
if frappe.db.get_creation_count('User', 60) > frappe.local.conf.get("throttle_user_limit", 60):
frappe.throw(_('Throttled'))
@frappe.whitelist()
def get_role_profile(role_profile):
roles = frappe.get_doc('Role Profile', {'role_profile': role_profile})
return roles.roles
@frappe.whitelist()
def get_module_profile(module_profile):
module_profile = frappe.get_doc('Module Profile', {'module_profile_name': module_profile})
return module_profile.get('block_modules')
def update_roles(role_profile):
users = frappe.get_all('User', filters={'role_profile_name': role_profile})
role_profile = frappe.get_doc('Role Profile', role_profile)
roles = [role.role for role in role_profile.roles]
for d in users:
user = frappe.get_doc('User', d)
user.set('roles', [])
user.add_roles(*roles)
def create_contact(user, ignore_links=False, ignore_mandatory=False):
from frappe.contacts.doctype.contact.contact import get_contact_name
if user.name in ["Administrator", "Guest"]: return
contact_name = get_contact_name(user.email)
if not contact_name:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": user.first_name,
"last_name": user.last_name,
"user": user.name,
"gender": user.gender,
})
if user.email:
contact.add_email(user.email, is_primary=True)
if user.phone:
contact.add_phone(user.phone, is_primary_phone=True)
if user.mobile_no:
contact.add_phone(user.mobile_no, is_primary_mobile_no=True)
contact.insert(ignore_permissions=True, ignore_links=ignore_links, ignore_mandatory=ignore_mandatory)
else:
contact = frappe.get_doc("Contact", contact_name)
contact.first_name = user.first_name
contact.last_name = user.last_name
contact.gender = user.gender
		# Add phone number if phone does not exist in contact
if user.phone and not any(new_contact.phone == user.phone for new_contact in contact.phone_nos):
# Set primary phone if there is no primary phone number
contact.add_phone(
user.phone,
is_primary_phone=not any(
new_contact.is_primary_phone == 1 for new_contact in contact.phone_nos
)
)
		# Add mobile number if mobile does not exist in contact
if user.mobile_no and not any(new_contact.phone == user.mobile_no for new_contact in contact.phone_nos):
# Set primary mobile if there is no primary mobile number
contact.add_phone(
user.mobile_no,
is_primary_mobile_no=not any(
new_contact.is_primary_mobile_no == 1 for new_contact in contact.phone_nos
)
)
contact.save(ignore_permissions=True)
@frappe.whitelist()
def generate_keys(user):
"""
generate api key and api secret
:param user: str
"""
if "System Manager" in frappe.get_roles():
user_details = frappe.get_doc("User", user)
api_secret = frappe.generate_hash(length=15)
# if api key is not set generate api key
if not user_details.api_key:
api_key = frappe.generate_hash(length=15)
user_details.api_key = api_key
user_details.api_secret = api_secret
user_details.save()
return {"api_secret": api_secret}
frappe.throw(frappe._("Not Permitted"), frappe.PermissionError)
@frappe.whitelist()
def switch_theme(theme):
if theme in ["Dark", "Light"]:
frappe.db.set_value("User", frappe.session.user, "desk_theme", theme)
|
mit
| -1,635,436,983,036,877,300
| 31.306634
| 213
| 0.69565
| false
| 3.128163
| false
| false
| false
|
jacebrowning/dropthebeat
|
setup.py
|
1
|
2371
|
#!/usr/bin/env python
"""Setup script for DropTheBeat."""
import os
import sys
import setuptools
PACKAGE_NAME = 'dtb'
MINIMUM_PYTHON_VERSION = 3, 4
def check_python_version():
"""Exit when the Python version is too low."""
if sys.version_info < MINIMUM_PYTHON_VERSION:
sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
def read_package_variable(key):
"""Read the value of a variable from the package without importing."""
module_path = os.path.join(PACKAGE_NAME, '__init__.py')
with open(module_path) as module:
for line in module:
parts = line.strip().split(' ')
if parts and parts[0] == key:
return parts[-1].strip("'")
assert 0, "'{0}' not found in '{1}'".format(key, module_path)
def read_descriptions():
"""Build a description for the project from documentation files."""
try:
readme = open("README.rst").read()
changelog = open("CHANGELOG.rst").read()
except IOError:
return "<placeholder>"
else:
return readme + '\n' + changelog
check_python_version()
setuptools.setup(
name=read_package_variable('__project__'),
version=read_package_variable('__version__'),
description="Music sharing using Dropbox.",
url='https://github.com/jacebrowning/dropthebeat',
author='Jace Browning',
author_email='jacebrowning@gmail.com',
packages=setuptools.find_packages(),
entry_points={'console_scripts': ['dtb = dtb.cli:main',
'DropTheBeat = dtb.gui:main']},
long_description=read_descriptions(),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: File Sharing',
'Topic :: Multimedia :: Sound/Audio',
],
install_requires=open("requirements.txt").readlines(),
)
|
lgpl-3.0
| 5,393,324,607,916,161,000
| 29.397436
| 78
| 0.609869
| false
| 4.018644
| false
| false
| false
|
MeanEYE/Sunflower
|
sunflower/gui/preferences/toolbar.py
|
1
|
8025
|
import json
from gi.repository import Gtk
from sunflower.widgets.settings_page import SettingsPage
class Column:
NAME = 0
DESCRIPTION = 1
TYPE = 2
ICON = 3
CONFIG = 4
class ToolbarOptions(SettingsPage):
"""Toolbar options extension class"""
def __init__(self, parent, application):
SettingsPage.__init__(self, parent, application, 'toolbar', _('Toolbar'))
self._toolbar_manager = self._application.toolbar_manager
# create list box
container = Gtk.ScrolledWindow()
container.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)
container.set_shadow_type(Gtk.ShadowType.IN)
self._store = Gtk.ListStore(str, str, str, str, str)
self._list = Gtk.TreeView()
self._list.set_model(self._store)
cell_icon = Gtk.CellRendererPixbuf()
cell_name = Gtk.CellRendererText()
cell_name.set_property('editable', True)
cell_name.set_property('mode', Gtk.CellRendererMode.EDITABLE)
cell_name.connect('edited', self._edited_name, 0)
cell_type = Gtk.CellRendererText()
# create name column
col_name = Gtk.TreeViewColumn(_('Name'))
col_name.set_min_width(200)
col_name.set_resizable(True)
		# pack and configure renderers
col_name.pack_start(cell_icon, False)
col_name.pack_start(cell_name, True)
col_name.add_attribute(cell_icon, 'icon-name', Column.ICON)
col_name.add_attribute(cell_name, 'text', Column.NAME)
# create type column
col_type = Gtk.TreeViewColumn(_('Type'), cell_type, markup=Column.DESCRIPTION)
col_type.set_resizable(True)
col_type.set_expand(True)
# add columns to the list
self._list.append_column(col_name)
self._list.append_column(col_type)
container.add(self._list)
# create controls
button_box = Gtk.HBox(False, 5)
button_add = Gtk.Button(stock=Gtk.STOCK_ADD)
button_add.connect('clicked', self._add_widget)
button_delete = Gtk.Button(stock=Gtk.STOCK_DELETE)
button_delete.connect('clicked', self._delete_widget)
button_edit = Gtk.Button(stock=Gtk.STOCK_EDIT)
button_edit.connect('clicked', self._edit_widget)
image_up = Gtk.Image()
image_up.set_from_stock(Gtk.STOCK_GO_UP, Gtk.IconSize.BUTTON)
button_move_up = Gtk.Button(label=None)
button_move_up.add(image_up)
button_move_up.set_tooltip_text(_('Move Up'))
button_move_up.connect('clicked', self._move_widget, -1)
image_down = Gtk.Image()
image_down.set_from_stock(Gtk.STOCK_GO_DOWN, Gtk.IconSize.BUTTON)
button_move_down = Gtk.Button(label=None)
button_move_down.add(image_down)
button_move_down.set_tooltip_text(_('Move Down'))
button_move_down.connect('clicked', self._move_widget, 1)
# pack ui
button_box.pack_start(button_add, False, False, 0)
button_box.pack_start(button_delete, False, False, 0)
button_box.pack_start(button_edit, False, False, 0)
button_box.pack_end(button_move_down, False, False, 0)
button_box.pack_end(button_move_up, False, False, 0)
# toolbar style
label_style = Gtk.Label(label=_('Toolbar style:'))
list_styles = Gtk.ListStore(str, int)
list_styles.append((_('Icons'), Gtk.ToolbarStyle.ICONS))
list_styles.append((_('Text'), Gtk.ToolbarStyle.TEXT))
list_styles.append((_('Both'), Gtk.ToolbarStyle.BOTH))
list_styles.append((_('Both horizontal'), Gtk.ToolbarStyle.BOTH_HORIZ))
renderer = Gtk.CellRendererText()
self._combobox_styles = Gtk.ComboBox(model=list_styles)
self._combobox_styles.pack_start(renderer, True)
self._combobox_styles.add_attribute(renderer, 'text', 0)
self._combobox_styles.connect('changed', self._parent.enable_save)
# toolbar icon size
label_icon_size = Gtk.Label(label=_('Icon size:'))
list_icon_size = Gtk.ListStore(str, int)
list_icon_size.append((_('Small toolbar icon'), Gtk.IconSize.SMALL_TOOLBAR))
list_icon_size.append((_('Large toolbar icon'), Gtk.IconSize.LARGE_TOOLBAR))
list_icon_size.append((_('Same as drag icons'), Gtk.IconSize.DND))
list_icon_size.append((_('Same as dialog'), Gtk.IconSize.DIALOG))
renderer = Gtk.CellRendererText()
self._combobox_icon_size = Gtk.ComboBox(model=list_icon_size)
self._combobox_icon_size.pack_start(renderer, True)
self._combobox_icon_size.add_attribute(renderer, 'text', 0)
self._combobox_icon_size.connect('changed', self._parent.enable_save)
style_box = Gtk.HBox(False, 5)
style_box.pack_start(label_style, False, False, 0)
style_box.pack_start(self._combobox_styles, False, False, 0)
size_box = Gtk.HBox(False, 5)
size_box.pack_start(label_icon_size, False, False, 0)
size_box.pack_start(self._combobox_icon_size, False, False, 0)
self.pack_start(style_box, False, False, 0)
self.pack_start(size_box, False, False, 0)
self.pack_start(container, True, True, 0)
self.pack_start(button_box, False, False, 0)
def _add_widget(self, widget, data=None):
"""Show dialog for creating toolbar widget"""
widget_added = self._toolbar_manager.show_create_widget_dialog(self._parent)
if widget_added:
self._add_item_to_list(widget_added)
# enable save button
self._parent.enable_save()
def _delete_widget(self, widget, data=None):
"""Delete selected toolbar widget"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
# remove item from list
list_.remove(iter_)
# enable save button if item was removed
self._parent.enable_save()
def _edited_name(self, cell, path, text, column):
"""Record edited text"""
selected_iter = self._store.get_iter(path)
if selected_iter is not None:
self._store.set_value(selected_iter, column, text)
# enable save button
self._parent.enable_save()
def _edit_widget(self, widget, data=None):
"""Edit selected toolbar widget"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
name = list_.get_value(iter_, Column.NAME)
widget_type = list_.get_value(iter_, Column.TYPE)
widget_config = list_.get_value(iter_, Column.CONFIG)
edited = self._toolbar_manager.show_configure_widget_dialog(
name,
widget_type,
json.loads(widget_config),
self._parent
)
# enable save button
if edited:
self._store.set_value(iter_, Column.CONFIG, json.dumps(edited))
self._parent.enable_save()
def _move_widget(self, widget, direction):
"""Move selected bookmark up"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
# get iter index
index = list_.get_path(iter_)[0]
# depending on direction, swap iters
if (direction == -1 and index > 0) \
or (direction == 1 and index < len(list_) - 1):
list_.swap(iter_, list_[index + direction].iter)
# enable save button if iters were swapped
self._parent.enable_save()
def _add_item_to_list(self, item):
name = item['name']
widget_type = item['type']
widget_config = item['config'] if 'config' in item else {}
data = self._toolbar_manager.get_widget_data(widget_type)
if data is not None:
icon = data[1]
description = data[0]
else: # failsafe, display raw widget type
icon = ''
description = '{0} <small><i>({1})</i></small>'.format(widget_type, _('missing plugin'))
self._store.append((name, description, widget_type, icon, json.dumps(widget_config)))
def _load_options(self):
"""Load options from file"""
options = self._application.toolbar_options
self._combobox_styles.set_active(options.get('style'))
self._combobox_icon_size.set_active(options.get('icon_size'))
# clear list store
self._store.clear()
for item in options.get('items'):
self._add_item_to_list(item)
def _save_options(self):
"""Save settings to config file"""
options = self._application.toolbar_options
options.set('style', self._combobox_styles.get_active())
options.set('icon_size', self._combobox_icon_size.get_active())
# save toolbar items settings
items = []
for data in self._store:
items.append({
'name': data[Column.NAME],
'type': data[Column.TYPE],
'config': json.loads(data[Column.CONFIG]),
})
options.set('items', items)
|
gpl-3.0
| -8,279,819,229,029,512,000
| 30.470588
| 91
| 0.694704
| false
| 3.016917
| true
| false
| false
|
raiden-network/raiden
|
raiden/utils/notifying_queue.py
|
1
|
1422
|
from typing import Generic, Iterable, List, TypeVar
from gevent.event import Event
from gevent.queue import Queue
T = TypeVar("T")
class NotifyingQueue(Event, Generic[T]):
"""This is not the same as a JoinableQueue. Here, instead of waiting for
all the work to be processed, the wait is for work to be available.
"""
def __init__(self, maxsize: int = None, items: Iterable[T] = ()) -> None:
super().__init__()
self.queue = Queue(maxsize, items)
if items:
self.set()
def put(self, item: T) -> None:
"""Add new item to the queue."""
self.queue.put(item)
self.set()
def get(self, block: bool = True, timeout: float = None) -> T:
"""Removes and returns an item from the queue."""
value = self.queue.get(block, timeout)
if self.queue.empty():
self.clear()
return value
def peek(self, block: bool = True, timeout: float = None) -> T:
return self.queue.peek(block, timeout)
def __len__(self) -> int:
return len(self.queue)
def copy(self) -> List[T]:
"""Copies the current queue items."""
copy = self.queue.copy()
result = list()
while not copy.empty():
result.append(copy.get_nowait())
return result
def __repr__(self) -> str:
return f"NotifyingQueue(id={id(self)}, num_items={len(self.queue)})"
|
mit
| 7,711,695,073,187,145,000
| 28.020408
| 77
| 0.580169
| false
| 3.812332
| false
| false
| false
|
pwarren/AGDeviceControl
|
agdevicecontrol/tests/test_aggregator.py
|
1
|
9428
|
# AGDeviceControl
# Copyright (C) 2005 The Australian National University
#
# This file is part of AGDeviceControl.
#
# AGDeviceControl is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# AGDeviceControl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AGDeviceControl; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import random
import types
import agdevicecontrol
from agdevicecontrol.server.aggregator import Aggregator
from agdevicecontrol.server.configurator import Configurator
from twisted.internet import defer, reactor
from twisted.trial import unittest
from twisted.spread import pb
import agdevicecontrol.server.ports as ports
from twisted.test.test_process import SignalMixin
from agdevicecontrol.tests.subprocessprotocol import SubProcessProtocol
configdata = """
# sample Aggregator.conf
[DeviceServer1]
host: localhost
port: %s
password: bkurk
""" % ports.deviceserver
class TestAggregator(SignalMixin, unittest.TestCase):
def setUpClass(self):
"""Start a DeviceServer in a child process to test against"""
self.deviceserverprocess = SubProcessProtocol()
self.deviceserverprocess.waitOnStartUp( ['server.py', 'deviceserver.conf', '-n'], \
path=os.path.join(agdevicecontrol.path,'bin') )
if self.deviceserverprocess.running is False:
raise unittest.SkipTest, "DeviceServer didn't start correctly, skipping tests"
#wait for slow single CPU buildbots to catch up
import time
time.sleep(1)
# use the config above
conf = Configurator()
conf.fromString(configdata)
# can be set by timeout
self.failure = False
# safety timeout
self.timeout = reactor.callLater(10, self.failed, "Aggregator failed to connect to all deviceservers ... failing")
self.aggregator = Aggregator(conf)
self.done = False
while not self.done:
print "Waiting for aggregator to connect to deviceservers"
reactor.iterate(0.1)
if self.aggregator.connected:
self.succeeded()
if self.failure:
raise unittest.SkipTest, "Aggregator didn't connect to all deviceservers ... skipping tests"
# FIXME: we really should handle missing and newly appearing deviceservers.
# safety timeout
self.timeout = reactor.callLater(10, self.failed, "Aggregator failed to map all deviceservers ... failing")
self.aggregator.notifyOnMapped(self.succeeded)
self.done = False
while not self.done:
print "Waiting for aggregator to map deviceservers"
reactor.iterate(0.1)
if self.failure:
raise unittest.SkipTest, "Aggregator didn't start correctly, skipping tests"
def tearDownClass(self):
"""Stop the DeviceServer running in a child process"""
print "*** tearDownClass: ", self.deviceserverprocess.done
self.deviceserverprocess.waitOnShutDown()
def succeeded(self, *args):
"""Allow reactor iteration loop in test proper to exit and pass test"""
self.done = True
if self.timeout is not None:
self.timeout.cancel() # safety timeout no longer necessary
self.timeout = None
self.lastargs = args # make persistent for later checks
def failed(self, reason):
"""Allow reactor iteration loop in test proper to exit and fail test"""
self.done = True
self.failure = reason
self.timeout.cancel() # safety timeout no longer necessary
self.timeout = None
def setUp(self):
"""I'm called at the very beginning of each test"""
self.done = False
self.failure = None
self.timeout = None
def tearDown(self):
"""I'm called at the end of each test"""
if self.timeout:
self.timeout.cancel()
def timedOut(self):
"""I'm called when the safety timer expires indicating test probably won't complete"""
print "timedOut callback, test did not complete"
self.failed("Safety timeout callback ... test did not complete")
reactor.crash()
#---------- tests proper ------------------------------------
def test_handled_configurator(self):
"""Aggregator instantiated with a configurator rather than .conf filename"""
assert 'DeviceServer1' in self.aggregator.config
def test_password(self):
"""Aggregator should have random password"""
assert type(self.aggregator.getPassword()) == type("")
# ensure a second instance has differing password ...
conf = Configurator()
conf.fromString('')
other = Aggregator(conf)
assert other.getPassword() != self.aggregator.getPassword()
def test_devicelist_as_deferred(self):
"""Return aggregated device list"""
# safety timeout
self.timeout = reactor.callLater(10, self.failed, "retrieving devicelist timed out ... failing")
d = self.aggregator.getDeviceList()
assert isinstance(d, defer.Deferred)
d.addCallback(self.succeeded)
# idle until code above triggers succeeded or timeout causes failure
while not self.done:
reactor.iterate(0.1)
# will arrive here eventually when either succeeded or failed method has fired
if self.failure:
self.failed(self.failure)
devicelist = self.lastargs[0]
assert len(devicelist) == 2
assert 'Device1' in devicelist
assert 'Device2' in devicelist
def test_devicemap_as_deferred(self):
"""Return aggregated device map"""
# safety timeout
self.timeout = reactor.callLater(10, self.failed, "retrieving devicemap timed out ... failing")
d = self.aggregator.getDeviceMap()
assert isinstance(d, defer.Deferred)
# caution: as this deferred is ready-to-go, the callback is called *immediately*
d.addCallback(self.succeeded)
# i.e., self.succeeded has now been called
# idle until code above triggers succeeded or timeout causes failure
while not self.done:
reactor.iterate(0.1)
# will arrive here eventually when either succeeded or failed method has fired
if self.failure:
self.failed(self.failure)
devicemap = self.lastargs[0]
print devicemap
assert type(devicemap) == types.DictType
assert len(devicemap) == 1
assert 'PseudoDevice' in devicemap
assert len(devicemap['PseudoDevice']) == 2
assert 'Device1' in devicemap['PseudoDevice']
assert 'Device2' in devicemap['PseudoDevice']
def test_device_execute(self):
"""Proxy forward command to correct DeviceServer"""
# safety timeout
self.timeout = reactor.callLater(10, self.failed, "executing remote setParameter timed out ... failing")
# 3-digit random integer
value = int(random.random()*1000)
# get a device key for use in next step
self.done = False
d = self.aggregator.getDeviceList()
d.addCallback(self.succeeded)
d.addErrback(self.failed)
while not self.done:
reactor.iterate(0.1)
if self.failure:
self.fail(self.failure)
print
print "DEBUG:"
device = self.lastargs[0][0]
print device.name
# store number in 'remote' PseudoDevice
d = self.aggregator.deviceExecute(device, 'setParameter', value)
assert isinstance(d, defer.Deferred)
d.addCallback(self.succeeded)
# idle until code above triggers succeeded or timeout causes failure
self.done = False
while not self.done:
reactor.iterate(0.1)
# will arrive here eventually when either succeeded or failed method has fired
if self.failure:
self.failed(self.failure)
# safety timeout
self.timeout = reactor.callLater(10, self.failed, "executing remote getParameter timed out ... failing")
        # retrieve the number from the 'remote' PseudoDevice
d = self.aggregator.deviceExecute(device, 'getParameter')
assert isinstance(d, defer.Deferred)
d.addCallback(self.succeeded)
# idle until code above triggers succeeded or timeout causes failure
self.done = False
while not self.done:
reactor.iterate(0.1)
# will arrive here eventually when either succeeded or failed method has fired
if self.failure:
self.failed(self.failure)
returnvalue = self.lastargs[0]
assert returnvalue == value
if False:
test_handled_configurator = True
test_devicelist_as_deferred = True
test_devicemap_as_deferred = True
test_device_execute = True
test_password = True
|
gpl-2.0
| -369,536,809,628,326,600
| 30.851351
| 122
| 0.655176
| false
| 4.480989
| true
| false
| false
|
vallemrv/tpvB3
|
tpv_for_eetop/valle_libs/valleorm/models/relatedfields.py
|
1
|
9703
|
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <vallemrv>
# @Date: 29-Aug-2017
# @Email: valle.mrv@gmail.com
# @Last modified by: valle
# @Last modified time: 18-Feb-2018
# @License: Apache license vesion 2.0
import sys
import inspect
import importlib
from constant import constant
class RelationShip(object):
def __init__(self, othermodel, **options):
self.tipo_class = constant.TIPO_RELATION
self.class_name = "ForeignKey"
self.main_module = None
self.related_class = None
self.main_class = None
self.field_related_name = None
self.field_related_id = None
self.on_delete = constant.CASCADE
if type(othermodel) in (str, unicode):
self.related_name = othermodel
else:
self.related_name = othermodel.__name__
self.related_class = othermodel
for k, v in options.items():
setattr(self, k, v)
def get_id_field_name(self):
if self.field_related_name == None:
return self.related_name.lower() + "_id"
return self.field_related_name
def set_id_field_name(self, value):
self.field_related_name = value
def get(self, **condition):
pass
field_name_id = property(get_id_field_name, set_id_field_name)
class OneToManyField(RelationShip):
def __init__(self, main_class, related_name, **kargs):
super(OneToManyField, self).__init__(related_name, **kargs)
self.class_name = "OneToManyField"
self.main_class = main_class
self.related_name = related_name
if self.main_module == None:
self.main_module = self.main_class.__module__
self.related_class = create_class_related(self.main_module, self.related_name)
self.tb_name_main = self.main_class.get_db_table()
if self.field_related_id == None:
self.field_name_id = self.tb_name_main + "_id"
def get(self, **condition):
query = u"{0}={1}".format(self.field_name_id, self.main_class.id)
if 'query' in condition:
condition['query'] += " AND " + query
else:
condition['query'] = query
return self.related_class.filter(**condition)
def add(self, child):
if self.main_class.id == -1:
self.main_class.save()
setattr(child, self.field_name_id, self.main_class.id)
child.save()
class ForeignKey(RelationShip):
def __init__(self, othermodel, on_delete=constant.CASCADE, **kargs):
super(ForeignKey, self).__init__(othermodel, **kargs)
self.class_name = "ForeignKey"
self.on_delete = on_delete
def get_choices(self, **condition):
return self.related_class.getAll(**condition)
def get_sql_pk(self):
sql = u"FOREIGN KEY({0}) REFERENCES {1}(id) %s" % self.on_delete
sql = sql.format(self.field_name_id, self.related_name)
return sql
def get(self):
if self.related_class == None:
if self.main_module == None:
self.main_module = self.main_class.__module__
self.related_class = create_class_related(self.main_module, self.related_name)
reg = self.related_class(db_name=self.main_class.db_name)
reg.load_by_pk(getattr(self.main_class, self.field_name_id))
return reg
class ManyToManyField(RelationShip):
def __init__(self, othermodel, db_table_nexo=None, **kargs):
super(ManyToManyField, self).__init__(othermodel, **kargs)
self.class_name = "ManyToManyField"
self.db_table_nexo = db_table_nexo
if self.main_class != None:
if self.main_module == None:
self.main_module = self.main_class.__module__
self.tb_name_main = self.main_class.get_db_table()
self.related_class = create_class_related(self.main_module, self.related_name)
self.tb_name_related = self.related_class.get_db_table()
if self.field_related_id == None:
self.field_name_id = self.tb_name_main + "_id"
self.field_related_id = self.tb_name_related + "_id"
def get_sql_tb_nexo(self):
key = "PRIMARY KEY ({0}, {1})".format(self.field_name_id, self.field_related_id)
frgKey = u"FOREIGN KEY({0}) REFERENCES {1}(id) ON DELETE CASCADE, "
frgKey = frgKey.format(self.field_name_id, self.tb_name_main)
frgKey += u"FOREIGN KEY({0}) REFERENCES {1}(id) ON DELETE CASCADE"
frgKey = frgKey.format(self.field_related_id, self.tb_name_related)
sql = u"CREATE TABLE IF NOT EXISTS {0} ({1}, {2} ,{3}, {4});"
sql = sql.format(self.db_table_nexo, self.field_name_id+" INTEGER NOT NULL",
self.field_related_id+" INTEGER NOT NULL ",key, frgKey)
return sql
def get(self, **condition):
if "tb_nexo" in condition:
self.db_table_nexo = condition["tb_nexo"]
if "field_related_id" in condition:
self.field_related_id = condition["field_related_id"]
if "field_name_id" in condition:
self.field_name_id = condition["field_name_id"]
condition["columns"] = [self.tb_name_related+".*"]
condition["joins"] = [(self.db_table_nexo + " ON "+ \
self.db_table_nexo+"."+self.field_related_id+\
"="+self.tb_name_related+".id")]
query = self.field_name_id+"="+str(self.main_class.id)
if 'query' in condition:
condition["query"] += " AND " + query
else:
condition["query"] = query
if self.related_class == None:
if self.main_module == None:
self.main_module = self.main_class.__module__
self.related_class = create_class_related(self.main_module, self.related_name)
return self.related_class.filter(**condition)
def add(self, *childs):
for child in childs:
child.save()
cols = [self.field_name_id, self.field_related_id]
values = [str(self.main_class.id), str(child.id)]
sql = u"INSERT OR REPLACE INTO {0} ({1}) VALUES ({2});".format(self.db_table_nexo,
", ".join(cols), ", ".join(values));
self.main_class.execute(sql)
def delete(self, child):
sql = u"DELETE FROM {0} WHERE {1}={2} AND {3}={4};".format(self.db_table_nexo,
self.field_name_id,
child.id,
self.field_related_id,
self.main_class.id)
self.main_class.execute(sql)
class ManyToManyChild(RelationShip):
def __init__(self, main_class, related_name, **kargs):
super(ManyToManyChild, self).__init__(related_name, **kargs)
self.class_name = "ManyToManyChild"
self.main_class = main_class
self.related_name = related_name
if self.main_module == None:
self.main_module = self.main_class.__module__
self.related_class = create_class_related(self.main_module, self.related_name)
self.tb_name_main = self.main_class.get_db_table()
self.tb_name_related = self.related_class.get_db_table()
self.db_table_nexo = self.tb_name_related + '_' + self.tb_name_main
if self.field_related_id == None:
self.field_name_id = self.tb_name_main + "_id"
self.field_related_id = self.tb_name_related + "_id"
def get(self, **condition):
if "tb_nexo" in condition:
self.db_table_nexo = condition["tb_nexo"]
if "field_related_id" in condition:
self.field_related_id = condition["field_related_id"]
if "field_name_id" in condition:
self.field_name_id = condition["field_name_id"]
condition["columns"] = [self.tb_name_related+".*"]
condition["joins"] = [(self.db_table_nexo + " ON "+ \
self.db_table_nexo+"."+self.field_related_id+\
"="+self.tb_name_related+".id")]
query = self.field_name_id+"="+str(self.main_class.id)
if 'query' in condition:
condition["query"] += " AND " + query
else:
condition["query"] = query
return self.related_class.filter(**condition)
def add(self, *childs):
for child in childs:
child.save()
cols = [self.field_name_id, self.field_related_id]
values = [str(self.main_class.id), str(child.id)]
sql = u"INSERT OR REPLACE INTO {0} ({1}) VALUES ({2});".format(self.db_table_nexo,
", ".join(cols), ", ".join(values));
self.main_class.execute(sql)
def delete(self, child):
sql = u"DELETE FROM {0} WHERE {1}={2} AND {3}={4};".format(self.db_table_nexo,
self.field_related_id,
child.id,
self.field_name_id,
self.main_class.id)
self.main_class.execute(sql)
def create_class_related(module, class_name):
module = ".".join(module.split(".")[:-1])
modulo = importlib.import_module(module)
nclass = getattr(modulo, str(class_name))
return nclass
|
apache-2.0
| -2,517,989,328,502,603,300
| 37.351779
| 99
| 0.547356
| false
| 3.673987
| false
| false
| false
|
sassoftware/saspy
|
saspy/sasiocom.py
|
1
|
37140
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import csv
import io
import numbers
import os
import shlex
import sys
import warnings
try:
from win32com.client import dynamic
except ImportError:
pass
try:
import pandas as pd
except ImportError:
pass
class SASConfigCOM(object):
"""
This object is not intended to be used directly. Instantiate a SASSession
object instead.
"""
NO_OVERRIDE = ['kernel', 'sb']
def __init__(self, **kwargs):
self._kernel = kwargs.get('kernel')
session = kwargs['sb']
sascfg = session.sascfg.SAScfg
name = session.sascfg.name
cfg = getattr(sascfg, name)
opts = getattr(sascfg, 'SAS_config_options', {})
outs = getattr(sascfg, 'SAS_output_options', {})
self.host = cfg.get('iomhost')
self.port = cfg.get('iomport')
self.user = cfg.get('omruser')
self.pw = cfg.get('omrpw')
self.authkey = cfg.get('authkey')
self.class_id = cfg.get('class_id', '440196d4-90f0-11d0-9f41-00a024bb830c')
self.provider = cfg.get('provider')
self.encoding = cfg.get('encoding', '')
self.output = outs.get('output', 'html5')
self.verbose = opts.get('verbose', True)
self.verbose = kwargs.get('verbose', self.verbose)
self._lock = opts.get('lock_down', True)
self._prompt = session.sascfg._prompt
if self.authkey is not None:
self._set_authinfo()
for key, value in filter(lambda x: x[0] not in self.NO_OVERRIDE, kwargs.items()):
self._try_override(key, value)
def _set_authinfo(self):
"""
Attempt to set the session user's credentials based on provided
key to read from ~/.authinfo file. See .authinfo documentation
here: https://documentation.sas.com/api/docsets/authinfo/9.4/content/authinfo.pdf.
This method supports a subset of the .authinfo spec, in accordance with
other IO access methods. This method will only parse `user` and `password`
arguments, but does support spaces in values if the value is quoted. Use
python's `shlex` library to parse these values.
"""
if os.name == 'nt':
authfile = os.path.expanduser(os.path.join('~', '_authinfo'))
else:
authfile = os.path.expanduser(os.path.join('~', '.authinfo'))
try:
with open(authfile, 'r') as f:
# Take first matching line found
parsed = (shlex.split(x, posix=False) for x in f.readlines())
authline = next(filter(lambda x: x[0] == self.authkey, parsed), None)
except OSError:
print('Error trying to read {}'.format(authfile))
authline = None
if authline is None:
print('Key {} not found in authinfo file: {}'.format(self.authkey, authfile))
elif len(authline) < 5:
print('Incomplete authinfo credentials in {}; key: {}'.format(authfile, self.authkey))
else:
# Override user/pw if previously set
# `authline` is in the following format:
# AUTHKEY username USERNAME password PASSWORD
self.user = authline[2]
self.pw = authline[4]
def _try_override(self, attr, value):
"""
Attempt to override a configuration file option if `self._lock` is
False. Otherwise, warn the user.
:param attr: Configuration attribute.
:param value: Configuration value.
"""
if self._lock is False:
setattr(self, attr, value)
else:
err = "Param '{}' was ignored due to configuration restriction".format(attr)
print(err, file=sys.stderr)
class SASSessionCOM(object):
"""
Initiate a connection to a SAS server and provide access for Windows
clients without the Java dependency. Utilizes available COM objects for
client communication with the IOM interface.
It may be possible to communicate with local SAS instances as well,
    although this functionality is untested. A slight change may be
required to the `_startsas` method to support local instances.
"""
SAS_APP = 'SASApp'
HTML_RESULT_FILE = 'saspy_results.html'
# SASObjectManager.Protocols Enum values
PROTOCOL_COM = 0
PROTOCOL_IOM = 2
# SAS Date/Time/Datetime formats
FMT_DEFAULT_DATE_NAME = 'E8601DA'
FMT_DEFAULT_DATE_LENGTH = 10
FMT_DEFAULT_DATE_PRECISION = 0
FMT_DEFAULT_TIME_NAME = 'E8601TM'
FMT_DEFAULT_TIME_LENGTH = 15
FMT_DEFAULT_TIME_PRECISION = 6
FMT_DEFAULT_DATETIME_NAME = 'E8601DT'
FMT_DEFAULT_DATETIME_LENGTH = 26
FMT_DEFAULT_DATETIME_PRECISION = 6
# Pandas data types
PD_NUM_TYPE = ('i', 'u', 'f', 'c')
PD_STR_TYPE = ('S', 'U', 'V')
PD_DT_TYPE = ('M')
PD_BOOL_TYPE = ('b')
# ADODB RecordSet CursorTypeEnum values
CURSOR_UNSPECIFIED = -1
CURSOR_FORWARD = 0
CURSOR_KEYSET = 1
CURSOR_DYNAMIC = 2
CURSOR_STATIC = 3
# ADODB RecordSet LockTypeEnum values
LOCK_UNSPECIFIED = -1
LOCK_READONLY = 1
LOCK_PESSIMISTIC = 2
LOCK_OPTIMISTIC = 3
LOCK_BATCH_OPTIMISTIC = 4
# ADODB RecordSet CommandTypeEnum values
CMD_UNSPECIFIED = -1
CMD_TEXT = 1
CMD_TABLE = 2
CMD_STORED_PROC = 4
CMD_UNKNOWN = 8
CMD_FILE = 256
CMD_TABLE_DIRECT = 512
# ADODB Connection SchemaEnum values
SCHEMA_COLUMNS = 4
SCHEMA_TABLES = 20
# ADODB ObjectStateEnum values
STATE_CLOSED = 0
STATE_OPEN = 1
# FileService StreamOpenMode values
STREAM_READ = 1
STREAM_WRITE = 2
def __init__(self, **kwargs):
self._log = ''
self.sascfg = SASConfigCOM(**kwargs)
self._sb = kwargs.get('sb')
self.pid = self._startsas()
def __del__(self):
if self.adodb.State == self.STATE_OPEN:
self._endsas()
def _startsas(self) -> str:
"""
Create a workspace and open a connection with SAS.
:return [str]:
"""
if getattr(self, 'workspace', None) is not None:
# Do not create a new connection
return self.workspace.UniqueIdentifier
factory = dynamic.Dispatch('SASObjectManager.ObjectFactoryMulti2')
server = dynamic.Dispatch('SASObjectManager.ServerDef')
self.keeper = dynamic.Dispatch('SASObjectManager.ObjectKeeper')
self.adodb = dynamic.Dispatch('ADODB.Connection')
if self.sascfg.host is None:
# Create a local connection.
server.MachineDNSName = '127.0.0.1'
server.Port = 0
server.Protocol = self.PROTOCOL_COM
user = None
password = None
else:
# Create a remote connection. The following are required:
# 1. host
# 2. port
# 3. class_id
server.MachineDNSName = self.sascfg.host
server.Port = self.sascfg.port
server.Protocol = self.PROTOCOL_IOM
server.ClassIdentifier = self.sascfg.class_id
if self.sascfg.user is not None:
user = self.sascfg.user
else:
user = self.sascfg._prompt('Username: ')
if self.sascfg.pw is not None:
password = self.sascfg.pw
else:
password = self.sascfg._prompt('Password: ', pw=True)
self.workspace = factory.CreateObjectByServer(self.SAS_APP, True,
server, user, password)
self.keeper.AddObject(1, 'WorkspaceObject', self.workspace)
self.adodb.Open('Provider={}; Data Source=iom-id://{}'.format(
self.sascfg.provider, self.workspace.UniqueIdentifier))
ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;", "text")
if self.sascfg.verbose:
print("SAS Connection established. Workspace UniqueIdentifier is "+str(self.workspace.UniqueIdentifier)+"\n")
return self.workspace.UniqueIdentifier
def _endsas(self):
"""
Close a connection with SAS.
"""
self.adodb.Close()
self.keeper.RemoveObject(self.workspace)
self.workspace.Close()
if self.sascfg.verbose:
print("SAS Connection terminated. Workspace UniqueIdentifierid was "+str(self.pid))
def _getlst(self, buf: int=2048) -> str:
"""
Flush listing.
:option buf [int]: Download buffer. Default 2048.
:return [str]:
"""
flushed = self.workspace.LanguageService.FlushList(buf)
result = flushed
while flushed:
flushed = self.workspace.LanguageService.FlushList(buf)
result += flushed
return result
def _getlog(self, buf: int=2048) -> str:
"""
Flush log.
:option buf [int]: Download buffer. Default 2048.
:return [str]:
"""
flushed = self.workspace.LanguageService.FlushLog(buf)
result = flushed
while flushed:
flushed = self.workspace.LanguageService.FlushLog(buf)
result += flushed
# Store flush result in running log
self._log += result
if result.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
return result
def _getfile(self, fname: str, buf: int=2048, decode: bool=False) -> str:
"""
Use object file service to download a file from the provider.
:param fname [str]: Filename.
:option buf [int]: Download buffer. Default 2048.
:option decode [bool]: Decode the byte stream.
:return [str]:
"""
fobj = self.workspace.FileService.AssignFileref('outfile', 'DISK', fname, '', '')
# Use binary stream to support text and image transfers. The binary
# stream interface does not require a max line length, which allows
# support of arbitrarily wide tables.
stream = fobj[0].OpenBinaryStream(self.STREAM_READ)
flushed = stream.Read(buf)
result = bytes(flushed)
while flushed:
flushed = stream.Read(buf)
result += bytes(flushed)
stream.Close()
self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)
if decode is True:
result = result.decode(self.sascfg.encoding, errors='replace')
return result
def _gethtmlfn(self) -> str:
"""
Return the path of the output HTML file. This is the combination of
the `workpath` attribute and `HTML_RESULT_FILE` constant.
:return [str]:
"""
return self._sb.workpath + self.HTML_RESULT_FILE
def _reset(self):
"""
Reset the LanguageService interface to its initial state with respect
to token scanning. Use it to release the LanguageService from an error
state associated with the execution of invalid syntax or incomplete
program source. This primarily occurs when a statement is submitted
without a trailing semicolon.
"""
self.workspace.LanguageService.Reset()
def _tablepath(self, table: str, libref: str=None) -> str:
"""
Define a sas dataset path based on a table name and optional libref
name. Will return a two-level or one-level path string based on the
provided arguments. One-level names are of this form: `table`, while
two-level names are of this form: `libref.table`. If libref is not
defined, SAS will implicitly define the library to WORK or USER. The
USER library needs to have been defined previously in SAS, otherwise
WORK is the default option. If the `libref` parameter is any value
that evaluates to `False`, the one-level path is returned.
:param table [str]: SAS data set name.
:option libref [str]: Optional library name.
:return [str]:
"""
if not libref:
path = "'{}'n".format(table.strip())
else:
path = "{}.'{}'n".format(libref, table.strip())
return path
def _schema(self, table: str, libref: str=None) -> dict:
"""
Request a table schema for a given `libref.table`.
:param table [str]: Table name
:option libref [str]: Library name.
:return [dict]:
"""
#tablepath = self._tablepath(table, libref=libref)
if not libref:
tablepath = table
else:
tablepath = "{}.{}".format(libref, table)
criteria = [None, None, tablepath]
schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)
schema.MoveFirst()
metadata = {}
while not schema.EOF:
col_info = {x.Name: x.Value for x in schema.Fields}
if col_info['FORMAT_NAME'] in self._sb.sas_date_fmts:
col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(days=x) if x else x
elif col_info['FORMAT_NAME'] in self._sb.sas_datetime_fmts:
col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(seconds=x) if x else x
# elif FIXME TIME FORMATS
else:
col_info['CONVERT'] = lambda x: x
metadata[col_info['COLUMN_NAME']] = col_info
schema.MoveNext()
schema.Close()
return metadata
def _prompt(self, key: str, hide: bool=False) -> tuple:
"""
Ask the user for input about a given key.
:param key [str]: Key name.
:option hide [bool]: Hide user keyboard input.
:return [tuple]:
"""
input_ok = False
while input_ok is False:
val = self.sascfg._prompt('Enter value for macro variable {} '.format(key), pw=hide)
if val is None:
raise RuntimeError("No value for prompted macro variable provided.")
if val:
input_ok = True
else:
print('Input not valid.')
return (key, val)
def _asubmit(self, code: str, results: str='html'):
"""
Submit any SAS code. Does not return a result.
:param code [str]: SAS statements to execute.
"""
# Support html ods
if results.lower() == 'html':
ods_open = """
ods listing close;
ods {} (id=saspy_internal) options(bitmap_mode='inline')
file="{}"
device=svg
style={};
ods graphics on / outputfmt=png;
""".format(self.sascfg.output, self._gethtmlfn(), self._sb.HTML_Style)
ods_close = """
ods {} (id=saspy_internal) close;
ods listing;
""".format(self.sascfg.output)
else:
ods_open = ''
ods_close = ''
# Submit program
full_code = ods_open + code + ods_close
self.workspace.LanguageService.Submit(full_code)
def submit(self, code: str, results: str='html', prompt: dict=None, **kwargs) -> dict:
"""
Submit any SAS code. Returns log and listing as dictionary with keys
LOG and LST.
:param code [str]: SAS statements to execute.
:option results [str]: Result format. Options: HTML, TEXT. Default HTML.
:option prompt [dict]: Create macro variables from prompted keys.
"""
RESET = """;*';*";*/;quit;run;"""
prompt = prompt if prompt is not None else {}
printto = kwargs.pop('undo', False)
macro_declare = ''
for key, value in prompt.items():
macro_declare += '%let {} = {};\n'.format(*self._prompt(key, value))
# Submit program
self._asubmit(RESET + macro_declare + code + RESET, results)
# Retrieve listing and log
log = self._getlog()
if results.lower() == 'html':
# Make the following replacements in HTML listing:
# 1. Swap \x0c for \n
# 2. Change body class selector
# 3. Increase font size
listing = self._getfile(self._gethtmlfn(), decode=True) \
.replace(chr(12), chr(10)) \
.replace('<body class="c body">', '<body class="l body">') \
.replace('font-size: x-small;', 'font-size: normal;')
else:
listing = self._getlst()
# Invalid syntax will put the interface in to an error state. Reset
# the LanguageService to prevent further errors.
# FIXME: In the future, may only want to reset on ERROR. However, this
# operation seems pretty lightweight, so calling `_reset()` on all
# submits is not a burden.
self._reset()
if printto:
self._asubmit("\nproc printto;run;\n", 'text')
log += self._getlog()
self._sb._lastlog = log
return {'LOG': log, 'LST': listing}
def saslog(self) -> str:
"""
Return the full SAS log.
:return [str]:
"""
return self._log
def exist(self, table: str, libref: str=None) -> bool:
"""
Determine if a `libref.table` exists.
:param table [str]: Table name
:option libref [str]: Library name.
:return [bool]:
"""
#tablepath = self._tablepath(table, libref=libref)
#criteria = [None, None, tablepath]
#schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)
#exists = not schema.BOF
#schema.Close()
#return exists
code = 'data _null_; e = exist("'
if libref:
code += libref+"."
code += "'"+table.strip()+"'n"+'"'+");\n"
code += 'v = exist("'
if libref:
code += libref+"."
code += "'"+table.strip()+"'n"+'"'+", 'VIEW');\n if e or v then e = 1;\n"
code += "te='TABLE_EXISTS='; put te e;run;\n"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("TABLE_EXISTS= ")
l2 = l2[2].partition("\n")
exists = int(l2[0])
return bool(exists)
def read_sasdata(self, table: str, libref: str=None, dsopts: dict=None) -> tuple:
"""
Read any SAS dataset and return as a tuple of header, rows
:param table [str]: Table name
:option libref [str]: Library name.
:option dsopts [dict]: Dataset options.
:return [tuple]:
"""
TARGET = '_saspy_sd2df'
EXPORT = """
data {tgt};
set {tbl} {dopt};
run;
"""
dsopts = self._sb._dsopts(dsopts) if dsopts is not None else ''
tablepath = self._tablepath(table, libref=libref)
recordset = dynamic.Dispatch('ADODB.RecordSet')
# Create an intermediate dataset with `dsopts` applied
export = EXPORT.format(tgt=TARGET, tbl=tablepath, dopt=dsopts)
self.workspace.LanguageService.Submit(export)
meta = self._schema(TARGET)
# Connect RecordSet object to ADODB connection with params:
# Cursor: Forward Only
# Lock: Read Only
# Command: Table Direct
recordset.Open(TARGET, self.adodb, self.CURSOR_FORWARD,
self.LOCK_READONLY, self.CMD_TABLE_DIRECT)
recordset.MoveFirst()
header = [x.Name for x in recordset.Fields]
rows = []
while not recordset.EOF:
rows.append([meta[x.Name]['CONVERT'](x.Value) for x in recordset.Fields])
recordset.MoveNext()
recordset.Close()
return (header, rows, meta)
def read_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=False, opts: dict=None):
"""
Submit an import job to the SAS workspace.
:param filepath [str]: File URI.
:param table [str]: Table name.
:option libref [str]: Library name.
:option nosub [bool]: Return the SAS code instead of executing it.
:option opts [dict]: SAS PROC IMPORT options.
"""
opts = opts if opts is not None else {}
filepath = 'url ' + filepath if filepath.lower().startswith('http') else filepath
tablepath = self._tablepath(table, libref=libref)
proc_code = """
filename csv_file "{}";
proc import datafile=csv_file out={} dbms=csv replace;
{}
run;
""".format(filepath.replace('"', '""'), tablepath, self._sb._impopts(opts))
if nosub is True:
return proc_code
else:
return self.submit(proc_code, 'text')
def write_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=True, dsopts: dict=None, opts: dict=None):
"""
Submit an export job to the SAS workspace.
:param filepath [str]: File URI.
:param table [str]: Table name.
:option libref [str]: Library name.
:option nosub [bool]: Return the SAS code instead of executing it.
:option opts [dict]: SAS PROC EXPORT options.
:option dsopts [dict]: SAS dataset options.
"""
opts = opts if opts is not None else {}
dsopts = dsopts if dsopts is not None else {}
tablepath = self._tablepath(table, libref=libref)
proc_code = """
filename csv_file "{}";
proc export data={} {} outfile=csv_file dbms=csv replace;
{}
run;
""".format(filepath.replace('"', '""'), tablepath, self._sb._dsopts(dsopts), self._sb._expopts(opts))
if nosub is True:
return proc_code
else:
return self.submit(proc_code, 'text')['LOG']
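# Illustrative sketch (the paths and table names are hypothetical; `sas` is an
# assumed connected session). With nosub=True the generated PROC IMPORT/EXPORT
# code is returned instead of being executed.
#
#   code = sas.read_csv('c:/data/class.csv', 'class', libref='work', nosub=True)
#   sas.read_csv('c:/data/class.csv', 'class', libref='work')                     # runs PROC IMPORT
#   log = sas.write_csv('c:/data/class_out.csv', 'class', libref='work', nosub=False)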
def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',
libref: str ="", keep_outer_quotes: bool=False,
embedded_newlines: bool=True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs):
"""
Create a SAS dataset from a pandas data frame.
:param df [pd.DataFrame]: Pandas data frame containing data to write.
:param table [str]: Table name.
:option libref [str]: Library name. Default work.
None of these options are used by this access method; they are needed for other access methods
keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
LF - if embedded_newlines=True, the character to use for LF when transferring the data; defaults to '\x01'
CR - if embedded_newlines=True, the character to use for CR when transferring the data; defaults to '\x02'
colsep - the column separator character used for streaming the delimited data to SAS; defaults to '\x03'
colrep - the char to convert to for any embedded colsep, LF, CR chars in the data; defaults to ' '
datetimes - not implemented yet in this access method
outfmts - not implemented yet in this access method
labels - not implemented yet in this access method
outdsopts - not implemented yet in this access method
encode_errors - not implemented yet in this access method
char_lengths - not implemented yet in this access method
"""
DATETIME_NAME = 'DATETIME26.6'
DATETIME_FMT = '%Y-%m-%dT%H:%M:%S.%f'
if self.sascfg.verbose:
if keep_outer_quotes != False:
print("'keep_outer_quotes=' is not used with this access method. option ignored.")
if embedded_newlines != True:
print("'embedded_newlines=' is not used with this access method. option ignored.")
if LF != '\x01' or CR != '\x02' or colsep != '\x03':
print("'LF=, CR= and colsep=' are not used with this access method. option(s) ignored.")
if datetimes != {}:
print("'datetimes=' is not used with this access method. option ignored.")
if outfmts != {}:
print("'outfmts=' is not used with this access method. option ignored.")
if labels != {}:
print("'labels=' is not used with this access method. option ignored.")
if outdsopts != {}:
print("'outdsopts=' is not used with this access method. option ignored.")
if encode_errors:
print("'encode_errors=' is not used with this access method. option ignored.")
if char_lengths:
print("'char_lengths=' is not used with this access method. option ignored.")
tablepath = self._tablepath(table, libref=libref)
if type(df.index) != pd.RangeIndex:
warnings.warn("Note that Indexes are not transferred over as columns. Only actual columns are transferred")
columns = []
formats = {}
for i, name in enumerate(df.columns):
if df[name].dtypes.kind in self.PD_NUM_TYPE:
# Numeric type
definition = "'{}'n num".format(name)
formats[name] = lambda x: str(x) if pd.isnull(x) is False else 'NULL'
elif df[name].dtypes.kind in self.PD_STR_TYPE:
# Character type
# NOTE: If a character string contains a single `'`, replace
# it with `''`. This is the SAS equivalent to `\'`.
length = df[name].map(len).max()
definition = "'{}'n char({})".format(name, length)
formats[name] = lambda x: "'{}'".format(x.replace("'", "''")) if pd.isnull(x) is False else 'NULL'
elif df[name].dtypes.kind in self.PD_DT_TYPE:
# Datetime type
definition = "'{}'n num informat={} format={}".format(name, DATETIME_NAME, DATETIME_NAME)
formats[name] = lambda x: "'{:{}}'DT".format(x, DATETIME_FMT) if pd.isnull(x) is False else 'NULL'
else:
# Default to character type
# NOTE: If a character string contains a single `'`, replace
# it with `''`. This is the SAS equivalent to `\'`.
length = df[name].map(str).map(len).max()
definition = "'{}'n char({})".format(name, length)
formats[name] = lambda x: "'{}'".format(x.replace("'", "''")) if pd.isnull(x) is False else 'NULL'
columns.append(definition)
sql_values = []
for index, row in df.iterrows():
vals = []
for i, col in enumerate(row):
func = formats[df.columns[i]]
vals.append(func(col))
sql_values.append('values({})'.format(', '.join(vals)))
sql_create = 'create table {} ({});'.format(tablepath, ', '.join(columns))
sql_insert = 'insert into {} {};'.format(tablepath, '\n'.join(sql_values))
self.adodb.Execute(sql_create)
self.adodb.Execute(sql_insert)
return None
def sasdata2dataframe(self, table: str, libref: str=None, dsopts: dict=None, method: str='', **kwargs) -> 'pd.DataFrame':
"""
Create a pandas data frame from a SAS dataset.
:param table [str]: Table name.
:option libref [str]: Library name.
:option dsopts [dict]: Dataset options.
:option method [str]: Download method.
:option tempkeep [bool]: Download the csv file if using the csv method.
:option tempfile [str]: File path for the saved output file.
:return [pd.DataFrame]:
"""
# strip options not used by this access method from kwargs
# so they can't be passed to pandas later
rowsep = kwargs.pop('rowsep', ' ')
colsep = kwargs.pop('colsep', ' ')
rowrep = kwargs.pop('rowrep', ' ')
colrep = kwargs.pop('colrep', ' ')
if method.upper() == 'DISK':
print("This access method doesn't support the DISK method. Try CSV or MEMORY")
return None
if method.upper() == 'CSV':
df = self.sasdata2dataframeCSV(table, libref, dsopts=dsopts, **kwargs)
else:
my_fmts = kwargs.pop('my_fmts', False)
k_dts = kwargs.pop('dtype', None)
if self.sascfg.verbose:
if my_fmts != False:
print("'my_fmts=' is not supported in this access method. option ignored.")
if k_dts is not None:
print("'dtype=' is only used with the CSV version of this method. option ignored.")
header, rows, meta = self.read_sasdata(table, libref, dsopts=dsopts)
df = pd.DataFrame.from_records(rows, columns=header, **kwargs)
for col in meta.keys():
if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:
df[col] = pd.to_datetime(df[col], errors='coerce')
elif meta[col]['DATA_TYPE'] == 5:
df[col] = pd.to_numeric(df[col], errors='coerce')
return df
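# Usage sketch (hypothetical session `sas` and table): pull WORK.CLASS into a
# DataFrame either row by row over ADO (the default branch above) or via the
# CSV path handled by sasdata2dataframeCSV below.
#
#   df = sas.sasdata2dataframe('class', libref='work')
#   df = sas.sasdata2dataframe('class', libref='work', method='CSV',
#                              dsopts={'where': 'age > 12'})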
def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None,
tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':
"""
Create a pandas data frame from a SAS dataset.
:param table [str]: Table name.
:option libref [str]: Library name.
:option dsopts [dict]: Dataset options.
:option opts [dict]: Dictionary of PROC EXPORT options (e.g. delimiter, putnames).
:option tempkeep [bool]: Download the csv file if using the csv method.
:option tempfile [str]: File path for the saved output file.
:return [pd.DataFrame]:
"""
FORMAT_STRING = '{column} {format}{length}.{precision}'
EXPORT = """
data _saspy_sd2df;
format {fmt};
set {tbl};
run;
proc export data=_saspy_sd2df {dopt}
outfile="{out}"
dbms=csv replace;
{exopts}
run;
"""
k_dts = kwargs.get('dtype', None)
my_fmts = kwargs.pop('my_fmts', False)
if self.sascfg.verbose:
if my_fmts != False:
print("'my_fmts=' is not supported in this access method. option ignored.")
sas_csv = '{}saspy_sd2df.csv'.format(self._sb.workpath)
dopts = self._sb._dsopts(dsopts) if dsopts is not None else ''
tablepath = self._tablepath(table, libref=libref)
expopts = self._sb._expopts(kwargs.pop('opts', {}))
# Convert any date format to one pandas can understand (ISO-8601).
# Save a reference of the column name in a list so pandas can parse
# the column during construction.
datecols = []
fmtlist = []
meta = self._schema(table, libref)
for name, col in meta.items():
if col['FORMAT_NAME'] in self._sb.sas_date_fmts:
datecols.append(name)
col_format = self.FMT_DEFAULT_DATE_NAME
col_length = self.FMT_DEFAULT_DATE_LENGTH
col_precis = self.FMT_DEFAULT_DATE_PRECISION
elif col['FORMAT_NAME'] in self._sb.sas_datetime_fmts:
datecols.append(name)
col_format = self.FMT_DEFAULT_DATETIME_NAME
col_length = self.FMT_DEFAULT_DATETIME_LENGTH
col_precis = self.FMT_DEFAULT_DATETIME_PRECISION
# elif FIXME TIME FORMATS
else:
col_format = col['FORMAT_NAME']
col_length = col['FORMAT_LENGTH']
col_precis = col['FORMAT_DECIMAL']
if col['FORMAT_NAME']:
full_format = FORMAT_STRING.format(
column=col['COLUMN_NAME'],
format=col_format,
length=col_length,
precision=col_precis)
fmtlist.append(full_format)
export = EXPORT.format(fmt=' '.join(fmtlist),
tbl=tablepath,
dopt=dopts,
exopts=expopts,
out=sas_csv)
# Use `LanguageService.Submit` instead of `submit` for a slight
# performance bump. We don't need the log or listing here so skip
# the wrapper function.
self.workspace.LanguageService.Submit(export)
outstring = self._getfile(sas_csv, decode=True)
# Write temp file if requested by user
if kwargs.get('tempkeep') is True and kwargs.get('tempfile') is not None:
with open(kwargs['tempfile'], 'w') as f:
f.write(outstring)
df = pd.read_csv(io.StringIO(outstring), parse_dates=datecols, **kwargs)
if k_dts is None: # don't override these if user provided their own dtypes
for col in meta.keys():
if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:
df[col] = pd.to_datetime(df[col], errors='coerce')
return df
def upload(self, local: str, remote: str, overwrite: bool=True, permission: str='', **kwargs):
"""
Upload a file to the SAS server.
:param local [str]: Local filename.
:param remote [str]: Remote filename.
:option overwrite [bool]: Overwrite the file if it exists.
:option permission [str]: See SAS filename statement documentation.
"""
perms = "PERMISSION='{}'".format(permission) if permission else ''
valid = self._sb.file_info(remote, quiet=True)
if valid == {}:
# Parameter `remote` references a directory. Default to using the
# filename in `local` path.
remote_file = remote + self._sb.hostsep + os.path.basename(local)
elif valid is not None and overwrite is False:
# Parameter `remote` references a file that exists but we cannot
# overwrite it.
# TODO: Raise exception here instead of returning dict
return {'Success': False,
'LOG': 'File {} exists and overwrite was set to False. Upload was stopped.'.format(remote)}
else:
remote_file = remote
with open(local, 'rb') as f:
fobj = self.workspace.FileService.AssignFileref('infile', 'DISK', remote_file, perms, '')
stream = fobj[0].OpenBinaryStream(self.STREAM_WRITE)
stream.Write(f.read())
stream.Close()
self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)
return {'Success': True,
'LOG': 'File successfully written using FileService.'}
def download(self, local: str, remote: str, overwrite: bool=True, **kwargs):
"""
Download a file from the SAS server.
:param local [str]: Local filename.
:param remote [str]: Remote filename.
:option overwrite [bool]: Overwrite the file if it exists.
"""
valid = self._sb.file_info(remote, quiet=True)
if valid is None:
# Parameter `remote` references an invalid file path.
# TODO: Raise exception here instead of returning dict
return {'Success': False,
'LOG': 'File {} does not exist.'.format(remote)}
elif valid == {}:
# Parameter `remote` references a directory.
# TODO: Raise exception here instead of returning dict
return {'Success': False,
'LOG': 'File {} is a directory.'.format(remote)}
if os.path.isdir(local) is True:
# Parameter `local` references a directory. Default to using the
# filename in `remote` path.
local_file = os.path.join(local, remote.rpartition(self._sb.hostsep)[2])
else:
local_file = local
with open(local_file, 'wb') as f:
f.write(self._getfile(remote))
return {'Success': True,
'LOG': 'File successfully read using FileService.'}
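# File transfer sketch (paths are examples only; `sas` is an assumed connected
# session). Both methods return a dict with Success and LOG keys instead of
# raising on failure.
#
#   sas.upload('c:/local/report.xlsx', '/sasdata/reports')                  # directory target
#   sas.download('c:/local/copy.xlsx', '/sasdata/reports/report.xlsx')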
|
apache-2.0
| 6,462,494,287,132,911,000
| 38.135933
| 154
| 0.575229
| false
| 4.042669
| false
| false
| false
|
CindyvdVries/News_Crawler
|
Sat2/sat/pipelines.py
|
1
|
2575
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.utils.conf import get_config
from scrapy.exceptions import DropItem
import pika.credentials
import pika
import json
import logging
class JsonWriterPipeline(object):
def __init__(self):
self.file = open('items.jl', 'w')
def process_item(self, item, spider):
line = json.dumps(dict(item)) + "\n"
self.file.write(line)
return item
class DuplicatePipeline(object):
def __init__(self):
self.ids_seen = set()
def process_item(self, item, spider):
if item['id'] in self.ids_seen:
raise DropItem("Duplicate item found: %s" % item)
else:
self.ids_seen.add(item['id'])
return item
class RabbitMQPipeline(object):
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.info("Constructing rabbitmq logger")
username = get_config().get('rabbitmq', 'username')
password = get_config().get('rabbitmq', 'password')
credentials = pika.credentials.PlainCredentials(
username=username,
password=password
)
host = get_config().get('rabbitmq', 'host')
parameters = pika.ConnectionParameters(
host=host,
port=5672,
virtual_host='/',
credentials=credentials
)
connection = pika.BlockingConnection(
parameters=parameters
)
channel = connection.channel()
# we're publishing to two places: the download request queue,
# so that a download worker can pick it up,
channel.queue_declare('crisis_download_requests')
# and a fanout exchange to notify listeners that we've crawled something
channel.exchange_declare(
'crisis_crawl',
type='fanout'
)
self.channel = channel
def process_item(self, item, spider):
self.logger.info('sending message')
serialized = json.dumps(dict(item))
# send to the work queue
self.channel.basic_publish(
exchange='',
routing_key='crisis_download_requests',
body='%s' % (serialized,)
)
# and to the channel
self.channel.basic_publish(
exchange='crisis_crawl',
routing_key='',
body='%s' % (serialized,)
)
return item
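# Sketch of how these pipelines could be enabled in settings.py, as the note at
# the top of this file suggests. The module path `sat.pipelines` and the
# priority numbers are assumptions, not taken from this project's settings:
#
#   ITEM_PIPELINES = {
#       'sat.pipelines.DuplicatePipeline': 100,
#       'sat.pipelines.JsonWriterPipeline': 200,
#       'sat.pipelines.RabbitMQPipeline': 300,
#   }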
|
gpl-3.0
| 1,274,635,759,553,769,500
| 30.024096
| 80
| 0.593398
| false
| 4.17342
| false
| false
| false
|
PieterMostert/Lipgloss
|
model/serializers/oxideserializer.py
|
1
|
2233
|
import json
try:
from lipgloss.core_data import Oxide
except:
from ..lipgloss.core_data import Oxide
class OxideSerializer(object):
"""A class to support serializing/deserializing of a single oxide and dictionaries of oxides. Needs improvement"""
@staticmethod
def get_serializable_oxide(oxide):
"""A serializable oxide is one that can be serialized to JSON using the python json encoder."""
serializable_oxide = {}
serializable_oxide["molar_mass"] = oxide.molar_mass
serializable_oxide["flux"] = oxide.flux
serializable_oxide["min_threshhold"] = oxide.min_threshhold
return serializable_oxide
@staticmethod
def serialize(oxide):
"""Serialize a single Oxide object to JSON."""
return json.dumps(OxideSerializer.get_serializable_oxide(oxide), indent=4)
@staticmethod
def serialize_dict(oxide_dict):
"""Convert a dictionary of Oxide objects to serializable dictionary.
Use json.dump(output, file) to save output to file"""
serializable_dict = {}
for index, oxide in oxide_dict.items():
serializable_dict[index] = OxideSerializer.get_serializable_oxide(oxide)
return serializable_dict
@staticmethod
def get_oxide(serialized_oxide):
"""Convert a serialized oxide (a dict) returned by the JSON decoder into a Oxide object."""
oxide = Oxide(serialized_oxide["molar_mass"],
serialized_oxide["flux"],
serialized_oxide["min_threshhold"])
return oxide
@staticmethod
def deserialize(json_str):
"""Deserialize a single oxide from JSON to a Oxide object."""
serialized_oxide_dict = json.loads(json_str)
return OxideSerializer.get_oxide(serialized_oxide_dict)
@staticmethod
def deserialize_dict(serialized_oxide_dict):
"""Deserialize a number of oxides from JSON to a dict containing Oxide objects, indexed by Oxide name."""
oxide_dict = {}
for i, serialized_oxide in serialized_oxide_dict.items():
oxide_dict[i] = OxideSerializer.get_oxide(serialized_oxide)
return oxide_dict
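# Round-trip sketch (assumes the Oxide constructor takes molar_mass, flux and
# min_threshhold positionally, as get_oxide() above implies; the values are
# illustrative only):
#
#   sio2 = Oxide(60.08, 0, 0)
#   text = OxideSerializer.serialize(sio2)
#   restored = OxideSerializer.deserialize(text)
#   serializable = OxideSerializer.serialize_dict({'SiO2': sio2})  # then json.dump(serializable, f)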
|
gpl-3.0
| 1,879,315,643,800,368,000
| 39.6
| 119
| 0.653829
| false
| 3.897033
| false
| false
| false
|
kulbirsaini/pdfrw-fork
|
examples/rl1/booklet.py
|
1
|
1588
|
#!/usr/bin/env python
'''
usage: booklet.py my.pdf
Uses Form XObjects and reportlab to create booklet.my.pdf.
Demonstrates use of pdfrw with reportlab.
'''
import sys
import os
from reportlab.pdfgen.canvas import Canvas
import find_pdfrw
from pdfrw import PdfReader
from pdfrw.buildxobj import pagexobj
from pdfrw.toreportlab import makerl
def read_and_double(inpfn):
pages = PdfReader(inpfn, decompress=False).pages
pages = [pagexobj(x) for x in pages]
if len(pages) & 1:
pages.append(pages[0]) # Sentinel -- get same size for back as front
xobjs = []
while len(pages) > 2:
xobjs.append((pages.pop(), pages.pop(0)))
xobjs.append((pages.pop(0), pages.pop()))
xobjs += [(x,) for x in pages]
return xobjs
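# Worked example (added for clarity, not in the original source): for an 8-page
# input the pairs come out as (8, 1), (2, 7), (6, 3), (4, 5): the last page sits
# next to the first, the second next to the second-to-last, and so on, which is
# the usual saddle-stitch booklet imposition.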
def make_pdf(outfn, xobjpairs):
canvas = Canvas(outfn)
for xobjlist in xobjpairs:
x = y = 0
for xobj in xobjlist:
x += xobj.BBox[2]
y = max(y, xobj.BBox[3])
canvas.setPageSize((x,y))
# Handle blank back page
if len(xobjlist) > 1 and xobjlist[0] == xobjlist[-1]:
xobjlist = xobjlist[:1]
x = xobjlist[0].BBox[2]
else:
x = 0
y = 0
for xobj in xobjlist:
canvas.saveState()
canvas.translate(x, y)
canvas.doForm(makerl(canvas, xobj))
canvas.restoreState()
x += xobj.BBox[2]
canvas.showPage()
canvas.save()
inpfn, = sys.argv[1:]
outfn = 'booklet.' + os.path.basename(inpfn)
make_pdf(outfn, read_and_double(inpfn))
|
mit
| 5,298,639,258,305,954,000
| 22.014493
| 77
| 0.595718
| false
| 3.113725
| false
| false
| false
|
varunarya10/oslo.utils
|
oslo_utils/tests/test_reflection.py
|
1
|
8493
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base as test_base
import six
import testtools
from oslo_utils import reflection
if six.PY3:
RUNTIME_ERROR_CLASSES = ['RuntimeError', 'Exception',
'BaseException', 'object']
else:
RUNTIME_ERROR_CLASSES = ['RuntimeError', 'StandardError', 'Exception',
'BaseException', 'object']
def dummy_decorator(f):
@six.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
def mere_function(a, b):
pass
def function_with_defs(a, b, optional=None):
pass
def function_with_kwargs(a, b, **kwargs):
pass
class Class(object):
def method(self, c, d):
pass
@staticmethod
def static_method(e, f):
pass
@classmethod
def class_method(cls, g, h):
pass
class CallableClass(object):
def __call__(self, i, j):
pass
class ClassWithInit(object):
def __init__(self, k, l):
pass
class CallbackEqualityTest(test_base.BaseTestCase):
def test_different_simple_callbacks(self):
def a():
pass
def b():
pass
self.assertFalse(reflection.is_same_callback(a, b))
def test_static_instance_callbacks(self):
class A(object):
@staticmethod
def b(a, b, c):
pass
a = A()
b = A()
self.assertTrue(reflection.is_same_callback(a.b, b.b))
def test_different_instance_callbacks(self):
class A(object):
def b(self):
pass
def __eq__(self, other):
return True
b = A()
c = A()
self.assertFalse(reflection.is_same_callback(b.b, c.b))
self.assertTrue(reflection.is_same_callback(b.b, c.b, strict=False))
class GetCallableNameTest(test_base.BaseTestCase):
def test_mere_function(self):
name = reflection.get_callable_name(mere_function)
self.assertEqual('.'.join((__name__, 'mere_function')), name)
def test_method(self):
name = reflection.get_callable_name(Class.method)
self.assertEqual('.'.join((__name__, 'Class', 'method')), name)
def test_instance_method(self):
name = reflection.get_callable_name(Class().method)
self.assertEqual('.'.join((__name__, 'Class', 'method')), name)
def test_static_method(self):
name = reflection.get_callable_name(Class.static_method)
if six.PY3:
self.assertEqual('.'.join((__name__, 'Class', 'static_method')),
name)
else:
# NOTE(imelnikov): static methods are just functions; the class name
# is not recorded anywhere in them.
self.assertEqual('.'.join((__name__, 'static_method')), name)
def test_class_method(self):
name = reflection.get_callable_name(Class.class_method)
self.assertEqual('.'.join((__name__, 'Class', 'class_method')), name)
def test_constructor(self):
name = reflection.get_callable_name(Class)
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_callable_class(self):
name = reflection.get_callable_name(CallableClass())
self.assertEqual('.'.join((__name__, 'CallableClass')), name)
def test_callable_class_call(self):
name = reflection.get_callable_name(CallableClass().__call__)
self.assertEqual('.'.join((__name__, 'CallableClass',
'__call__')), name)
# These extended/special case tests only work on python 3, due to python 2
# being broken/incorrect with regard to these special cases...
@testtools.skipIf(not six.PY3, 'python 3.x is not currently available')
class GetCallableNameTestExtended(test_base.BaseTestCase):
# Tests items in http://legacy.python.org/dev/peps/pep-3155/
class InnerCallableClass(object):
def __call__(self):
pass
def test_inner_callable_class(self):
obj = self.InnerCallableClass()
name = reflection.get_callable_name(obj.__call__)
expected_name = '.'.join((__name__, 'GetCallableNameTestExtended',
'InnerCallableClass', '__call__'))
self.assertEqual(expected_name, name)
def test_inner_callable_function(self):
def a():
def b():
pass
return b
name = reflection.get_callable_name(a())
expected_name = '.'.join((__name__, 'GetCallableNameTestExtended',
'test_inner_callable_function', '<locals>',
'a', '<locals>', 'b'))
self.assertEqual(expected_name, name)
def test_inner_class(self):
obj = self.InnerCallableClass()
name = reflection.get_callable_name(obj)
expected_name = '.'.join((__name__,
'GetCallableNameTestExtended',
'InnerCallableClass'))
self.assertEqual(expected_name, name)
class GetCallableArgsTest(test_base.BaseTestCase):
def test_mere_function(self):
result = reflection.get_callable_args(mere_function)
self.assertEqual(['a', 'b'], result)
def test_function_with_defaults(self):
result = reflection.get_callable_args(function_with_defs)
self.assertEqual(['a', 'b', 'optional'], result)
def test_required_only(self):
result = reflection.get_callable_args(function_with_defs,
required_only=True)
self.assertEqual(['a', 'b'], result)
def test_method(self):
result = reflection.get_callable_args(Class.method)
self.assertEqual(['self', 'c', 'd'], result)
def test_instance_method(self):
result = reflection.get_callable_args(Class().method)
self.assertEqual(['c', 'd'], result)
def test_class_method(self):
result = reflection.get_callable_args(Class.class_method)
self.assertEqual(['g', 'h'], result)
def test_class_constructor(self):
result = reflection.get_callable_args(ClassWithInit)
self.assertEqual(['k', 'l'], result)
def test_class_with_call(self):
result = reflection.get_callable_args(CallableClass())
self.assertEqual(['i', 'j'], result)
def test_decorators_work(self):
@dummy_decorator
def special_fun(x, y):
pass
result = reflection.get_callable_args(special_fun)
self.assertEqual(['x', 'y'], result)
class AcceptsKwargsTest(test_base.BaseTestCase):
def test_no_kwargs(self):
self.assertEqual(False, reflection.accepts_kwargs(mere_function))
def test_with_kwargs(self):
self.assertEqual(True, reflection.accepts_kwargs(function_with_kwargs))
class GetClassNameTest(test_base.BaseTestCase):
def test_std_exception(self):
name = reflection.get_class_name(RuntimeError)
self.assertEqual('RuntimeError', name)
def test_class(self):
name = reflection.get_class_name(Class)
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_instance(self):
name = reflection.get_class_name(Class())
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_int(self):
name = reflection.get_class_name(42)
self.assertEqual('int', name)
class GetAllClassNamesTest(test_base.BaseTestCase):
def test_std_class(self):
names = list(reflection.get_all_class_names(RuntimeError))
self.assertEqual(RUNTIME_ERROR_CLASSES, names)
def test_std_class_up_to(self):
names = list(reflection.get_all_class_names(RuntimeError,
up_to=Exception))
self.assertEqual(RUNTIME_ERROR_CLASSES[:-2], names)
|
apache-2.0
| -899,403,400,105,379,200
| 29.44086
| 79
| 0.601554
| false
| 3.991071
| true
| false
| false
|
yukaritan/qtbot3
|
qtbot3_service/plugins/achievements.py
|
1
|
2926
|
from util import irc
from util.garbage import rainbow
from util.handler_utils import prehook, get_value, set_value, get_target, cmdhook, fetch_all
from qtbot3_common.types.message import Message
disconnection_ladder = {
1: "Connection reset by peer",
5: "Connection reset by beer",
10: "Connection reset by queer",
25: "Connection reset by Cher",
50: "Connection reset by ...deer?",
100: "Connection reset by ... enough already. I don't know.. Gears?",
250: "Connection reset 250 times. Seriously?",
500: "You've lost your connection 500 times. Do you even internet?",
1000: "One thousand disconnects. A thousand. One, three zeros. Holy shit."
}
def get_achievement(message: Message, match: dict, nick: str, count: int) -> str:
print("Achievement progress for {user}: {count}".format(count=count, **match))
if count in disconnection_ladder:
print("Dealt achievement \"" + disconnection_ladder[count] + "\" to", match['nick'])
if 'target' not in match or match['target'] is None:
return
target = get_target(message, nick)
msg = "{nick} has unlocked an achievement: {desc}"
msg = rainbow(msg.format(nick=match['nick'], desc=disconnection_ladder[count]))
return irc.chat_message(target, msg)
return None
@prehook(':(?P<nick>[^\s]+)'
'!(?P<user>[^\s]+)'
' QUIT'
'( :(?P<message>.*))?')
@prehook(':(?P<nick>[^\s]+)'
'!(?P<user>[^\s]+)'
' PART'
' (?P<target>[^\s]+)'
'( :(?P<message>.*))?')
def achievement_prehook_part(message: Message, match: dict, nick: str):
try:
key = 'chiev_partcount_' + match['user']
print("old value:", get_value(key))
count = (get_value(key) or 0) + 1
print("new value:", count)
set_value(key, count)
return get_achievement(message, match, nick, count)
except Exception as ex:
print("achievement prehook exception:", ex)
@prehook(':(?P<nick>[^\s]+)'
'!(?P<user>[^\s]+)'
' JOIN'
' (?P<target>[^\s]+)')
def achievement_prehook_join(message: Message, match: dict, nick: str):
try:
key = 'chiev_partcount_' + match['user']
count = get_value(key) or 0
return get_achievement(message, match, nick, count)
except Exception as ex:
print("achievement prehook exception:", ex)
@cmdhook('aimbot (?P<nick>[^\s]+)')
def achievement_cheat_codes(message: Message, match: dict, nick: str) -> str:
fetched = fetch_all(keyfilter='user_', valuefilter=match['nick'])
target = get_target(message, nick)
output = []
for key in fetched:
user = key.split('_', 1)[1]
key = 'chiev_partcount_' + user
count = get_value(key) or 0
msg = rainbow("%s has disconnected %d times" % (user, count))
output.append(irc.chat_message(target, msg))
return output
|
gpl-3.0
| -309,478,902,671,180,200
| 32.25
| 92
| 0.598086
| false
| 3.42623
| false
| false
| false
|
CobwebOrg/cobweb-django
|
core/migrations/0014_auto_20181026_1019.py
|
1
|
1216
|
# Generated by Django 2.1.2 on 2018-10-26 17:19
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0013_user_terms_accepted'),
]
operations = [
migrations.AddField(
model_name='organization',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='organization',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='user',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='user',
name='terms_accepted',
field=models.BooleanField(default=False),
),
]
|
mit
| 243,347,793,550,178,400
| 28.658537
| 93
| 0.570724
| false
| 4.405797
| false
| false
| false
|
lmorchard/django-teamwork
|
teamwork/templatetags/teamwork_tags.py
|
1
|
3741
|
"""
``django-teamwork`` template tags, loaded like so:
{% load teamwork_tags %}
"""
from __future__ import unicode_literals
from django import template
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, AnonymousUser
from django.template import get_library
from django.template import InvalidTemplateLibrary
from django.template.defaulttags import LoadNode
from ..shortcuts import build_policy_admin_links
register = template.Library()
class ObjectPermissionsNode(template.Node):
def __init__(self, user_var, obj, context_var):
self.user_var = template.Variable(user_var)
self.obj = template.Variable(obj)
self.context_var = context_var
def render(self, context):
user_var = self.user_var.resolve(context)
if isinstance(user_var, get_user_model()):
self.user = user_var
elif isinstance(user_var, AnonymousUser):
self.user = user_var
else:
raise Exception("User instance required (got %s)"
% user_var.__class__)
obj = self.obj.resolve(context)
perms = self.user.get_all_permissions(obj)
context[self.context_var] = perms
return ''
@register.tag
def get_all_obj_permissions(parser, token):
"""
Get all of a user's permissions granted by an object. For example:
{% get_all_obj_permissions user for obj as "context_var" %}
"""
bits = token.split_contents()
format = '{% get_all_obj_permissions user for obj as "context_var" %}'
if len(bits) != 6 or bits[2] != 'for' or bits[4] != 'as':
raise template.TemplateSyntaxError("get_all_permissions tag should be in "
"format: %s" % format)
_, user_var, _, obj, _, context_var = bits
if context_var[0] != context_var[-1] or context_var[0] not in ('"', "'"):
raise template.TemplateSyntaxError(
"get_all_obj_permissions tag's context_var argument should be "
"quoted")
context_var = context_var[1:-1]
return ObjectPermissionsNode(user_var, obj, context_var)
class PolicyAdminLinksNode(template.Node):
def __init__(self, user_var, obj, context_var):
self.user_var = template.Variable(user_var)
self.obj = template.Variable(obj)
self.context_var = context_var
def render(self, context):
user_var = self.user_var.resolve(context)
if isinstance(user_var, get_user_model()):
self.user = user_var
elif isinstance(user_var, AnonymousUser):
self.user = user_var
else:
raise Exception("User instance required (got %s)"
% user_var.__class__)
obj = self.obj.resolve(context)
links = build_policy_admin_links(self.user, obj)
context[self.context_var] = links
return ''
@register.tag
def get_policy_admin_links(parser, token):
"""
Get a set of links to admin pages where the given user can manage policy for an object. For example:
{% get_policy_admin_links user for obj as "context_var" %}
"""
bits = token.split_contents()
format = '{% get_policy_admin_links user for obj as "context_var" %}'
if len(bits) != 6 or bits[2] != 'for' or bits[4] != 'as':
raise template.TemplateSyntaxError("get_all_permissions tag should be in "
"format: %s" % format)
_, user_var, _, obj, _, context_var = bits
if context_var[0] != context_var[-1] or context_var[0] not in ('"', "'"):
raise template.TemplateSyntaxError(
"policy_admin_links tag's context_var argument should be "
"quoted")
context_var = context_var[1:-1]
return PolicyAdminLinksNode(user_var, obj, context_var)
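# Template usage sketch (the `doc` object and the permission string below are
# hypothetical; only the tag names come from this module):
#
#   {% load teamwork_tags %}
#   {% get_all_obj_permissions request.user for doc as "doc_perms" %}
#   {% if "wiki.change_document" in doc_perms %} ... {% endif %}
#   {% get_policy_admin_links request.user for doc as "policy_links" %}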
|
mpl-2.0
| 7,748,176,117,772,263,000
| 31.25
| 82
| 0.624967
| false
| 3.825153
| false
| false
| false
|
summychou/TBTracker
|
src/TBTracker_Gui/TBTracker_Gui_Button.py
|
1
|
4941
|
# -*- coding: utf-8 -*-
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QPushButton
'''
@author : Zhou Jian
@email : zhoujian@hust.edu.cn
@version : V1.1
@date : 2018.04.22
'''
class BaseButton(QPushButton):
'''
Base button
'''
def __init__(self, name=""):
super(BaseButton, self).__init__(name)
class SearchButton(BaseButton):
'''
Search button, inherits from the base button
'''
def __init__(self):
super(SearchButton, self).__init__(name="商品搜索")
self.function_init()
# Function binding -
def function_init(self):
pass
class AddButton(BaseButton):
'''
Add-tag button, inherits from the base button
'''
def __init__(self):
super(AddButton, self).__init__(name="添加标签")
self.function_init()
# Function binding -
def function_init(self):
pass
class AttachButton(BaseButton):
'''
Attach-tag button, inherits from the base button
'''
def __init__(self):
super(AttachButton, self).__init__(name="标注标签")
self.function_init()
# Function binding -
def function_init(self):
pass
class ImportButton(BaseButton):
'''
Import-data button, inherits from the base button
'''
def __init__(self):
super(ImportButton, self).__init__(name="导入数据")
self.function_init()
# Function binding -
def function_init(self):
pass
class ExportButton(BaseButton):
'''
Export-data button, inherits from the base button
'''
def __init__(self):
super(ExportButton, self).__init__(name="导出数据")
self.function_init()
# Function binding -
def function_init(self):
pass
class InsertButton(BaseButton):
'''
Insert-data button, inherits from the base button
'''
def __init__(self):
super(InsertButton, self).__init__(name="添加数据")
self.function_init()
# Function binding -
def function_init(self):
pass
class DeleteButton(BaseButton):
'''
Delete-data button, inherits from the base button
'''
def __init__(self):
super(DeleteButton, self).__init__(name="删除数据")
self.function_init()
# Function binding -
def function_init(self):
pass
class ConfirmButton(BaseButton):
'''
Confirm button, inherits from the base button
'''
def __init__(self):
super(ConfirmButton, self).__init__(name="确定")
self.function_init()
# Function binding -
def function_init(self):
pass
class CancelButton(BaseButton):
'''
Cancel button, inherits from the base button
'''
def __init__(self):
super(CancelButton, self).__init__(name="取消")
self.function_init()
# Function binding -
def function_init(self):
pass
class GlobalSelectButton(BaseButton):
'''
Global-select button, inherits from the base button
'''
def __init__(self):
super(GlobalSelectButton, self).__init__(name="全局选择")
self.function_init()
# Function binding -
def function_init(self):
pass
class AllSelectButton(BaseButton):
'''
Select-all button, inherits from the base button
'''
def __init__(self):
super(AllSelectButton, self).__init__(name="全部选择")
self.function_init()
# Function binding -
def function_init(self):
pass
class ChangeConfigButton(BaseButton):
'''
Change-config button, inherits from the base button
'''
def __init__(self):
super(ChangeConfigButton, self).__init__(name="更改配置")
self.function_init()
# Function binding -
def function_init(self):
pass
class ManualUpdateButton(BaseButton):
'''
Manual-update button, inherits from the base button
'''
def __init__(self):
super(ManualUpdateButton, self).__init__(name="手动更新")
self.function_init()
# Function binding -
def function_init(self):
pass
class SelectCommodityButton(BaseButton):
'''
Select-commodity button, inherits from the base button
'''
def __init__(self):
super(SelectCommodityButton, self).__init__(name="选择商品")
self.function_init()
# Function binding -
def function_init(self):
pass
class MonthlyDataButton(BaseButton):
'''
Monthly-data button, inherits from the base button
'''
def __init__(self):
super(MonthlyDataButton, self).__init__(name="月份数据")
self.function_init()
# Function binding -
def function_init(self):
pass
class YearlyDataButton(BaseButton):
'''
Yearly-data button, inherits from the base button
'''
def __init__(self):
super(YearlyDataButton, self).__init__(name="年份数据")
self.function_init()
# Function binding -
def function_init(self):
pass
|
mit
| 4,804,963,411,780,969,000
| 17.836283
| 64
| 0.547099
| false
| 2.576877
| false
| false
| false
|
AlexStarov/Shop
|
applications/discount/management/commands/processing_actions.py
|
1
|
6004
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from applications.product.models import Category, Product
from applications.discount.models import Action
__author__ = 'Alex Starov'
class Command(BaseCommand, ):
def handle(self, *args, **options):
try:
action_category = Category.objects.get(url=u'акции', )
except Category.DoesNotExist:
action_category = False
""" Выключаем продукты из "АКЦИИ" срок действия акции которой уже подощёл к концу """
action_not_active = Action.objects.not_active()
if action_not_active:
print 'Action - NOT ACTIVE:', action_not_active
for action in action_not_active:
products_of_action = action.product_in_action.all()
print 'All products:', products_of_action
"""
If the sale has auto-end enabled,
finish it.
"""
if action.auto_end:
products_of_action = action.product_in_action.in_action()
if len(products_of_action, ) > 0:
print 'Product auto_end:', products_of_action
for product in products_of_action:
print 'Del product from Action: ', product
"""
Mark the product as no longer taking part in the sale
"""
if action_category:
product.category.remove(action_category, )
product.in_action = False
if action.auto_del_action_from_product:
if action_category:
product.action.remove(action, )
product.save()
if action.auto_del:
action.deleted = True
action.save()
action_active = Action.objects.active()
if action_active:
print 'Action - ACTIVE:', action_active
for action in action_active:
products_of_action = action.product_in_action.all()
print 'All products:', products_of_action
"""
If the sale has auto-start enabled,
start it.
"""
if action.auto_start:
""" Включаем галочку 'Учавствует в акции' всем продуктам которые внесены в акцию
исключая продукты 'отсутсвующие на складе' """
products_of_action = action.product_in_action.exclude(is_availability=4, )
if len(products_of_action, ) > 0:
print 'Product auto_start:', products_of_action
for product in products_of_action:
""" Помечает товар как учавствующий в акции """
product.in_action = True
""" Добавляем категорию 'Акция' в товар """
if action_category:
product.category.add(action_category, )
product.save()
""" Удаляем товары учавствующие в активной акции но при этом 'отсутсвующие на складе' """
products_remove_from_action = action.product_in_action.exclude(is_availability__lt=4, )
if len(products_remove_from_action, ) > 0:
print 'Product auto_start remove:', products_remove_from_action
for product in products_remove_from_action:
""" Помечает товар как не учавствующий в акции """
product.in_action = False
""" Удаляем категорию 'Акция' из товара """
if action_category:
product.category.remove(action_category, )
product.save()
""" Убираем галочку 'участвует в акции' всем продуктам у которых она почемуто установлена,
но при этом отсутвует хоть какая то акция """
products = Product.objects.filter(in_action=True, action=None, ).update(in_action=False, )
print 'Products removed from the sale because they were withdrawn from it: ', products
""" Убираем галочку 'участвует в акции' всем продуктам которые отсутсвуют на складе """
products = Product.objects.filter(in_action=True, is_availability=4, ).update(in_action=False, )
print 'Products removed from the sale because they are out of stock: ', products
""" Делаем активной акционную категорию, если есть хоть один акционный товар """
all_actions_products = action_category.products.all()
if len(all_actions_products) != 0 and not action_category.is_active:
action_category.is_active = True
action_category.save()
elif len(all_actions_products) == 0 and action_category.is_active:
action_category.is_active = False
action_category.save()
|
apache-2.0
| -2,092,252,847,127,148,000
| 51.525253
| 109
| 0.527115
| false
| 3.13253
| false
| false
| false
|
d120/pyfeedback
|
src/feedback/migrations/0043_auto_20190618_2221.py
|
1
|
8403
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-06-18 22:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('feedback', '0042_auto_20180608_1423'),
]
operations = [
migrations.CreateModel(
name='FragebogenUE2016',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fach', models.CharField(blank=True, choices=[('inf', 'Informatik'), ('math', 'Mathematik'), ('ce', 'Computational Engineering'), ('ist', 'Informationssystemtechnik'), ('etit', 'Elektrotechnik'), ('psyit', 'Psychologie in IT'), ('winf', 'Wirtschaftsinformatik'), ('sonst', 'etwas anderes')], max_length=5)),
('abschluss', models.CharField(blank=True, choices=[('bsc', 'Bachelor'), ('msc', 'Master'), ('dipl', 'Diplom'), ('lehr', 'Lehramt'), ('sonst', 'anderer Abschluss')], max_length=5)),
('semester', models.CharField(blank=True, choices=[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '>=10')], max_length=4)),
('geschlecht', models.CharField(blank=True, choices=[('w', 'weiblich'), ('m', 'männlich'), ('s', 'sonstiges')], max_length=1)),
('studienberechtigung', models.CharField(blank=True, choices=[('d', 'Deutschland'), ('o', 'anderes Land')], max_length=1)),
('ue_wie_oft_besucht', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_besuch_ueberschneidung', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)),
('ue_besuch_qualitaet', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)),
('ue_besuch_verhaeltnisse', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)),
('ue_besuch_privat', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)),
('ue_besuch_elearning', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)),
('ue_besuch_zufrueh', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)),
('ue_besuch_sonstiges', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)),
('ue_3_1', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_3_2', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_3_3', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_3_4', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_3_5', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_3_6', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_3_7', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_3_8', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_1', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_2', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_3', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_4', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_5', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_6', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_7', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_8', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_9', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_4_10', models.CharField(blank=True, max_length=1)),
('ue_4_11', models.CharField(blank=True, max_length=1)),
('kennziffer', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_1', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_2', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_3', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_4', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_5', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_6', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_7', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_8', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_9', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_10', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_11', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_12', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_13', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_14', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_15', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_5_16', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_6_1', models.CharField(blank=True, choices=[('0', '0'), ('1', '0.5'), ('2', '1'), ('3', '2'), ('4', '3'), ('5', '4'), ('6', '5'), ('7', '>=5')], max_length=1)),
('ue_6_2', models.PositiveSmallIntegerField(blank=True, null=True)),
('ue_6_3', models.PositiveSmallIntegerField(blank=True, null=True)),
('veranstaltung', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feedback.Veranstaltung')),
],
options={
'verbose_name': 'Übungsfragebogen 2016',
'verbose_name_plural': 'Übungfragebögen 2016',
'ordering': ['semester', 'veranstaltung'],
},
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_arbeitsbedingungen',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_arbeitsbedingungen_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_didaktik',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_didaktik_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_feedbackpreis',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_feedbackpreis_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_lernerfolg',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_lernerfolg_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_organisation',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_organisation_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_umgang',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='ergebnis2016',
name='ue_umgang_count',
field=models.PositiveIntegerField(default=0),
),
]
|
agpl-3.0
| 591,802,522,073,656,600
| 58.992857
| 324
| 0.57233
| false
| 3.771441
| false
| false
| false
|
mskala/birdie
|
birdieapp/utils/media.py
|
1
|
4300
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2014 Ivo Nunes/Vasco Nunes
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PIL import Image, ImageDraw
from gi.repository import GdkPixbuf
from birdieapp.constants import BIRDIE_CACHE_PATH
import StringIO
import os
def resize_and_crop(img, size, crop_type='middle'):
"""
Resize and crop an image to fit the specified size.
"""
# Get current and desired ratio for the images
img_ratio = img.size[0] / float(img.size[1])
ratio = size[0] / float(size[1])
# The image is scaled/cropped vertically or horizontally depending on the
# ratio
if ratio > img_ratio:
img = img.resize(
(size[0], size[0] * img.size[1] / img.size[0]), Image.ANTIALIAS)
# Crop in the top, middle or bottom
if crop_type == 'top':
box = (0, 0, img.size[0], size[1])
elif crop_type == 'middle':
box = (0, (img.size[1] - size[1]) / 2, img.size[
0], (img.size[1] + size[1]) / 2)
elif crop_type == 'bottom':
box = (0, img.size[1] - size[1], img.size[0], img.size[1])
else:
raise ValueError('ERROR: invalid value for crop_type')
img = img.crop(box)
elif ratio < img_ratio:
img = img.resize(
(size[1] * img.size[0] / img.size[1], size[1]), Image.ANTIALIAS)
# Crop in the top, middle or bottom
if crop_type == 'top':
box = (0, 0, size[0], img.size[1])
elif crop_type == 'middle':
box = ((img.size[0] - size[0]) / 2, 0, (
img.size[0] + size[0]) / 2, img.size[1])
elif crop_type == 'bottom':
box = (img.size[0] - size[0], 0, img.size[0], img.size[1])
else:
raise ValueError('ERROR: invalid value for crop_type')
img = img.crop(box)
else:
img = img.resize((size[0], size[1]), Image.ANTIALIAS)
return img
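# Usage sketch (the file name is hypothetical): produce the 318x120 centre crop
# that cropped_thumbnail() below relies on, from an arbitrary source image.
#
#   im = Image.open('cover.png').convert('RGBA')
#   thumb = resize_and_crop(im, (318, 120), crop_type='middle')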
def cropped_thumbnail(img):
"""Creates a centered cropped thumbnail GdkPixbuf of given image"""
# thumbnail and crop
try:
im = Image.open(img)
im = im.convert('RGBA')
im = resize_and_crop(im, (318, 120))
# Convert to GdkPixbuf
buff = StringIO.StringIO()
im.save(buff, 'ppm')
contents = buff.getvalue()
buff.close()
loader = GdkPixbuf.PixbufLoader.new_with_type('pnm')
loader.write(contents)
pixbuf = loader.get_pixbuf()
loader.close()
return pixbuf
except IOError:
print("Invalid image file %s"%img)
try:
os.remove(img)
except IOError:
pass
return None
def fit_image_screen(img, widget):
pixbuf = GdkPixbuf.Pixbuf.new_from_file(img)
screen_h = widget.get_screen().get_height()
screen_w = widget.get_screen().get_width()
if pixbuf.get_height() >= screen_h - 100:
factor = float(pixbuf.get_width()) / pixbuf.get_height()
new_width = factor * (screen_h - 100)
pixbuf = pixbuf.scale_simple(
new_width, screen_h - 100, GdkPixbuf.InterpType.BILINEAR)
return pixbuf
if pixbuf.get_width() >= screen_w:
factor = float(pixbuf.get_height()) / pixbuf.get_width()
new_height = factor * (screen_w - 100)
pixbuf = pixbuf.scale_simple(
screen_w - 100, new_height, GdkPixbuf.InterpType.BILINEAR)
return pixbuf
return pixbuf
def simple_resize(img_path, w, h):
try:
im = Image.open(img_path)
img = im.resize((w, h), Image.ANTIALIAS)
dest = BIRDIE_CACHE_PATH + os.path.basename(img_path) + ".jpg"
img.save(dest)
return dest
except IOError:
return None
|
gpl-3.0
| 2,320,095,578,948,772,000
| 33.126984
| 77
| 0.594651
| false
| 3.43725
| false
| false
| false
|
wenxichen/tensorflow_yolo2
|
src/img_dataset/ilsvrc2017_cls.py
|
1
|
7175
|
"""ILSVRC 2017 Classicifation Dataset.
DEPRECATED version. For the purpose of keeping history only.
Use ilsvrc2017_cls_multithread.py instead.
"""
import os
import cv2
import numpy as np
import random
import config as cfg
class ilsvrc_cls:
def __init__(self, image_set, rebuild=False, data_aug=True):
self.name = 'ilsvrc_2017'
self.devkit_path = cfg.ILSVRC_PATH
self.data_path = self.devkit_path
self.cache_path = cfg.CACHE_PATH
self.batch_size = cfg.BATCH_SIZE
self.image_size = cfg.IMAGE_SIZE
self.image_set = image_set
self.rebuild = rebuild
self.data_aug = data_aug
self.cursor = 0
self.load_classes()
# self.gt_labels = None
assert os.path.exists(self.devkit_path), \
'VOCdevkit path does not exist: {}'.format(self.devkit_path)
assert os.path.exists(self.data_path), \
'Path does not exist: {}'.format(self.data_path)
self.prepare()
def prepare(self):
"""Create a list of ground truth that includes input path and label.
"""
if (self.image_set == "train"):
imgset_fname = "train_cls.txt"
else:
imgset_fname = self.image_set + ".txt"
imgset_file = os.path.join(
self.data_path, 'ImageSets', 'CLS-LOC', imgset_fname)
print('Processing gt_labels using ' + imgset_file)
gt_labels = []
with open(imgset_file, 'r') as f:
for line in f.readlines():
img_path = line.strip().split()[0]
label = self.class_to_ind[img_path.split("/")[0]]
imname = os.path.join(
self.data_path, 'Data', 'CLS-LOC', self.image_set, img_path + ".JPEG")
gt_labels.append(
{'imname': imname, 'label': label})
random.shuffle(gt_labels)
self.gt_labels = gt_labels
def load_classes(self):
"""Use the folder name to get labels."""
if (self.image_set == "train"):
img_folder = os.path.join(
self.data_path, 'Data', 'CLS-LOC', 'train')
print('Loading class info from ' + img_folder)
self.classes = [item for item in os.listdir(img_folder)
if os.path.isdir(os.path.join(img_folder, item))]
self.num_class = len(self.classes)
assert (self.num_class == 1000), "number of classes is not 1000!"
self.class_to_ind = dict(
list(zip(self.classes, list(range(self.num_class)))))
def get(self):
"""Get shuffled images and labels according to batchsize.
Return:
images: 4D numpy array
labels: 1D numpy array
"""
images = np.zeros(
(self.batch_size, self.image_size, self.image_size, 3))
labels = np.zeros(self.batch_size)
count = 0
while count < self.batch_size:
imname = self.gt_labels[self.cursor]['imname']
images[count, :, :, :] = self.image_read(imname, data_aug=self.data_aug)
labels[count] = self.gt_labels[self.cursor]['label']
count += 1
self.cursor += 1
if self.cursor >= len(self.gt_labels):
random.shuffle(self.gt_labels)
self.cursor = 0
return images, labels
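# Usage sketch (assumes cfg.ILSVRC_PATH and the other config values point at a
# prepared ILSVRC CLS-LOC layout):
#
#   dataset = ilsvrc_cls('train', data_aug=True)
#   images, labels = dataset.get()  # images: (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3)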
def image_read(self, imname, data_aug=False):
image = cv2.imread(imname)
#####################
# Data Augmentation #
#####################
if data_aug:
flip = bool(random.getrandbits(1))
rotate_deg = random.randint(0, 359)
# 75% chance to do random crop
# another 25% chance of maintaining the input at 224x224
# this help simplify the input processing for test, val
# TODO: can make multiscale test input later
random_crop_chance = random.randint(0, 3)
too_small = False
color_pert = bool(random.getrandbits(1))
if flip:
image = image[:, ::-1, :]
# assume color image
rows, cols, _ = image.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate_deg, 1)
image = cv2.warpAffine(image, M, (cols, rows))
# color perturbation
if color_pert:
hue_shift_sign = bool(random.getrandbits(1))
hue_shift = random.randint(0, 10)
saturation_shift_sign = bool(random.getrandbits(1))
saturation_shift = random.randint(0, 10)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# TODO: currently not sure what cv2 does to values
# that are larger than the maximum.
# It seems it does not cut at the max
# nor normalize the whole by multiplying a factor.
# need to expore this in more detail
if hue_shift_sign:
hsv[:, :, 0] += hue_shift
else:
hsv[:, :, 0] -= hue_shift
if saturation_shift_sign:
hsv[:, :, 1] += saturation_shift
else:
hsv[:, :, 1] -= saturation_shift
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
# random crop
if random_crop_chance > 0:
# current random crop upbound is 292 (1.3 x 224)
short_side_len = random.randint(
self.image_size, cfg.RAND_CROP_UPBOUND)
short_side = min([cols, rows])
if short_side == cols:
scaled_cols = short_side_len
factor = float(short_side_len) / cols
scaled_rows = int(rows * factor)
else:
scaled_rows = short_side_len
factor = float(short_side_len) / rows
scaled_cols = int(cols * factor)
# print "scaled_cols and rows:", scaled_cols, scaled_rows
if scaled_cols < 224 or scaled_rows < 224:
too_small = True
print "Image is too small,", imname
else:
image = cv2.resize(image, (scaled_cols, scaled_rows))
col_offset = random.randint(0, scaled_cols - self.image_size)
row_offset = random.randint(0, scaled_rows - self.image_size)
# print "col_offset and row_offset:", col_offset, row_offset
image = image[row_offset:self.image_size + row_offset,
col_offset:self.image_size + col_offset]
# assuming still using image size 224x224
# print "image shape is", image.shape
if random_crop_chance == 0 or too_small:
image = cv2.resize(image, (self.image_size, self.image_size))
else:
image = cv2.resize(image, (self.image_size, self.image_size))
image = image.astype(np.float32)
image = (image / 255.0) * 2.0 - 1.0
return image
|
mit
| -763,985,578,201,332,100
| 39.767045
| 90
| 0.522509
| false
| 3.905825
| false
| false
| false
|
oss/rutgers-repository-utils
|
lib/repoclosure.py
|
1
|
11619
|
#!/usr/bin/python -t
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# seth vidal 2005 (c) etc etc
#Read in the metadata of a series of repositories and check all the
# dependencies in all packages for resolution. Print out the list of
# packages with unresolved dependencies
import sys
import os
import logging
import yum
import yum.Errors
from yum.misc import getCacheDir
from optparse import OptionParser
import rpmUtils.arch
import rpmUtils.updates
from yum.constants import *
from yum.packageSack import ListPackageSack
def parseArgs():
usage = """
Read in the metadata of a series of repositories and check all the
dependencies in all packages for resolution. Print out the list of
packages with unresolved dependencies
%s [-c <config file>] [-a <arch>] [-l <lookaside>] [-r <repoid>] [-r <repoid2>]
""" % sys.argv[0]
parser = OptionParser(usage=usage)
parser.add_option("-c", "--config", default='/etc/yum.conf',
help='config file to use (defaults to /etc/yum.conf)')
parser.add_option("-a", "--arch", default=[], action='append',
help='check packages of the given archs, can be specified multiple ' +
'times (default: current arch)')
parser.add_option("--basearch", default=None,
help="set the basearch for yum to run as")
parser.add_option("-b", "--builddeps", default=False, action="store_true",
help='check build dependencies only (needs source repos enabled)')
parser.add_option("-l", "--lookaside", default=[], action='append',
help="specify a lookaside repo id to query, can be specified multiple times")
parser.add_option("-r", "--repoid", default=[], action='append',
help="specify repo ids to query, can be specified multiple times (default is all enabled)")
parser.add_option("-t", "--tempcache", default=False, action="store_true",
help="Use a temp dir for storing/accessing yum-cache")
parser.add_option("-q", "--quiet", default=0, action="store_true",
help="quiet (no output to stderr)")
parser.add_option("-n", "--newest", default=0, action="store_true",
help="check only the newest packages in the repos")
parser.add_option("--repofrompath", action="append",
help="specify repoid & paths of additional repositories - unique repoid and path required, can be specified multiple times. Example. --repofrompath=myrepo,/path/to/repo")
parser.add_option("-p", "--pkg", action="append",
help="check closure for this package only")
parser.add_option("-g", "--group", action="append",
help="check closure for packages in this group only")
(opts, args) = parser.parse_args()
return (opts, args)
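# Example invocation (values are illustrative, following the usage text above):
#   repoclosure.py -c /etc/yum.conf --newest -a x86_64 -r base -r updates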
# Note that this is a "real" API, used by spam-o-matic etc.
# so we have to do at least some API guarantee stuff.
class RepoClosure(yum.YumBase):
def __init__(self, arch=[], config="/etc/yum.conf", builddeps=False, pkgonly=None,
basearch=None, grouponly=None):
yum.YumBase.__init__(self)
if basearch:
self.preconf.arch = basearch
self.logger = logging.getLogger("yum.verbose.repoclosure")
self.lookaside = []
self.builddeps = builddeps
self.pkgonly = pkgonly
self.grouponly = grouponly
self.doConfigSetup(fn = config,init_plugins=False)
self._rc_arches = arch
if hasattr(self.repos, 'sqlite'):
self.repos.sqlite = False
self.repos._selectSackType()
def evrTupletoVer(self,tup):
"""convert an evr tuple to a version string, return None if nothing
to convert"""
e, v, r = tup
if v is None:
return None
val = v
if e is not None:
val = '%s:%s' % (e, v)
if r is not None:
val = '%s-%s' % (val, r)
return val
def readMetadata(self):
self.doRepoSetup()
archs = []
if not self._rc_arches:
archs.extend(self.arch.archlist)
else:
for arch in self._rc_arches:
archs.extend(self.arch.get_arch_list(arch))
if self.builddeps and 'src' not in archs:
archs.append('src')
self.doSackSetup(archs)
for repo in self.repos.listEnabled():
self.repos.populateSack(which=[repo.id], mdtype='filelists')
def getBrokenDeps(self, newest=False):
unresolved = {}
resolved = {}
pkgs = self.pkgSack
if newest:
pkgs = self.pkgSack.returnNewestByNameArch()
mypkgSack = ListPackageSack(pkgs)
pkgtuplist = mypkgSack.simplePkgList()
# toss out any of the obsoleted pkgs so we can't depsolve with them
self.up = rpmUtils.updates.Updates([], pkgtuplist)
self.up.rawobsoletes = mypkgSack.returnObsoletes()
for pkg in pkgs:
fo = self.up.checkForObsolete([pkg.pkgtup])
if fo:
# useful debug to make sure the obsoletes is sane
#print "ignoring obsolete pkg %s" % pkg
#for i in fo[pkg.pkgtup]:
# print i
self.pkgSack.delPackage(pkg)
# we've deleted items so remake the pkgs
pkgs = self.pkgSack.returnNewestByNameArch()
pkgtuplist = mypkgSack.simplePkgList()
if self.builddeps:
pkgs = filter(lambda x: x.arch == 'src', pkgs)
pkglist = self.pkgonly
if self.grouponly:
if not pkglist:
pkglist = []
for group in self.grouponly:
groupobj = self.comps.return_group(group)
if not groupobj:
continue
pkglist.extend(groupobj.packages)
if pkglist:
pkgs = filter(lambda x: x.name in pkglist, pkgs)
for pkg in pkgs:
if pkg.repoid in self.lookaside:
                # don't attempt to resolve dependency issues for
# packages from lookaside repositories
continue
for (req, flags, (reqe, reqv, reqr)) in pkg.returnPrco('requires'):
if req.startswith('rpmlib'): continue # ignore rpmlib deps
ver = self.evrTupletoVer((reqe, reqv, reqr))
if (req,flags,ver) in resolved:
continue
try:
resolve_sack = self.whatProvides(req, flags, ver)
except yum.Errors.RepoError, e:
pass
if len(resolve_sack) < 1:
if pkg not in unresolved:
unresolved[pkg] = []
unresolved[pkg].append((req, flags, ver))
continue
if newest:
resolved_by_newest = False
for po in resolve_sack:# look through and make sure all our answers are newest-only
if po.pkgtup in pkgtuplist:
resolved_by_newest = True
break
if resolved_by_newest:
resolved[(req,flags,ver)] = 1
else:
if pkg not in unresolved:
unresolved[pkg] = []
unresolved[pkg].append((req, flags, ver))
return unresolved
def main():
(opts, cruft) = parseArgs()
my = RepoClosure(arch=opts.arch,
config=opts.config,
builddeps=opts.builddeps,
pkgonly=opts.pkg,
grouponly=opts.group,
basearch=opts.basearch)
if opts.repofrompath:
# setup the fake repos
for repo in opts.repofrompath:
repoid,repopath = tuple(repo.split(','))
if repopath.startswith('http') or repopath.startswith('ftp') or repopath.startswith('file:'):
baseurl = repopath
else:
repopath = os.path.abspath(repopath)
baseurl = 'file://' + repopath
newrepo = yum.yumRepo.YumRepository(repoid)
newrepo.name = repopath
newrepo.baseurl = baseurl
newrepo.basecachedir = my.conf.cachedir
newrepo.metadata_expire = 0
newrepo.timestamp_check = False
my.repos.add(newrepo)
my.repos.enableRepo(newrepo.id)
my.logger.info( "Added %s repo from %s" % (repoid,repopath))
if opts.repoid:
for repo in my.repos.repos.values():
if ((repo.id not in opts.repoid) and
(repo.id not in opts.lookaside)):
repo.disable()
else:
repo.enable()
if opts.lookaside:
my.lookaside = opts.lookaside
if os.geteuid() != 0 or opts.tempcache:
cachedir = getCacheDir()
if cachedir is None:
my.logger.error("Error: Could not make cachedir, exiting")
sys.exit(50)
my.repos.setCacheDir(cachedir)
if not opts.quiet:
my.logger.info('Reading in repository metadata - please wait....')
try:
my.readMetadata()
except yum.Errors.RepoError, e:
my.logger.info(e)
my.logger.info('Some dependencies may not be complete for this repository')
my.logger.info('Run as root to get all dependencies or use -t to enable a user temp cache')
if not opts.quiet:
my.logger.info('Checking Dependencies')
baddeps = my.getBrokenDeps(opts.newest)
if opts.newest:
num = len(my.pkgSack.returnNewestByNameArch())
else:
num = len(my.pkgSack)
repos = my.repos.listEnabled()
if not opts.quiet:
my.logger.info('Repos looked at: %s' % len(repos))
for repo in repos:
my.logger.info(' %s' % repo)
my.logger.info('Num Packages in Repos: %s' % num)
pkgs = baddeps.keys()
def sortbyname(a,b):
return cmp(a.__str__(),b.__str__())
pkgs.sort(sortbyname)
for pkg in pkgs:
my.logger.info('package: %s from %s\n unresolved deps: ' % (pkg, pkg.repoid))
for (n, f, v) in baddeps[pkg]:
req = '%s' % n
if f:
flag = LETTERFLAGS[f]
req = '%s %s'% (req, flag)
if v:
req = '%s %s' % (req, v)
my.logger.info(' %s' % req)
if __name__ == "__main__":
try:
main()
except (yum.Errors.YumBaseError, ValueError), e:
print >> sys.stderr, str(e)
sys.exit(1)
|
gpl-2.0
| 6,160,217,929,008,276,000
| 36.846906
| 192
| 0.560117
| false
| 4.096968
| true
| false
| false
|
dominikgiermala/properties-editor
|
src/properties_editor.py
|
1
|
5314
|
import os
import sublime
import sublime_plugin
from .lib.pyjavaproperties import Properties
class AddEditPropertiesCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
# TODO: validate if *.properties file
self.paths = paths
self.window.show_input_panel("Properties to add/edit:", '', self.on_properties_put, None, None)
def on_properties_put(self, properties_string):
if properties_string and properties_string.strip() and '=' in properties_string:
self.properties = {}
for property_string in properties_string.split('\n'):
key_value = property_string.split('=', 1)
if key_value[0] and key_value[1]:
self.properties[key_value[0]] = key_value[1]
self.edit_properties(self.properties)
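  # Input format sketch for the panel above (illustrative values):
  #   some.key=some value
  #   another.key=another value
  # Each non-empty line is split on the first '=' into a key/value pair.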
def edit_properties(self, properties):
files_without_key = {}
files_with_key = {}
for key in properties:
files_with_key[key] = []
files_without_key[key] = []
for file in self.paths:
p = Properties()
p.load(open(file, encoding='latin-1', mode='r'))
for key, value in properties.items():
if p.getProperty(key):
files_with_key[key].append(os.path.basename(file))
else:
files_without_key[key].append(os.path.basename(file))
p[key] = value
p.store(open(file, encoding='latin-1', mode='w'))
self.display_confirmation_message(files_without_key, files_with_key)
def display_confirmation_message(self, files_without_key, files_with_key):
confirmation_message = ""
for key, value in self.properties.items():
confirmation_message += "Property " + key + "=" + value + " was: "
if files_without_key[key]:
confirmation_message += "\nAdded in files:\n" + "\n".join(files_without_key[key])
if files_with_key[key]:
confirmation_message += "\n\nEdited in files:\n" + "\n".join(files_with_key[key])
confirmation_message += "\n\n"
sublime.message_dialog(confirmation_message)
class RemovePropertyCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
# TODO: validate if *.properties file
self.paths = paths
self.window.show_input_panel("Property key to remove:", '', self.on_key_put, None, None)
def on_key_put(self, key):
if key and key.strip():
self.key = key
self.remove_property(key, self.paths)
def remove_property(self, key, paths):
files_without_key = []
files_with_key = []
for file in self.paths:
p = Properties()
p.load(open(file))
if p.getProperty(key):
p.removeProperty(key)
files_with_key.append(os.path.basename(file))
p.store(open(file, 'w'))
else:
files_without_key.append(os.path.basename(file))
self.display_confirmation_message(files_without_key, files_with_key)
def display_confirmation_message(self, files_without_key, files_with_key):
confirmation_message = "Property with key " + self.key + " was: "
if files_with_key:
confirmation_message += "\nRemoved in files:\n" + "\n".join(files_with_key)
if files_without_key:
confirmation_message += "\n\nNot found in files:\n" + "\n".join(files_without_key)
if files_without_key:
sublime.error_message(confirmation_message)
else:
sublime.message_dialog(confirmation_message)
class RenameKeyCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
# TODO: validate if *.properties file
self.paths = paths
self.window.show_input_panel("Key to rename:", '', self.on_old_key_put, None, None)
def on_old_key_put(self, old_key):
if old_key and old_key.strip():
self.old_key = old_key
self.window.show_input_panel("New key:", '', self.on_new_key_put, None, None)
def on_new_key_put(self, new_key):
if new_key and new_key.strip():
self.new_key = new_key
self.rename_key(self.old_key, self.new_key)
def rename_key(self, old_key, new_key):
files_without_old_key = []
files_with_new_key = []
files_with_renamed_key = []
for file in self.paths:
p = Properties()
p.load(open(file))
if p.getProperty(old_key):
if not p.getProperty(new_key):
p[new_key] = p[old_key]
p.removeProperty(old_key)
files_with_renamed_key.append(os.path.basename(file))
else:
files_with_new_key.append(os.path.basename(file))
else:
files_without_old_key.append(os.path.basename(file))
p.store(open(file, 'w'))
self.display_confirmation_message(files_without_old_key, files_with_new_key, files_with_renamed_key)
def display_confirmation_message(self, files_without_old_key, files_with_new_key, files_with_renamed_key):
confirmation_message = "Key " + self.old_key + " was: "
if files_with_renamed_key:
confirmation_message += "\nRenamed in files:\n" + "\n".join(files_with_renamed_key)
if files_without_old_key:
confirmation_message += "\n\nNot found in files:\n" + "\n".join(files_without_old_key)
if files_with_new_key:
confirmation_message += "\n\nKey " + self.new_key + " already exists in files:\n" + "\n".join(files_with_new_key)
if files_without_old_key or files_with_new_key:
sublime.error_message(confirmation_message)
else:
sublime.message_dialog(confirmation_message)
|
mit
| -4,607,190,421,713,108,000
| 38.664179
| 119
| 0.649417
| false
| 3.361164
| false
| false
| false
|
SIPp/pysipp
|
pysipp/launch.py
|
1
|
5708
|
"""
Launchers for invoking SIPp user agents
"""
import subprocess
import os
import shlex
import select
import threading
import signal
import time
from . import utils
from pprint import pformat
from collections import OrderedDict, namedtuple
log = utils.get_logger()
Streams = namedtuple("Streams", "stdout stderr")
class TimeoutError(Exception):
"SIPp process timeout exception"
class PopenRunner(object):
"""Run a sequence of SIPp agents asynchronously. If any process terminates
with a non-zero exit code, immediately kill all remaining processes and
collect std streams.
Adheres to an interface similar to `multiprocessing.pool.AsyncResult`.
"""
def __init__(
self,
subprocmod=subprocess,
osmod=os,
poller=select.epoll,
):
# these could optionally be rpyc proxy objs
self.spm = subprocmod
self.osm = osmod
self.poller = poller()
# collector thread placeholder
self._waiter = None
# store proc results
self._procs = OrderedDict()
def __call__(self, cmds, block=True, rate=300, **kwargs):
if self._waiter and self._waiter.is_alive():
raise RuntimeError("Not all processes from a prior run have completed")
if self._procs:
raise RuntimeError(
"Process results have not been cleared from previous run"
)
sp = self.spm
os = self.osm
DEVNULL = open(os.devnull, "wb")
fds2procs = OrderedDict()
# run agent commands in sequence
for cmd in cmds:
log.debug('launching cmd:\n"{}"\n'.format(cmd))
proc = sp.Popen(shlex.split(cmd), stdout=DEVNULL, stderr=sp.PIPE)
fd = proc.stderr.fileno()
log.debug("registering fd '{}' for pid '{}'".format(fd, proc.pid))
fds2procs[fd] = self._procs[cmd] = proc
# register for stderr hangup events
self.poller.register(proc.stderr.fileno(), select.EPOLLHUP)
# limit launch rate
time.sleep(1.0 / rate)
# launch waiter
self._waiter = threading.Thread(target=self._wait, args=(fds2procs,))
self._waiter.daemon = True
self._waiter.start()
return self.get(**kwargs) if block else self._procs
def _wait(self, fds2procs):
log.debug("started waiter for procs {}".format(fds2procs))
signalled = None
left = len(fds2procs)
collected = 0
while collected < left:
pairs = self.poller.poll() # wait on hangup events
log.debug("received hangup for pairs '{}'".format(pairs))
for fd, status in pairs:
collected += 1
proc = fds2procs[fd]
                # attach streams so they can be read more than once
log.debug("collecting streams for {}".format(proc))
proc.streams = Streams(*proc.communicate()) # timeout=2))
if proc.returncode != 0 and not signalled:
# stop all other agents if there is a failure
signalled = self.stop()
log.debug("terminating waiter thread")
def get(self, timeout=180):
"""Block up to `timeout` seconds for all agents to complete.
Either return (cmd, proc) pairs or raise `TimeoutError` on timeout
"""
if self._waiter.is_alive():
self._waiter.join(timeout=timeout)
if self._waiter.is_alive():
# kill them mfin SIPps
signalled = self.stop()
self._waiter.join(timeout=10)
if self._waiter.is_alive():
# try to stop a few more times
for _ in range(3):
signalled = self.stop()
self._waiter.join(timeout=1)
if self._waiter.is_alive():
# some procs failed to terminate via signalling
raise RuntimeError("Unable to kill all agents!?")
# all procs were killed by SIGUSR1
raise TimeoutError(
"pids '{}' failed to complete after '{}' seconds".format(
pformat([p.pid for p in signalled.values()]), timeout
)
)
return self._procs
def stop(self):
"""Stop all agents with SIGUSR1 as per SIPp's signal handling"""
return self._signalall(signal.SIGUSR1)
def terminate(self):
"""Kill all agents with SIGTERM"""
return self._signalall(signal.SIGTERM)
def _signalall(self, signum):
signalled = OrderedDict()
for cmd, proc in self.iterprocs():
proc.send_signal(signum)
log.warn(
"sent signal '{}' to cmd '{}' with pid '{}'".format(
signum, cmd, proc.pid
)
)
signalled[cmd] = proc
return signalled
def iterprocs(self):
"""Iterate all processes which are still alive yielding
(cmd, proc) pairs
"""
return (
(cmd, proc)
for cmd, proc in self._procs.items()
if proc and proc.poll() is None
)
def is_alive(self):
"""Return bool indicating whether some agents are still alive"""
return any(self.iterprocs())
def ready(self):
"""Return bool indicating whether all agents have completed"""
return not self.is_alive()
def clear(self):
"""Clear all processes from the last run"""
assert self.ready(), "Not all processes have completed"
self._procs.clear()
|
gpl-2.0
| -7,029,735,260,306,038,000
| 32.576471
| 83
| 0.563595
| false
| 4.452418
| false
| false
| false
|
Khan/pyobjc-framework-FSEvents
|
setup.py
|
1
|
1152
|
'''
Wrappers for the "FSEvents" API in MacOS X. The functions in this framework
allow you to reliably observe changes to the filesystem, even when your
program is not running all the time.
These wrappers don't include documentation; please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
'''
from pyobjc_setup import setup, Extension
setup(
min_os_level='10.5',
name='pyobjc-framework-FSEvents',
version="2.5.1",
description = "Wrappers for the framework FSEvents on Mac OS X",
packages = [ "FSEvents" ],
# setup_requires doesn't like git links, so we just have to
# pip install these first:
#setup_requires = [
# 'https://github.com/Khan/pyobjc-core/tarball/master',
#],
dependency_links = [
'https://github.com/Khan/pyobjc-core/tarball/master',
'https://github.com/Khan/pyobjc-framework-Cocoa/tarball/master',
],
ext_modules = [
Extension("FSEvents._callbacks",
[ "Modules/_callbacks.m" ],
),
],
)
|
mit
| -8,843,281,425,080,310,000
| 33.909091
| 76
| 0.684896
| false
| 3.611285
| false
| false
| false
|
maxmind/GeoIP2-python
|
tests/webservice_test.py
|
1
|
12944
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import copy
import ipaddress
import json
import sys
from typing import cast, Dict
import unittest
sys.path.append("..")
# httpretty currently doesn't work, but mocket with the compat interface
# does.
from mocket import Mocket # type: ignore
from mocket.plugins.httpretty import httpretty, httprettified # type: ignore
import geoip2
from geoip2.errors import (
AddressNotFoundError,
AuthenticationError,
GeoIP2Error,
HTTPError,
InvalidRequestError,
OutOfQueriesError,
PermissionRequiredError,
)
from geoip2.webservice import AsyncClient, Client
class TestBaseClient(unittest.TestCase):
base_uri = "https://geoip.maxmind.com/geoip/v2.1/"
country = {
"continent": {"code": "NA", "geoname_id": 42, "names": {"en": "North America"}},
"country": {
"geoname_id": 1,
"iso_code": "US",
"names": {"en": "United States of America"},
},
"maxmind": {"queries_remaining": 11},
"registered_country": {
"geoname_id": 2,
"is_in_european_union": True,
"iso_code": "DE",
"names": {"en": "Germany"},
},
"traits": {"ip_address": "1.2.3.4", "network": "1.2.3.0/24"},
}
# this is not a comprehensive representation of the
# JSON from the server
insights = cast(Dict, copy.deepcopy(country))
insights["traits"]["user_count"] = 2
insights["traits"]["static_ip_score"] = 1.3
def _content_type(self, endpoint):
return (
"application/vnd.maxmind.com-"
+ endpoint
+ "+json; charset=UTF-8; version=1.0"
)
@httprettified
def test_country_ok(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/1.2.3.4",
body=json.dumps(self.country),
status=200,
content_type=self._content_type("country"),
)
country = self.run_client(self.client.country("1.2.3.4"))
self.assertEqual(
type(country), geoip2.models.Country, "return value of client.country"
)
self.assertEqual(country.continent.geoname_id, 42, "continent geoname_id is 42")
self.assertEqual(country.continent.code, "NA", "continent code is NA")
self.assertEqual(
country.continent.name, "North America", "continent name is North America"
)
self.assertEqual(country.country.geoname_id, 1, "country geoname_id is 1")
self.assertIs(
country.country.is_in_european_union,
False,
"country is_in_european_union is False",
)
self.assertEqual(country.country.iso_code, "US", "country iso_code is US")
self.assertEqual(
country.country.names, {"en": "United States of America"}, "country names"
)
self.assertEqual(
country.country.name,
"United States of America",
"country name is United States of America",
)
self.assertEqual(
country.maxmind.queries_remaining, 11, "queries_remaining is 11"
)
self.assertIs(
country.registered_country.is_in_european_union,
True,
"registered_country is_in_european_union is True",
)
self.assertEqual(
country.traits.network, ipaddress.ip_network("1.2.3.0/24"), "network"
)
self.assertEqual(country.raw, self.country, "raw response is correct")
@httprettified
def test_me(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/me",
body=json.dumps(self.country),
status=200,
content_type=self._content_type("country"),
)
implicit_me = self.run_client(self.client.country())
self.assertEqual(
type(implicit_me), geoip2.models.Country, "country() returns Country object"
)
        explicit_me = self.run_client(self.client.country("me"))
self.assertEqual(
type(explicit_me),
geoip2.models.Country,
"country('me') returns Country object",
)
@httprettified
def test_200_error(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/1.1.1.1",
body="",
status=200,
content_type=self._content_type("country"),
)
with self.assertRaisesRegex(
GeoIP2Error, "could not decode the response as JSON"
):
self.run_client(self.client.country("1.1.1.1"))
@httprettified
def test_bad_ip_address(self):
with self.assertRaisesRegex(
ValueError, "'1.2.3' does not appear to be an IPv4 " "or IPv6 address"
):
self.run_client(self.client.country("1.2.3"))
@httprettified
def test_no_body_error(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/" + "1.2.3.7",
body="",
status=400,
content_type=self._content_type("country"),
)
with self.assertRaisesRegex(
HTTPError, "Received a 400 error for .* with no body"
):
self.run_client(self.client.country("1.2.3.7"))
@httprettified
def test_weird_body_error(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/" + "1.2.3.8",
body='{"wierd": 42}',
status=400,
content_type=self._content_type("country"),
)
with self.assertRaisesRegex(
HTTPError,
"Response contains JSON but it does not " "specify code or error keys",
):
self.run_client(self.client.country("1.2.3.8"))
@httprettified
def test_bad_body_error(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/" + "1.2.3.9",
body="bad body",
status=400,
content_type=self._content_type("country"),
)
with self.assertRaisesRegex(
HTTPError, "it did not include the expected JSON body"
):
self.run_client(self.client.country("1.2.3.9"))
@httprettified
def test_500_error(self):
httpretty.register_uri(
httpretty.GET, self.base_uri + "country/" + "1.2.3.10", status=500
)
with self.assertRaisesRegex(HTTPError, r"Received a server error \(500\) for"):
self.run_client(self.client.country("1.2.3.10"))
@httprettified
def test_300_error(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/" + "1.2.3.11",
status=300,
content_type=self._content_type("country"),
)
with self.assertRaisesRegex(
HTTPError, r"Received a very surprising HTTP status \(300\) for"
):
self.run_client(self.client.country("1.2.3.11"))
@httprettified
def test_ip_address_required(self):
self._test_error(400, "IP_ADDRESS_REQUIRED", InvalidRequestError)
@httprettified
def test_ip_address_not_found(self):
self._test_error(404, "IP_ADDRESS_NOT_FOUND", AddressNotFoundError)
@httprettified
def test_ip_address_reserved(self):
self._test_error(400, "IP_ADDRESS_RESERVED", AddressNotFoundError)
@httprettified
def test_permission_required(self):
self._test_error(403, "PERMISSION_REQUIRED", PermissionRequiredError)
@httprettified
def test_auth_invalid(self):
self._test_error(400, "AUTHORIZATION_INVALID", AuthenticationError)
@httprettified
def test_license_key_required(self):
self._test_error(401, "LICENSE_KEY_REQUIRED", AuthenticationError)
@httprettified
def test_account_id_required(self):
self._test_error(401, "ACCOUNT_ID_REQUIRED", AuthenticationError)
@httprettified
def test_user_id_required(self):
self._test_error(401, "USER_ID_REQUIRED", AuthenticationError)
@httprettified
    def test_account_id_unknown(self):
self._test_error(401, "ACCOUNT_ID_UNKNOWN", AuthenticationError)
@httprettified
    def test_user_id_unknown(self):
self._test_error(401, "USER_ID_UNKNOWN", AuthenticationError)
@httprettified
def test_out_of_queries_error(self):
self._test_error(402, "OUT_OF_QUERIES", OutOfQueriesError)
def _test_error(self, status, error_code, error_class):
msg = "Some error message"
body = {"error": msg, "code": error_code}
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/1.2.3.18",
body=json.dumps(body),
status=status,
content_type=self._content_type("country"),
)
with self.assertRaisesRegex(error_class, msg):
self.run_client(self.client.country("1.2.3.18"))
@httprettified
def test_unknown_error(self):
msg = "Unknown error type"
ip = "1.2.3.19"
body = {"error": msg, "code": "UNKNOWN_TYPE"}
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/" + ip,
body=json.dumps(body),
status=400,
content_type=self._content_type("country"),
)
with self.assertRaisesRegex(InvalidRequestError, msg):
self.run_client(self.client.country(ip))
@httprettified
def test_request(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "country/" + "1.2.3.4",
body=json.dumps(self.country),
status=200,
content_type=self._content_type("country"),
)
self.run_client(self.client.country("1.2.3.4"))
request = httpretty.last_request
self.assertEqual(
request.path, "/geoip/v2.1/country/1.2.3.4", "correct URI is used"
)
self.assertEqual(
request.headers["Accept"], "application/json", "correct Accept header"
)
self.assertRegex(
request.headers["User-Agent"],
"^GeoIP2-Python-Client/",
"Correct User-Agent",
)
self.assertEqual(
request.headers["Authorization"],
"Basic NDI6YWJjZGVmMTIzNDU2",
"correct auth",
)
@httprettified
def test_city_ok(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "city/" + "1.2.3.4",
body=json.dumps(self.country),
status=200,
content_type=self._content_type("city"),
)
city = self.run_client(self.client.city("1.2.3.4"))
self.assertEqual(type(city), geoip2.models.City, "return value of client.city")
self.assertEqual(
city.traits.network, ipaddress.ip_network("1.2.3.0/24"), "network"
)
@httprettified
def test_insights_ok(self):
httpretty.register_uri(
httpretty.GET,
self.base_uri + "insights/1.2.3.4",
body=json.dumps(self.insights),
status=200,
content_type=self._content_type("country"),
)
insights = self.run_client(self.client.insights("1.2.3.4"))
self.assertEqual(
type(insights), geoip2.models.Insights, "return value of client.insights"
)
self.assertEqual(
insights.traits.network, ipaddress.ip_network("1.2.3.0/24"), "network"
)
self.assertEqual(insights.traits.static_ip_score, 1.3, "static_ip_score is 1.3")
self.assertEqual(insights.traits.user_count, 2, "user_count is 2")
def test_named_constructor_args(self):
id = 47
key = "1234567890ab"
client = self.client_class(account_id=id, license_key=key)
self.assertEqual(client._account_id, str(id))
self.assertEqual(client._license_key, key)
def test_missing_constructor_args(self):
with self.assertRaises(TypeError):
self.client_class(license_key="1234567890ab")
with self.assertRaises(TypeError):
self.client_class("47")
class TestClient(TestBaseClient):
def setUp(self):
self.client_class = Client
self.client = Client(42, "abcdef123456")
def run_client(self, v):
return v
class TestAsyncClient(TestBaseClient):
def setUp(self):
self._loop = asyncio.new_event_loop()
self.client_class = AsyncClient
self.client = AsyncClient(42, "abcdef123456")
def tearDown(self):
self._loop.run_until_complete(self.client.close())
self._loop.close()
def run_client(self, v):
return self._loop.run_until_complete(v)
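# Drop the shared base class so unittest does not collect and run it directly.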
del TestBaseClient
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| -4,090,489,454,764,451,300
| 32.020408
| 88
| 0.584518
| false
| 3.698286
| true
| false
| false
|
pbl-cloud/paas-manager
|
paas_manager/app/util/gmail.py
|
1
|
1115
|
import sys
import smtplib
from email.mime.text import MIMEText
from email.utils import formatdate
from ... import config
def create_message(from_addr, to_addr, subject, message, encoding):
body = MIMEText(message, 'plain', encoding)
body['Subject'] = subject
body['From'] = from_addr
body['To'] = to_addr
body['Date'] = formatdate()
return body
def send_via_gmail(from_addr, to_addr, body):
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
s.ehlo()
s.login( config['gmail']['user'], config['gmail']['password'])
s.sendmail(from_addr, [to_addr], body.as_string())
s.close()
def gmail(message, to_addr):
body = create_message(
config['gmail']['user'], to_addr, '[Notification]', message, 'utf8')
send_via_gmail(config['gmail']['user'], to_addr, body)
return
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
if (argc < 3):
print('USAGE: python gmail.py address message')
raise SystemExit(0)
else:
to_addr = argvs[1]
message = argvs[2]
gmail(message, to_addr)
|
mit
| -1,493,291,774,116,415,200
| 24.340909
| 76
| 0.612556
| false
| 3.308605
| false
| false
| false
|
jkomiyama/duelingbanditlib
|
gather.py
|
1
|
1151
|
#!/usr/bin/env python
# coding:utf-8
#a tool for merging multiple simulation results
import sys,os,re
def avg(elems):
return sum(elems)/float(len(elems))
def splitavg(splits):
l = len(splits[0])
for sp in splits:
if len(sp) != l:
print "split size not match"
sys.exit()
sums = [0 for i in range(l)]
for sp in splits:
for i in range(l):
sums[i] += float(sp[i])
return map(lambda i:i/len(splits), sums)
def gather(filenames):
lines_files = []
for afile in filenames:
lines_files.append([line.strip() for line in file(afile, "r").readlines() if len(line)>0])
l = 0
for i in range(len(lines_files)-1):
if len(lines_files[i]) != len(lines_files[i+1]):
print "line num does not match!"
sys.exit()
while l < len(lines_files[0]):
if len(lines_files[0][l])==0:
pass
elif lines_files[0][l][0]=="#":
print lines_files[0][l]
else:
splits = [lines_files[i][l].split(" ") for i in range(len(lines_files))]
avgs = splitavg(splits)
avgs[0] = int(avgs[0])
print " ".join(map(str, avgs))
l+=1
if __name__ == "__main__":
gather(sys.argv[1:])
|
mit
| -3,247,382,617,199,091,000
| 24.577778
| 94
| 0.591659
| false
| 2.8775
| false
| false
| false
|
jadsonjs/DataScience
|
python/arrays_dimesion.py
|
1
|
2664
|
# Consider the case where you have one sequence of multiple time steps and one feature.
from numpy import array
data = array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# We can then use the reshape() function on the NumPy array to reshape this one-dimensional array
# into a three-dimensional array with 1 sample, 10 time steps, and 1 feature at each time step.
data = data.reshape((1, 10, 1))
print(data.shape)
# Consider the case where you have multiple parallel series as input for your model.
# For example, this could be two parallel series of 10 values:
#series 1: 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0
#series 2: 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
from numpy import array
data = array([ [0.1, 1.0],
[0.2, 0.9],
[0.3, 0.8],
[0.4, 0.7],
[0.5, 0.6],
[0.6, 0.5],
[0.7, 0.4],
[0.8, 0.3],
[0.9, 0.2],
[1.0, 0.1]])
#This data can be framed as 1 sample with 10 time steps and 2 features.
#It can be reshaped as a 3D array as follows:
#model = Sequential()
#model.add(LSTM(32, input_shape=(10, 2)))
#model.add(Dense(1))
data = data.reshape(1, 10, 2)
print(data.shape)
#Here, we have 25 samples, 200 time steps per sample, and 1 feature
# first rebuild a long univariate series of 5,000 values to split up
# (the series values here are illustrative placeholders)
data = array([i * 0.001 for i in range(5000)])
n = len(data)
# split into samples (e.g. 5000/200 = 25)
samples = list()
length = 200
# step over the 5,000 in jumps of 200
for i in range(0, n, length):
# grab from i to i + 200
sample = data[i:i+length]
samples.append(sample)
print(len(samples))
data = array(samples)
print(data.shape)
# reshape into [samples, timesteps, features]
# expect [25, 200, 1]
data = data.reshape((len(samples), length, 1))
print(data.shape)
#https://machinelearningmastery.com/reshape-input-data-long-short-term-memory-networks-keras/
#
#For a feed-forward network, your input has the shape (number of samples, number of features). With an LSTM/RNN, you add a time dimension,
#and your input shape becomes (number of samples, number of timesteps, number of features). This is in the documentation.
#So if your feature dimension is 5, and you have 2 timesteps, your input could look like
#[ [
# [1,2,3,4,5],
# [2,3,4,5,6]
# ],
# [
# [2,4,6,8,0],
# [9,8,7,6,5]
# ]
#]
#Your output shape depends on how you configure the net. If your LSTM/RNN has return_sequences=False, you'll have one label
#per sequence;
#if you set return_sequences=True, you'll have one label per timestep.
#So in the example, [ [[1,2,3,4,5], [2,3,4,5,6]], [[2,4,6,8,0], [9,8,7,6,5]] ]
#input_shape is (2, 2, 5).
#And a 'sequence' is '[[1,2,3,4,5], [2,3,4,5,6]]' I assume.
#and has 2 timesteps
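#A commented-out sketch tying this together (hedged: assumes Keras is
#available; the layer size of 32 is illustrative, not prescribed above):
#from keras.models import Sequential
#from keras.layers import LSTM, Dense
#model = Sequential()
#model.add(LSTM(32, return_sequences=False, input_shape=(200, 1)))
#model.add(Dense(1))
#With return_sequences=False the LSTM emits one vector per sample, so fitting
#on the (25, 200, 1) array built above yields one label per sequence;
#return_sequences=True would instead emit one output per timestep.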
|
apache-2.0
| -1,277,704,131,615,162,400
| 25.64
| 139
| 0.631381
| false
| 2.648111
| false
| false
| false
|
SegundoBob/GNXrepeats
|
hrngpS2.py
|
1
|
1831
|
#!/usr/bin/python
#coding=utf-8
#@+leo-ver=5-thin
#@+node:bob07.20140715160011.1575: * @file hrngpS2.py
#@@first
#@@first
#@@language python
#@@tabwidth -4
import os
import sys
import leo.core.leoBridge as leoBridge
from leo_lib import lib_leo03
#@+others
#@+node:bob07.20140715160011.1576: ** gnxRepeats()
def gnxRepeats(cmdrx, infoList):
hrnGnx = cmdrx.hiddenRootNode.gnx
gnxDict = {hrnGnx: cmdrx.hiddenRootNode.h}
errorFlag = False
for vnode in lib_leo03.bffvWalk(cmdrx):
hdr = '"{0}"'.format(vnode.h)
if vnode.gnx in gnxDict:
errorFlag = True
hdr = '"{0}" {1}'.format(gnxDict[vnode.gnx], hdr)
gnxDict[vnode.gnx] = hdr
infoList.append('Error: {0}'.format(errorFlag))
gnxList = gnxDict.keys()
gnxList.sort()
for gnx in gnxList:
infoList.append('{gnx} {hdrs}'.format(gnx=gnx, hdrs=gnxDict[gnx]))
#@-others
TestDir = 'hidden_root_tsts'
def main():
infoList = list()
fpn1 = sys.argv[1]
bridge = leoBridge.controller(gui='nullGui', verbose=False,
loadPlugins=False, readSettings=False)
leoG = bridge.globals()
infoList.append('After bridge create: {0}'.format(leoG.app.nodeIndices.lastIndex))
cmdr1 = bridge.openLeoFile(fpn1)
infoList.append('After {fpn} open: {idx}'.format(fpn=fpn1, idx=leoG.app.nodeIndices.lastIndex))
rp = cmdr1.rootPosition()
posx = rp.insertAfter()
posx.h = '{cnt} - {idx}'.format(cnt=2, idx=1)
infoList.append('After adding 1 vnode: {idx}'.format(fpn=fpn1, idx=leoG.app.nodeIndices.lastIndex))
gnxRepeats(cmdr1, infoList)
cmdr1.save()
cmdr1.close()
fpnError = os.path.join(TestDir, 'SlaveLog.txt')
fdError = open(fpnError, 'w')
fdError.write('\n'.join(infoList) + '\n')
fdError.close()
if __name__ == "__main__":
main()
#@-leo
|
mit
| 6,549,371,217,770,367,000
| 27.609375
| 103
| 0.647187
| false
| 2.653623
| false
| false
| false
|
SINGROUP/pycp2k
|
pycp2k/classes/_opt_ri_basis3.py
|
1
|
1543
|
from pycp2k.inputsection import InputSection
class _opt_ri_basis3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Delta_i_rel = None
self.Delta_ri = None
self.Eps_deriv = None
self.Max_iter = None
self.Num_func = None
self.Basis_size = None
self._name = "OPT_RI_BASIS"
self._keywords = {'Num_func': 'NUM_FUNC', 'Delta_i_rel': 'DELTA_I_REL', 'Basis_size': 'BASIS_SIZE', 'Delta_ri': 'DELTA_RI', 'Eps_deriv': 'EPS_DERIV', 'Max_iter': 'MAX_ITER'}
self._aliases = {'Max_num_iter': 'Max_iter', 'Dri': 'Delta_ri', 'Di_rel': 'Delta_i_rel', 'Eps_num_deriv': 'Eps_deriv'}
@property
def Di_rel(self):
"""
See documentation for Delta_i_rel
"""
return self.Delta_i_rel
@property
def Dri(self):
"""
See documentation for Delta_ri
"""
return self.Delta_ri
@property
def Eps_num_deriv(self):
"""
See documentation for Eps_deriv
"""
return self.Eps_deriv
@property
def Max_num_iter(self):
"""
See documentation for Max_iter
"""
return self.Max_iter
@Di_rel.setter
def Di_rel(self, value):
self.Delta_i_rel = value
@Dri.setter
def Dri(self, value):
self.Delta_ri = value
@Eps_num_deriv.setter
def Eps_num_deriv(self, value):
self.Eps_deriv = value
@Max_num_iter.setter
def Max_num_iter(self, value):
self.Max_iter = value
|
lgpl-3.0
| 7,370,216,220,963,202,000
| 24.716667
| 181
| 0.552819
| false
| 3.269068
| false
| false
| false
|
uglyfruitcake/Axelrod
|
axelrod/tests/unit/test_cooperator.py
|
1
|
1499
|
"""Test for the cooperator strategy."""
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestCooperator(TestPlayer):
name = "Cooperator"
player = axelrod.Cooperator
expected_classifier = {
'memory_depth': 0,
'stochastic': False,
'inspects_source': False,
        'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating."""
self.first_play_test(C)
def test_effect_of_strategy(self):
"""Simply does the opposite to what the strategy did last time."""
self.markov_test([C, C, C, C])
class TestTrickyCooperator(TestPlayer):
name = "Tricky Cooperator"
player = axelrod.TrickyCooperator
expected_classifier = {
'memory_depth': 10,
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating."""
self.first_play_test(C)
def test_effect_of_strategy(self):
"""Test if it tries to trick opponent"""
self.responses_test([C, C, C], [C, C, C], [D])
self.responses_test([C, C, C, D, D], [C, C, C, C, D], [C])
history = [C, C, C, D, D] + [C] * 11
        opponent_history = [C, C, C, C, D] + [D] + [C] * 10
        self.responses_test(history, opponent_history, [D])
|
mit
| -220,543,415,429,763,360
| 27.283019
| 74
| 0.588392
| false
| 3.301762
| true
| false
| false
|
souravbadami/oppia
|
core/storage/base_model/gae_models.py
|
1
|
36051
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base model class."""
from constants import constants
from core.platform import models
import utils
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
transaction_services = models.Registry.import_transaction_services()
# The delimiter used to separate the version number from the model instance
# id. To get the instance id from a snapshot id, use Python's rfind()
# method to find the location of this delimiter.
_VERSION_DELIMITER = '-'
# Constants used for generating ids.
MAX_RETRIES = 10
RAND_RANGE = (1 << 30) - 1
ID_LENGTH = 12
class BaseModel(ndb.Model):
"""Base model for all persistent object storage classes."""
# When this entity was first created. This can be overwritten and
# set explicitly.
created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
# When this entity was last updated. This cannot be set directly.
last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True)
# Whether the current version of the model instance is deleted.
deleted = ndb.BooleanProperty(indexed=True, default=False)
@property
def id(self):
"""A unique id for this model instance."""
return self.key.id()
def _pre_put_hook(self):
"""This is run before model instances are saved to the datastore.
Subclasses of BaseModel should override this method.
"""
pass
class EntityNotFoundError(Exception):
"""Raised when no entity for a given id exists in the datastore."""
pass
@staticmethod
def export_data(user_id):
"""This method should be implemented by subclasses.
Args:
user_id: str. The ID of the user whose data should be exported.
Raises:
NotImplementedError: The method is not overwritten in derived
classes.
"""
raise NotImplementedError
@classmethod
def get(cls, entity_id, strict=True):
"""Gets an entity by id.
Args:
entity_id: str.
strict: bool. Whether to fail noisily if no entity with the given id
exists in the datastore. Default is True.
Returns:
None, if strict == False and no undeleted entity with the given id
exists in the datastore. Otherwise, the entity instance that
corresponds to the given id.
Raises:
base_models.BaseModel.EntityNotFoundError: if strict == True and
no undeleted entity with the given id exists in the datastore.
"""
entity = cls.get_by_id(entity_id)
if entity and entity.deleted:
entity = None
if strict and entity is None:
raise cls.EntityNotFoundError(
'Entity for class %s with id %s not found' %
(cls.__name__, entity_id))
return entity
@classmethod
def get_multi(cls, entity_ids, include_deleted=False):
"""Gets list of entities by list of ids.
Args:
entity_ids: list(str).
include_deleted: bool. Whether to include deleted entities in the
return list. Default is False.
Returns:
list(*|None). A list that contains model instances that match
the corresponding entity_ids in the input list. If an instance is
not found, or it has been deleted and include_deleted is False,
then the corresponding entry is None.
"""
entity_keys = []
none_argument_indices = []
for index, entity_id in enumerate(entity_ids):
if entity_id:
entity_keys.append(ndb.Key(cls, entity_id))
else:
none_argument_indices.append(index)
entities = ndb.get_multi(entity_keys)
for index in none_argument_indices:
entities.insert(index, None)
if not include_deleted:
for i in xrange(len(entities)):
if entities[i] and entities[i].deleted:
entities[i] = None
return entities
@classmethod
def put_multi(cls, entities):
"""Stores the given ndb.Model instances.
Args:
entities: list(ndb.Model).
"""
ndb.put_multi(entities)
@classmethod
def delete_multi(cls, entities):
"""Deletes the given ndb.Model instances.
Args:
entities: list(ndb.Model).
"""
keys = [entity.key for entity in entities]
ndb.delete_multi(keys)
def delete(self):
"""Deletes this instance."""
super(BaseModel, self).key.delete()
@classmethod
def get_all(cls, include_deleted=False):
"""Gets iterable of all entities of this class.
Args:
include_deleted: bool. If True, then entities that have been marked
deleted are returned as well. Defaults to False.
Returns:
iterable. Filterable iterable of all entities of this class.
"""
query = cls.query()
if not include_deleted:
query = query.filter(cls.deleted == False) # pylint: disable=singleton-comparison
return query
@classmethod
def get_new_id(cls, entity_name):
"""Gets a new id for an entity, based on its name.
The returned id is guaranteed to be unique among all instances of this
entity.
Args:
entity_name: The name of the entity. Coerced to a utf-8 encoded
string. Defaults to ''.
Returns:
str. New unique id for this entity class.
Raises:
Exception: An ID cannot be generated within a reasonable number
of attempts.
"""
try:
entity_name = unicode(entity_name).encode(encoding='utf-8')
except Exception:
entity_name = ''
for _ in range(MAX_RETRIES):
new_id = utils.convert_to_hash(
'%s%s' % (entity_name, utils.get_random_int(RAND_RANGE)),
ID_LENGTH)
if not cls.get_by_id(new_id):
return new_id
raise Exception('New id generator is producing too many collisions.')
@classmethod
def _fetch_page_sorted_by_last_updated(
cls, query, page_size, urlsafe_start_cursor):
"""Fetches a page of entities sorted by their last_updated attribute in
descending order (newly updated first).
Args:
query: ndb.Query.
page_size: int. The maximum number of entities to be returned.
urlsafe_start_cursor: str or None. If provided, the list of returned
entities starts from this datastore cursor. Otherwise,
the returned entities start from the beginning of the full
list of entities.
Returns:
3-tuple of (results, cursor, more) as described in fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass,
where:
results: List of query results.
cursor: str or None. A query cursor pointing to the next batch
of results. If there are no more results, this will be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
if urlsafe_start_cursor:
start_cursor = datastore_query.Cursor(urlsafe=urlsafe_start_cursor)
else:
start_cursor = None
result = query.order(-cls.last_updated).fetch_page(
page_size, start_cursor=start_cursor)
return (
result[0],
(result[1].urlsafe() if result[1] else None),
result[2])
class BaseCommitLogEntryModel(BaseModel):
"""Base Model for the models that store the log of commits to a
construct.
"""
# Update superclass model to make these properties indexed.
created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True)
# The id of the user.
user_id = ndb.StringProperty(indexed=True, required=True)
# The username of the user, at the time of the edit.
username = ndb.StringProperty(indexed=True, required=True)
# The type of the commit: 'create', 'revert', 'edit', 'delete'.
commit_type = ndb.StringProperty(indexed=True, required=True)
# The commit message.
commit_message = ndb.TextProperty(indexed=False)
# The commit_cmds dict for this commit.
commit_cmds = ndb.JsonProperty(indexed=False, required=True)
# The status of the entity after the edit event ('private', 'public').
post_commit_status = ndb.StringProperty(indexed=True, required=True)
# Whether the entity is community-owned after the edit event.
post_commit_community_owned = ndb.BooleanProperty(indexed=True)
# Whether the entity is private after the edit event. Having a
# separate field for this makes queries faster, since an equality query
# on this property is faster than an inequality query on
# post_commit_status.
post_commit_is_private = ndb.BooleanProperty(indexed=True)
# The version number of the model after this commit.
version = ndb.IntegerProperty()
@classmethod
def create(
cls, entity_id, version, committer_id, committer_username,
commit_type, commit_message, commit_cmds, status,
community_owned):
"""This method returns an instance of the CommitLogEntryModel for a
construct with the common fields filled.
Args:
entity_id: str. The ID of the construct corresponding to this
commit log entry model (e.g. the exp_id for an exploration,
the story_id for a story, etc.).
version: int. The version number of the model after the commit.
committer_id: str. The user_id of the user who committed the
change.
committer_username: str. The username of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
status: str. The status of the entity after the commit.
community_owned: bool. Whether the entity is community_owned after
the commit.
Returns:
CommitLogEntryModel. Returns the respective CommitLogEntryModel
instance of the construct from which this is called.
"""
return cls(
id=cls._get_instance_id(entity_id, version),
user_id=committer_id,
username=committer_username,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=version,
post_commit_status=status,
post_commit_community_owned=community_owned,
post_commit_is_private=(
status == constants.ACTIVITY_STATUS_PRIVATE)
)
@classmethod
def _get_instance_id(cls, target_entity_id, version):
"""This method should be implemented in the inherited classes.
Args:
target_entity_id: str. The ID of the construct corresponding to this
commit log entry model (e.g. the exp_id for an exploration,
the story_id for a story, etc.).
version: int. The version number of the model after the commit.
Raises:
NotImplementedError: The method is not overwritten in derived
classes.
"""
raise NotImplementedError
@classmethod
def get_all_commits(cls, page_size, urlsafe_start_cursor):
"""Fetches a list of all the commits sorted by their last updated
attribute.
Args:
page_size: int. The maximum number of entities to be returned.
urlsafe_start_cursor: str or None. If provided, the list of
returned entities starts from this datastore cursor.
Otherwise, the returned entities start from the beginning
of the full list of entities.
Returns:
3-tuple of (results, cursor, more) as described in fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass,
where:
results: List of query results.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this might
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
return cls._fetch_page_sorted_by_last_updated(
cls.query(), page_size, urlsafe_start_cursor)
@classmethod
def get_commit(cls, target_entity_id, version):
"""Returns the commit corresponding to an instance id and
version number.
Args:
target_entity_id: str. The ID of the construct corresponding to this
commit log entry model (e.g. the exp_id for an exploration,
the story_id for a story, etc.).
version: int. The version number of the instance
after the commit.
Returns:
BaseCommitLogEntryModel. The commit with the target entity id and
version number.
"""
commit_id = cls._get_instance_id(target_entity_id, version)
return cls.get_by_id(commit_id)
class VersionedModel(BaseModel):
"""Model that handles storage of the version history of model instances.
To use this class, you must declare a SNAPSHOT_METADATA_CLASS and a
SNAPSHOT_CONTENT_CLASS. The former must contain the String fields
'committer_id', 'commit_type' and 'commit_message', and a JSON field for
the Python list of dicts, 'commit_cmds'. The latter must contain the JSON
field 'content'. The item that is being versioned must be serializable to a
JSON blob.
Note that commit() should be used for VersionedModels, as opposed to put()
for direct subclasses of BaseModel.
"""
# The class designated as the snapshot model. This should be a subclass of
# BaseSnapshotMetadataModel.
SNAPSHOT_METADATA_CLASS = None
# The class designated as the snapshot content model. This should be a
# subclass of BaseSnapshotContentModel.
SNAPSHOT_CONTENT_CLASS = None
# Whether reverting is allowed. Default is False.
ALLOW_REVERT = False
# IMPORTANT: Subclasses should only overwrite things above this line.
# The possible commit types.
_COMMIT_TYPE_CREATE = 'create'
_COMMIT_TYPE_REVERT = 'revert'
_COMMIT_TYPE_EDIT = 'edit'
_COMMIT_TYPE_DELETE = 'delete'
# A list containing the possible commit types.
COMMIT_TYPE_CHOICES = [
_COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT,
_COMMIT_TYPE_DELETE
]
# The reserved prefix for keys that are automatically inserted into a
# commit_cmd dict by this model.
_AUTOGENERATED_PREFIX = 'AUTO'
# The command string for a revert commit.
CMD_REVERT_COMMIT = '%s_revert_version_number' % _AUTOGENERATED_PREFIX
# The command string for a delete commit.
CMD_DELETE_COMMIT = '%s_mark_deleted' % _AUTOGENERATED_PREFIX
# The current version number of this instance. In each PUT operation,
# this number is incremented and a snapshot of the modified instance is
# stored in the snapshot metadata and content models. The snapshot
# version number starts at 1 when the model instance is first created.
# All data in this instance represents the version at HEAD; data about the
# previous versions is stored in the snapshot models.
version = ndb.IntegerProperty(default=0)
def _require_not_marked_deleted(self):
"""Checks whether the model instance is deleted."""
if self.deleted:
raise Exception('This model instance has been deleted.')
def _compute_snapshot(self):
"""Generates a snapshot (dict) from the model property values."""
return self.to_dict(exclude=['created_on', 'last_updated'])
def _reconstitute(self, snapshot_dict):
"""Populates the model instance with the snapshot.
Args:
snapshot_dict: dict(str, *). The snapshot with the model
property values.
Returns:
VersionedModel. The instance of the VersionedModel class populated
            with the snapshot.
"""
self.populate(**snapshot_dict)
return self
def _reconstitute_from_snapshot_id(self, snapshot_id):
"""Gets a reconstituted instance of this model class, based on the given
snapshot id.
Args:
snapshot_id: str.
Returns:
VersionedModel. Reconstituted instance.
"""
snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id)
snapshot_dict = snapshot_model.content
reconstituted_model = self._reconstitute(snapshot_dict)
# TODO(sll): The 'created_on' and 'last_updated' values here will be
# slightly different from the values the entity model would have had,
# since they correspond to the corresponding fields for the snapshot
# content model instead. Figure out whether this is a problem or not,
# and whether we need to record the contents of those fields in the
# actual entity model (in which case we also need a way to deal with
# old snapshots that don't have this information).
reconstituted_model.created_on = snapshot_model.created_on
reconstituted_model.last_updated = snapshot_model.last_updated
return reconstituted_model
@classmethod
def _get_snapshot_id(cls, instance_id, version_number):
"""Gets a unique snapshot id for this instance and version.
Args:
instance_id: str.
version_number: int.
Returns:
str. The unique snapshot id corresponding to the given instance and
version.
"""
return '%s%s%s' % (
instance_id, _VERSION_DELIMITER, version_number)
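# Illustration only (assuming _VERSION_DELIMITER, defined earlier in this
# module, is a simple separator such as '-'):
#   _get_snapshot_id('exp_id_0', 3) would produce something like 'exp_id_0-3'.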
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Evaluates and executes commit. Main function for all commit types.
Args:
committer_id: str. The user_id of the user who committed the change.
commit_type: str. Unique identifier of commit type. Possible values
are in COMMIT_TYPE_CHOICES.
commit_message: str.
commit_cmds: list(dict). A list of commands describing the changes
made in this model; it should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command. For example:
{'cmd': 'AUTO_revert_version_number',
'version_number': 4}
Raises:
Exception: No snapshot metadata class has been defined.
Exception: No snapshot content class has been defined.
Exception: commit_cmds is not a list of dicts.
"""
if self.SNAPSHOT_METADATA_CLASS is None:
raise Exception('No snapshot metadata class defined.')
if self.SNAPSHOT_CONTENT_CLASS is None:
raise Exception('No snapshot content class defined.')
if not isinstance(commit_cmds, list):
raise Exception(
'Expected commit_cmds to be a list of dicts, received %s'
% commit_cmds)
self.version += 1
snapshot = self._compute_snapshot()
snapshot_id = self._get_snapshot_id(self.id, self.version)
snapshot_metadata_instance = self.SNAPSHOT_METADATA_CLASS( # pylint: disable=not-callable
id=snapshot_id, committer_id=committer_id, commit_type=commit_type,
commit_message=commit_message, commit_cmds=commit_cmds)
snapshot_content_instance = self.SNAPSHOT_CONTENT_CLASS( # pylint: disable=not-callable
id=snapshot_id, content=snapshot)
transaction_services.run_in_transaction(
ndb.put_multi,
[snapshot_metadata_instance, snapshot_content_instance, self])
def delete(self, committer_id, commit_message, force_deletion=False):
"""Deletes this model instance.
Args:
committer_id: str. The user_id of the user who committed the change.
commit_message: str.
force_deletion: bool. If True this model is deleted
completely from storage, otherwise it is only marked as deleted.
Default is False.
Raises:
Exception: This model instance has already been deleted.
"""
if force_deletion:
current_version = self.version
version_numbers = [str(num + 1) for num in range(current_version)]
snapshot_ids = [
self._get_snapshot_id(self.id, version_number)
for version_number in version_numbers]
metadata_keys = [
ndb.Key(self.SNAPSHOT_METADATA_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
ndb.delete_multi(metadata_keys)
content_keys = [
ndb.Key(self.SNAPSHOT_CONTENT_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
ndb.delete_multi(content_keys)
super(VersionedModel, self).delete()
else:
self._require_not_marked_deleted() # pylint: disable=protected-access
self.deleted = True
commit_cmds = [{
'cmd': self.CMD_DELETE_COMMIT
}]
self._trusted_commit(
committer_id, self._COMMIT_TYPE_DELETE, commit_message,
commit_cmds)
def put(self, *args, **kwargs):
"""For VersionedModels, this method is replaced with commit()."""
raise NotImplementedError
def commit(self, committer_id, commit_message, commit_cmds):
"""Saves a version snapshot and updates the model.
Args:
committer_id: str. The user_id of the user who committed the change.
commit_message: str.
commit_cmds: list(dict). A list of commands describing the changes
made in this model; it should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command. For example:
{'cmd': 'AUTO_revert_version_number',
'version_number': 4}
Raises:
Exception: This model instance has already been deleted.
Exception: commit_cmd is in invalid format.
"""
self._require_not_marked_deleted()
for item in commit_cmds:
if not isinstance(item, dict):
raise Exception(
'Expected commit_cmds to be a list of dicts, received %s'
% commit_cmds)
for commit_cmd in commit_cmds:
if 'cmd' not in commit_cmd:
raise Exception(
'Invalid commit_cmd: %s. Expected a \'cmd\' key.'
% commit_cmd)
if commit_cmd['cmd'].startswith(self._AUTOGENERATED_PREFIX):
raise Exception(
'Invalid change list command: %s' % commit_cmd['cmd'])
commit_type = (
self._COMMIT_TYPE_CREATE if self.version == 0 else
self._COMMIT_TYPE_EDIT)
self._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
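# A hedged usage sketch (the command name and arguments are illustrative,
# not taken from this codebase):
#   model.commit(
#       committer_id='user_id_0',
#       commit_message='Edit title',
#       commit_cmds=[{'cmd': 'edit_property',
#                     'property_name': 'title',
#                     'new_value': 'New title'}])
# Note that the 'cmd' value must not start with the reserved 'AUTO' prefix,
# otherwise the validation above raises an exception.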
@classmethod
def revert(cls, model, committer_id, commit_message, version_number):
"""Reverts model to previous version.
Args:
model: VersionedModel.
committer_id: str. The user_id of the user who committed the change.
commit_message: str.
version_number: int. Version to revert to.
Raises:
Exception: This model instance has been deleted.
Exception: Reverting is not allowed on this model.
"""
model._require_not_marked_deleted() # pylint: disable=protected-access
if not model.ALLOW_REVERT:
raise Exception(
'Reverting objects of type %s is not allowed.'
% model.__class__.__name__)
commit_cmds = [{
'cmd': model.CMD_REVERT_COMMIT,
'version_number': version_number
}]
# Do not overwrite the version number.
current_version = model.version
# If a new property is introduced after a certain version of a model,
# the property should be its default value when an old snapshot of the
# model is applied during reversion. E.g. states_schema_version in
# ExplorationModel may be added after some version of a saved
# exploration. If that exploration is reverted to a version that does
# not have a states_schema_version property, it should revert to the
# default states_schema_version value rather than taking the
# states_schema_version value from the latest exploration version.
# pylint: disable=protected-access
snapshot_id = model._get_snapshot_id(model.id, version_number)
new_model = cls(id=model.id)
new_model._reconstitute_from_snapshot_id(snapshot_id)
new_model.version = current_version
new_model._trusted_commit(
committer_id, cls._COMMIT_TYPE_REVERT, commit_message,
commit_cmds)
# pylint: enable=protected-access
@classmethod
def get_version(cls, entity_id, version_number):
"""Gets model instance representing the given version.
The snapshot content is used to populate this model instance. The
snapshot metadata is not used.
Args:
entity_id: str.
version_number: int.
Returns:
VersionedModel. Model instance representing given version.
Raises:
Exception: This model instance has been deleted.
"""
# pylint: disable=protected-access
cls.get(entity_id)._require_not_marked_deleted()
snapshot_id = cls._get_snapshot_id(entity_id, version_number)
return cls(
id=entity_id,
version=version_number)._reconstitute_from_snapshot_id(snapshot_id)
# pylint: enable=protected-access
@classmethod
def get_multi_versions(cls, entity_id, version_numbers):
"""Gets model instances for each version specified in version_numbers.
Args:
entity_id: str. ID of the entity.
version_numbers: list(int). List of version numbers.
Returns:
list(VersionedModel). Model instances representing the given
versions.
Raises:
ValueError. The given entity_id is invalid.
ValueError. Requested version number cannot be higher than the
current version number.
ValueError. At least one version number is invalid.
"""
instances = []
entity = cls.get(entity_id, strict=False)
if not entity:
raise ValueError('The given entity_id %s is invalid.' % (entity_id))
current_version = entity.version
max_version = max(version_numbers)
if max_version > current_version:
raise ValueError(
'Requested version number %s cannot be higher than the current '
'version number %s.' % (max_version, current_version))
snapshot_ids = []
# pylint: disable=protected-access
for version in version_numbers:
snapshot_id = cls._get_snapshot_id(entity_id, version)
snapshot_ids.append(snapshot_id)
snapshot_models = cls.SNAPSHOT_CONTENT_CLASS.get_multi(snapshot_ids)
for snapshot_model in snapshot_models:
if snapshot_model is None:
raise ValueError(
'At least one version number is invalid.')
snapshot_dict = snapshot_model.content
reconstituted_model = cls(id=entity_id)._reconstitute(
snapshot_dict)
reconstituted_model.created_on = snapshot_model.created_on
reconstituted_model.last_updated = snapshot_model.last_updated
instances.append(reconstituted_model)
# pylint: enable=protected-access
return instances
@classmethod
def get(cls, entity_id, strict=True, version=None):
"""Gets model instance.
Args:
entity_id: str.
strict: bool. Whether to fail noisily if no entity with the given id
exists in the datastore. Default is True.
version: int. Version we want to get. Default is None.
Returns:
VersionedModel. If version is None, get the newest version of the
model. Otherwise, get the specified version.
"""
if version is None:
return super(VersionedModel, cls).get(entity_id, strict=strict)
else:
return cls.get_version(entity_id, version)
@classmethod
def get_snapshots_metadata(
cls, model_instance_id, version_numbers, allow_deleted=False):
"""Gets a list of dicts, each representing a model snapshot.
One dict is returned for each version number in the list of version
numbers requested. If any of the version numbers does not exist, an
error is raised.
Args:
model_instance_id: str. Id of requested model.
version_numbers: list(int). List of version numbers.
allow_deleted: bool. If False, an error is raised if the current
model has been deleted. Default is False.
Returns:
list(dict). Each dict contains metadata for a particular snapshot.
It has the following keys:
committer_id: str. The user_id of the user who committed the
change.
commit_message: str.
commit_cmds: list(dict). A list of commands describing the changes
made in this model; it should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command. For example:
{'cmd': 'AUTO_revert_version_number',
'version_number': 4}
commit_type: str. Unique identifier of commit type. Possible
values are in COMMIT_TYPE_CHOICES.
version_number: int.
created_on_ms: float. Snapshot creation time in milliseconds
since the Epoch.
Raises:
Exception: There is no model instance corresponding to at least one
of the given version numbers.
"""
# pylint: disable=protected-access
if not allow_deleted:
cls.get(model_instance_id)._require_not_marked_deleted()
snapshot_ids = [
cls._get_snapshot_id(model_instance_id, version_number)
for version_number in version_numbers]
# pylint: enable=protected-access
metadata_keys = [
ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
returned_models = ndb.get_multi(metadata_keys)
for ind, model in enumerate(returned_models):
if model is None:
raise Exception(
'Invalid version number %s for model %s with id %s'
% (version_numbers[ind], cls.__name__, model_instance_id))
return [{
'committer_id': model.committer_id,
'commit_message': model.commit_message,
'commit_cmds': model.commit_cmds,
'commit_type': model.commit_type,
'version_number': version_numbers[ind],
'created_on_ms': utils.get_time_in_millisecs(model.created_on),
} for (ind, model) in enumerate(returned_models)]
class BaseSnapshotMetadataModel(BaseModel):
"""Base class for snapshot metadata classes.
The id of this model is computed using VersionedModel.get_snapshot_id().
"""
# The id of the user who committed this revision.
committer_id = ndb.StringProperty(required=True)
# The type of the commit associated with this snapshot.
commit_type = ndb.StringProperty(
required=True, choices=VersionedModel.COMMIT_TYPE_CHOICES)
# The commit message associated with this snapshot.
commit_message = ndb.TextProperty(indexed=False)
# A sequence of commands that can be used to describe this commit.
# Represented as a list of dicts.
commit_cmds = ndb.JsonProperty(indexed=False)
def get_unversioned_instance_id(self):
"""Gets the instance id from the snapshot id.
Returns:
str. Instance id part of snapshot id.
"""
return self.id[:self.id.rfind(_VERSION_DELIMITER)]
def get_version_string(self):
"""Gets the version number from the snapshot id.
Returns:
str. Version number part of snapshot id.
"""
return self.id[self.id.rfind(_VERSION_DELIMITER) + 1:]
class BaseSnapshotContentModel(BaseModel):
"""Base class for snapshot content classes.
The id of this model is computed using VersionedModel.get_snapshot_id().
"""
# The snapshot content, as a JSON blob.
content = ndb.JsonProperty(indexed=False)
def get_unversioned_instance_id(self):
"""Gets the instance id from the snapshot id.
Returns:
str. Instance id part of snapshot id.
"""
return self.id[:self.id.rfind(_VERSION_DELIMITER)]
def get_version_string(self):
"""Gets the version number from the snapshot id.
Returns:
str. Version number part of snapshot id.
"""
return self.id[self.id.rfind(_VERSION_DELIMITER) + 1:]
class BaseMapReduceBatchResultsModel(BaseModel):
"""Base model for batch storage for MR jobs.
This model turns off caching, because this results in stale data being
shown after each MapReduce job run. Classes which are used by a MR job to
store its batch results should subclass this class.
"""
_use_cache = False
_use_memcache = False
|
apache-2.0
| 8,921,728,561,893,953,000
| 38.100868
| 98
| 0.618596
| false
| 4.546727
| false
| false
| false
|
pettarin/penelope
|
setup.py
|
1
|
2657
|
#!/usr/bin/env python
# coding=utf-8
"""
Set penelope package up
"""
from setuptools import Extension
from setuptools import setup
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2012-2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "3.1.3"
__email__ = "alberto@albertopettarin.it"
__status__ = "Production"
setup(
name="penelope",
packages=["penelope"],
package_data={"penelope": ["res/*"]},
version="3.1.3.0",
description="Penelope is a multi-tool for creating, editing and converting dictionaries, especially for eReader devices",
author="Alberto Pettarin",
author_email="alberto@albertopettarin.it",
url="https://github.com/pettarin/penelope",
license="MIT License",
long_description=open("README.rst", "r").read(),
install_requires=["lxml>=3.0", "marisa-trie>=0.7.2"],
scripts=["bin/penelope"],
keywords=[
"Dictionary",
"Dictionaries",
"Index",
"Merge",
"Flatten",
"eReader",
"eReaders",
"Bookeen",
"CSV",
"EPUB",
"MOBI",
"Kindle",
"Kobo",
"StarDict",
"XML",
"MARISA",
"kindlegen",
"dictzip",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Desktop Environment",
"Topic :: Documentation",
"Topic :: Office/Business",
"Topic :: Software Development :: Internationalization",
"Topic :: Software Development :: Localization",
"Topic :: Text Editors",
"Topic :: Text Editors :: Text Processing",
"Topic :: Text Processing",
"Topic :: Text Processing :: General",
"Topic :: Text Processing :: Indexing",
"Topic :: Text Processing :: Linguistic",
"Topic :: Text Processing :: Markup",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: XML",
"Topic :: Utilities"
],
)
|
mit
| -1,803,094,169,845,251,800
| 31.012048
| 125
| 0.572074
| false
| 3.924668
| false
| false
| false
|
DomainDrivenConsulting/dogen
|
projects/masd.dogen.dia/python/add_to_package.py
|
1
|
1625
|
# -*- mode: python; tab-width: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2012-2015 Marco Craveiro <marco.craveiro@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# First locate the parent
#
parent = None
for layer in dia.active_display().diagram.data.layers:
for object in layer.objects:
if object.type.name == "UML - LargePackage":
if object.properties["name"].value == "transforms":
parent = object
print "found parent"
#
# Then update all objects without a parent. Make sure all classes
# that are orphaned actually belong to this package before running.
#
if parent != None:
for layer in dia.active_display().diagram.data.layers:
for object in layer.objects:
if object.type.name == "UML - Class":
if object.parent == None:
print object.properties["name"].value
object.parent = parent
print "done"
|
gpl-3.0
| -5,694,978,738,083,802,000
| 37.690476
| 70
| 0.675077
| false
| 4.072682
| false
| false
| false
|
kikocorreoso/mplutils
|
mplutils/axes.py
|
1
|
8516
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 23:43:37 2016
@author: kiko
"""
from __future__ import division, absolute_import
from .settings import RICH_DISPLAY
import numpy as np
if RICH_DISPLAY:
from IPython.display import display
def axes_set_better_defaults(ax,
axes_color = '#777777',
grid = False,
show = False):
"""
Enter an Axes instance and it will change the defaults to an opinionated
version of how a simple plot should be.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
axes_color : str
A string indicating a valid matplotlib color.
grid : bool
If `True` the grid of the axes will be shown, if `False` (default)
the grid, if active, will be suppressed.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_axis_bgcolor((1, 1, 1))
ax.grid(grid)
for key in ax.spines.keys():
if ax.spines[key].get_visible():
ax.spines[key].set_color(axes_color)
ax.tick_params(axis = 'x', colors = axes_color)
ax.tick_params(axis = 'y', colors = axes_color)
ax.figure.set_facecolor('white')
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
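# Usage sketch, kept as a comment so importing this module stays side-effect
# free (assumes matplotlib is available):
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([0, 1, 2], [0, 1, 4])
#   axes_set_better_defaults(ax, axes_color='#333333', grid=True, show=True)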
# http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html
def axes_set_axis_position(ax,
spines = ['bottom', 'left'],
pan = 0,
show = False):
"""
Enter an Axes instance and, depending on the options, it will display
only the axis spines you selected.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
spines : str or iterable
A string or an iterable of strings with the following valid options:
'bottom' : To active the bottom x-axis.
'top' : To active the top x-axis.
'left' : To active the left y-axis.
'right' : To active the right y-axis.
pan : int or iterable
An integer value or an iterable of integer values indicating the value
to pan the axis. It has to have the same length and the same order
as the spines input.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
if np.isscalar(spines):
spines = (spines,)
len_spines = 1
else:
len_spines = len(spines)
if np.isscalar(pan):
pan = np.repeat(pan, len_spines)
len_pan = 1
else:
len_pan = len(pan)
if len_pan > 1 and len_pan != len_spines:
raise ValueError('Length of `spines` and `pan` mismatch. `pan` '
'should be a scalar or should have the same length as `spines`.')
i = 0
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', pan[i])) # outward by `pan` points
spine.set_smart_bounds(True)
i += 1
else:
#spine.set_color('none') # don't draw spine
spine.set_visible(False)
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
ax.tick_params(labelleft = True)
if 'right' in spines:
ax.yaxis.set_ticks_position('right')
ax.tick_params(labelright = True)
if 'left' in spines and 'right' in spines:
ax.yaxis.set_ticks_position('both')
ax.tick_params(labelleft = True, labelright = True)
if 'left' not in spines and 'right' not in spines:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(labelbottom = True)
if 'top' in spines:
ax.xaxis.set_ticks_position('top')
ax.tick_params(labeltop = True)
if 'bottom' in spines and 'top' in spines:
ax.xaxis.set_ticks_position('both')
ax.tick_params(labelbottom = True, labeltop = True)
if 'bottom' not in spines and 'top' not in spines:
ax.xaxis.set_ticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
def axes_set_origin(ax,
x = 0,
y = 0,
xticks_position = 'bottom',
yticks_position = 'left',
xticks_visible = True,
yticks_visible = True,
show = False):
"""
Function to locate the x-axis and the y-axis at the position you want.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
x : int or float
Value indicating the position on the y-axis where you want the x-axis
to be located.
y : int or float
Value indicating the position on the x-axis where you want the y-axis
to be located.
xticks_position : str
Default value is 'bottom' if you want the ticks to be located below
the x-axis. 'top' if you want the ticks to be located above the x-axis.
yticks_position : str
Default value is 'left' if you want the ticks to be located on the left
side of the y-axis. 'right' if you want the ticks to be located on the
right side of the y-axis.
xticks_visible : bool
Default value is True if you want ticks visible on the x-axis. False
if you don't want to see the ticks on the x-axis.
yticks_visible : bool
Default value is True if you want ticks visible on the y-axis. False
if you don't want to see the ticks on the y-axis.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position(xticks_position)
ax.spines['bottom'].set_position(('data', x))
ax.yaxis.set_ticks_position(yticks_position)
ax.spines['left'].set_position(('data', y))
if not xticks_visible:
ax.set_xticks([])
if not yticks_visible:
ax.set_yticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
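# Usage sketch (comment only; the values are arbitrary examples):
#   axes_set_origin(ax, x=0, y=0, xticks_position='bottom',
#                   yticks_position='left', show=True)
# places both spines through the data origin, which is convenient when
# plotting functions that cross zero.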
def axes_set_aspect_ratio(ax, ratio = 'equal', show = True):
"""
Function that accepts an Axes instance and updates it, setting the
aspect ratio of the axes to the defined quantity.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
ratio : str or int/float
The value can be a string with the following values:
'equal' : (default) same scaling from data to plot units for x and y
'auto' : automatic; fill position rectangle with data
Or a:
number (int or float) : a circle will be stretched such that the
height is num times the width. aspect=1 is the same as
aspect='equal'.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_aspect(ratio, adjustable = None)
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
|
mit
| -877,875,270,445,470,800
| 36.685841
| 80
| 0.588539
| false
| 3.95174
| false
| false
| false
|
ancafarcas/superdesk-core
|
superdesk/media/media_operations.py
|
1
|
5561
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import arrow
import magic
import hashlib
import logging
import requests
from bson import ObjectId
from io import BytesIO
from PIL import Image
from flask import json
from .image import get_meta, fix_orientation
from .video import get_meta as video_meta
import base64
from superdesk.errors import SuperdeskApiError
logger = logging.getLogger(__name__)
def hash_file(afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
def get_file_name(file):
return hash_file(file, hashlib.sha256())
def download_file_from_url(url):
rv = requests.get(url, timeout=15)
if rv.status_code not in (200, 201):
raise SuperdeskApiError.internalError('Failed to retrieve file from URL: %s' % url)
mime = magic.from_buffer(rv.content, mime=True)
ext = str(mime).split('/')[1]
name = str(ObjectId()) + ext
return BytesIO(rv.content), name, str(mime)
def download_file_from_encoded_str(encoded_str):
content = encoded_str.split(';base64,')
mime = content[0].split(':')[1]
ext = content[0].split('/')[1]
name = str(ObjectId()) + ext
content = base64.b64decode(content[1])
return BytesIO(content), name, mime
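# Expected input shape for download_file_from_encoded_str (illustrative,
# truncated payload):
#   'data:image/png;base64,iVBORw0KGgo...'
# The part before ';base64,' supplies the mime type and extension; the part
# after it is the base64-encoded file content.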
def process_file_from_stream(content, content_type=None):
content_type = content_type or content.content_type
content = BytesIO(content.read())
if 'application/' in content_type:
content_type = magic.from_buffer(content.getvalue(), mime=True)
content.seek(0)
file_type, ext = content_type.split('/')
try:
metadata = process_file(content, file_type)
except OSError: # error from PIL when image is supposed to be an image but is not.
raise SuperdeskApiError.internalError('Failed to process file')
file_name = get_file_name(content)
content.seek(0)
metadata = encode_metadata(metadata)
metadata.update({'length': json.dumps(len(content.getvalue()))})
return file_name, content_type, metadata
def encode_metadata(metadata):
return dict((k.lower(), json.dumps(v)) for k, v in metadata.items())
def decode_metadata(metadata):
return dict((k.lower(), decode_val(v)) for k, v in metadata.items())
def decode_val(string_val):
"""Format dates that elastic will try to convert automatically."""
val = json.loads(string_val)
try:
arrow.get(val, 'YYYY-MM-DD') # test if it will get matched by elastic
return str(arrow.get(val))
except (Exception):
return val
def process_file(content, type):
"""Retrieves the media file metadata
:param BytesIO content: content stream
:param str type: type of media file
:return: dict metadata related to media file.
"""
if type == 'image':
return process_image(content)
if type in ('audio', 'video'):
return process_video(content)
return {}
def process_video(content):
"""Retrieves the video/audio metadata
:param BytesIO content: content stream
:return: dict video/audio metadata
"""
content.seek(0)
meta = video_meta(content)
content.seek(0)
return meta
def process_image(content):
"""Retrieves the image metadata
:param BytesIO content: content stream
:return: dict image metadata
"""
content.seek(0)
meta = get_meta(content)
fix_orientation(content)
content.seek(0)
return meta
def _get_cropping_data(doc):
"""Get PIL Image crop data from doc with superdesk crops specs.
:param doc: crop dict
"""
if all([doc.get('CropTop', None) is not None, doc.get('CropLeft', None) is not None,
doc.get('CropRight', None) is not None, doc.get('CropBottom', None) is not None]):
return (int(doc['CropLeft']), int(doc['CropTop']), int(doc['CropRight']), int(doc['CropBottom']))
def crop_image(content, file_name, cropping_data, exact_size=None, image_format=None):
"""Crop image stream to given crop.
:param content: image file stream
:param file_name
:param cropping_data: superdesk crop dict ({'CropLeft': 0, 'CropTop': 0, ...})
:param exact_size: dict with `width` and `height` values
"""
if not isinstance(cropping_data, tuple):
cropping_data = _get_cropping_data(cropping_data)
if cropping_data:
logger.debug('Opened image {} from stream, going to crop it'.format(file_name))
content.seek(0)
img = Image.open(content)
cropped = img.crop(cropping_data)
if exact_size and 'width' in exact_size and 'height' in exact_size:
cropped = cropped.resize((int(exact_size['width']), int(exact_size['height'])), Image.ANTIALIAS)
logger.debug('Cropped image {} from stream, going to save it'.format(file_name))
try:
out = BytesIO()
cropped.save(out, image_format or img.format)
out.seek(0)
setattr(out, 'width', cropped.size[0])
setattr(out, 'height', cropped.size[1])
return True, out
except Exception as io:
logger.exception('Failed to generate crop for filename: {}. Crop: {}'.format(file_name, cropping_data))
return False, io
return False, content
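# Usage sketch (hypothetical values; the dict mirrors the superdesk crop spec
# consumed by _get_cropping_data above):
#   ok, cropped_stream = crop_image(
#       stream, 'picture.jpg',
#       {'CropLeft': 0, 'CropTop': 0, 'CropRight': 800, 'CropBottom': 600},
#       exact_size={'width': 400, 'height': 300})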
|
agpl-3.0
| 2,342,765,068,469,414,000
| 30.95977
| 115
| 0.660133
| false
| 3.656147
| false
| false
| false
|
FirmlyReality/docklet
|
src/master/testTaskMgr.py
|
2
|
5417
|
import master.taskmgr
from concurrent import futures
import grpc
from protos.rpc_pb2 import *
from protos.rpc_pb2_grpc import *
import threading, json, time, random
from utils import env
class SimulatedNodeMgr():
def get_batch_nodeips(self):
return ['0.0.0.0']
class SimulatedMonitorFetcher():
def __init__(self, ip):
self.info = {}
self.info['cpuconfig'] = [1,1,1,1,1,1,1,1]
self.info['meminfo'] = {}
self.info['meminfo']['free'] = 8 * 1024 * 1024 # (kb) simulate 8 GB memory
self.info['meminfo']['buffers'] = 8 * 1024 * 1024
self.info['meminfo']['cached'] = 8 * 1024 * 1024
self.info['diskinfo'] = []
self.info['diskinfo'].append({})
self.info['diskinfo'][0]['free'] = 16 * 1024 * 1024 * 1024 # (b) simulate 16 GB disk
self.info['gpuinfo'] = [1,1]
class SimulatedTaskController(WorkerServicer):
def __init__(self, worker):
self.worker = worker
def start_vnode(self, vnodeinfo, context):
print('[SimulatedTaskController] start vnode, taskid [%s] vnodeid [%d]' % (vnodeinfo.taskid, vnodeinfo.vnodeid))
return Reply(status=Reply.ACCEPTED,message="")
def stop_vnode(self, vnodeinfo, context):
print('[SimulatedTaskController] stop vnode, taskid [%s] vnodeid [%d]' % (vnodeinfo.taskid, vnodeinfo.vnodeid))
return Reply(status=Reply.ACCEPTED,message="")
def start_task(self, taskinfo, context):
print('[SimulatedTaskController] start task, taskid [%s] vnodeid [%d] token [%s]' % (taskinfo.taskid, taskinfo.vnodeid, taskinfo.token))
worker.process(taskinfo)
return Reply(status=Reply.ACCEPTED,message="")
def stop_task(self, taskinfo, context):
print('[SimulatedTaskController] stop task, taskid [%s] vnodeid [%d] token [%s]' % (taskinfo.taskid, taskinfo.vnodeid, taskinfo.token))
return Reply(status=Reply.ACCEPTED,message="")
class SimulatedWorker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.thread_stop = False
self.tasks = []
def run(self):
worker_port = env.getenv('BATCH_WORKER_PORT')
server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
add_WorkerServicer_to_server(SimulatedTaskController(self), server)
server.add_insecure_port('[::]:' + worker_port)
server.start()
while not self.thread_stop:
for task in self.tasks:
seed = random.random()
if seed < 0.25:
report(task.taskid, task.vnodeid, RUNNING, task.token)
elif seed < 0.5:
report(task.taskid, task.vnodeid, COMPLETED, task.token)
self.tasks.remove(task)
break
elif seed < 0.75:
report(task.taskid, task.vnodeid, FAILED, task.token)
self.tasks.remove(task)
break
else:
pass
time.sleep(5)
server.stop(0)
def stop(self):
self.thread_stop = True
def process(self, task):
self.tasks.append(task)
class SimulatedJobMgr(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.thread_stop = False
def run(self):
while not self.thread_stop:
time.sleep(5)
server.stop(0)
def stop(self):
self.thread_stop = True
def report(self, task):
print('[SimulatedJobMgr] task[%s] status %d' % (task.info.id, task.status))
def assignTask(self, taskmgr, taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu):
task = {}
task['instCount'] = instance_count
task['retryCount'] = retry_count
task['expTime'] = timeout
task['at_same_time'] = True
task['multicommand'] = True
task['command'] = 'ls'
task['srcAddr'] = ''
task['envVars'] = {'a':'1'}
task['stdErrRedPth'] = ''
task['stdOutRedPth'] = ''
task['image'] = 'root_root_base'
task['cpuSetting'] = cpu
task['memorySetting'] = memory
task['diskSetting'] = disk
task['gpuSetting'] = 0
task['mapping'] = []
taskmgr.add_task('root', taskid, task)
class SimulatedLogger():
def info(self, msg):
print('[INFO] ' + msg)
def warning(self, msg):
print('[WARNING] ' + msg)
def error(self, msg):
print('[ERROR] ' + msg)
def test():
global worker
global jobmgr
global taskmgr
worker = SimulatedWorker()
worker.start()
jobmgr = SimulatedJobMgr()
jobmgr.start()
taskmgr = master.taskmgr.TaskMgr(SimulatedNodeMgr(), SimulatedMonitorFetcher, master_ip='', scheduler_interval=2, external_logger=SimulatedLogger())
# taskmgr.set_jobmgr(jobmgr)
taskmgr.start()
add('task_0', instance_count=2, retry_count=2, timeout=60, cpu=2, memory=2048, disk=2048, gpu=0)
def test2():
global jobmgr
global taskmgr
jobmgr = SimulatedJobMgr()
jobmgr.start()
taskmgr = master.taskmgr.TaskMgr(SimulatedNodeMgr(), SimulatedMonitorFetcher, master_ip='', scheduler_interval=2, external_logger=SimulatedLogger())
taskmgr.set_jobmgr(jobmgr)
taskmgr.start()
add('task_0', instance_count=2, retry_count=2, timeout=60, cpu=2, memory=2048, disk=2048, gpu=0)
def add(taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu):
global jobmgr
global taskmgr
jobmgr.assignTask(taskmgr, taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu)
def report(taskid, instanceid, status, token):
global taskmgr
master_port = env.getenv('BATCH_MASTER_PORT')
channel = grpc.insecure_channel('%s:%s' % ('0.0.0.0', master_port))
stub = MasterStub(channel)
response = stub.report(ReportMsg(taskmsgs=[TaskMsg(taskid=taskid, username='root', vnodeid=instanceid, subTaskStatus=status, token=token)]))
def stop():
global worker
global jobmgr
global taskmgr
worker.stop()
jobmgr.stop()
taskmgr.stop()
|
bsd-3-clause
| -676,241,921,716,462,500
| 27.067358
| 149
| 0.690419
| false
| 2.918642
| false
| false
| false
|
fastinetserver/portage-idfetch
|
pym/portage/cache/sql_template.py
|
1
|
9336
|
# Copyright: 2005 Gentoo Foundation
# Author(s): Brian Harring (ferringb@gentoo.org)
# License: GPL2
import sys
from portage.cache import template, cache_errors
from portage.cache.template import reconstruct_eclasses
class SQLDatabase(template.database):
"""template class for RDBM based caches
This class is designed such that derivatives don't have to change much code, mostly constant strings.
_BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
from.
SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependent on the RDBMS, as should SCHEMA_PACKAGE_CREATE;
basically you need to deal with creation of a unique pkgid. If the dbapi2 rdbms class has a method of
recovering that id, then modify _insert_cpv to remove the extra select.
Creation of a derived class involves supplying _initdb_con, and table_exists.
Additionally, the default schemas may have to be modified.
"""
SCHEMA_PACKAGE_NAME = "package_cache"
SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
SCHEMA_VALUES_NAME = "values_cache"
SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
_BaseError = ()
_dbClass = None
autocommits = False
# cleanse_keys = True
# boolean indicating if the derived RDBMS class supports replace syntax
_supports_replace = False
def __init__(self, location, label, auxdbkeys, *args, **config):
"""initialize the instance.
derived classes shouldn't need to override this"""
super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
config.setdefault("host","127.0.0.1")
config.setdefault("autocommit", self.autocommits)
self._initdb_con(config)
self.label = self._sfilter(self.label)
def _dbconnect(self, config):
"""should be overridden if the derived class needs special parameters for initializing
the db connection, or cursor"""
self.db = self._dbClass(**config)
self.con = self.db.cursor()
def _initdb_con(self,config):
"""ensure needed tables are in place.
If the derived class needs a different set of table creation commands, overload the appropriate
SCHEMA_ attributes. If it needs additional execution beyond, override"""
self._dbconnect(config)
if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
if self.readonly:
raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
self.SCHEMA_PACKAGE_NAME)
try:
self.con.execute(self.SCHEMA_PACKAGE_CREATE)
except self._BaseError as e:
raise cache_errors.InitializationError(self.__class__, e)
if not self._table_exists(self.SCHEMA_VALUES_NAME):
if self.readonly:
raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
self.SCHEMA_VALUES_NAME)
try:
self.con.execute(self.SCHEMA_VALUES_CREATE)
except self._BaseError as e:
raise cache_errors.InitializationError(self.__class__, e)
def _table_exists(self, tbl):
"""return true if a table exists
derived classes must override this"""
raise NotImplementedError
def _sfilter(self, s):
"""meta escaping, returns quoted string for use in sql statements"""
return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
def _getitem(self, cpv):
try:
self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
"WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
self.label, self._sfilter(cpv)))
except self._BaseError as e:
raise cache_errors.CacheCorruption(self, cpv, e)
rows = self.con.fetchall()
if len(rows) == 0:
raise KeyError(cpv)
vals = dict([(k,"") for k in self._known_keys])
vals.update(dict(rows))
return vals
def _delitem(self, cpv):
"""delete a cpv cache entry
derived RDBM classes for this *must* either support cascaded deletes, or
override this method"""
try:
try:
self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
(self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
if self.autocommits:
self.commit()
except self._BaseError as e:
raise cache_errors.CacheCorruption(self, cpv, e)
if self.con.rowcount <= 0:
raise KeyError(cpv)
except Exception:
if not self.autocommits:
self.db.rollback()
# yes, this can roll back a lot more than just the delete. deal.
raise
def __del__(self):
# just to be safe.
if "db" in self.__dict__ and self.db != None:
self.commit()
self.db.close()
def _setitem(self, cpv, values):
try:
# insert.
try:
pkgid = self._insert_cpv(cpv)
except self._BaseError as e:
raise cache_errors.CacheCorruption(cpv, e)
# __getitem__ fills out missing values,
# so we store only what's handed to us and is a known key
db_values = []
for key in self._known_keys:
if key in values and values[key]:
db_values.append({"key":key, "value":values[key]})
if len(db_values) > 0:
try:
self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
(self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
except self._BaseError as e:
raise cache_errors.CacheCorruption(cpv, e)
if self.autocommits:
self.commit()
except Exception:
if not self.autocommits:
try:
self.db.rollback()
except self._BaseError:
pass
raise
def _insert_cpv(self, cpv):
"""uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
doesn't support auto-increment columns for pkgid.
returns the cpvs new pkgid
note this doesn't commit the transaction. The caller is expected to."""
cpv = self._sfilter(cpv)
if self._supports_replace:
query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
else:
# just delete it.
try:
del self[cpv]
except (cache_errors.CacheCorruption, KeyError):
pass
query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
try:
self.con.execute(query_str % (self.label, cpv))
except self._BaseError:
self.db.rollback()
raise
self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
(self.SCHEMA_PACKAGE_NAME, self.label, cpv))
if self.con.rowcount != 1:
raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
"%i matches upon the following select!" % self.con.rowcount)
return self.con.fetchone()[0]
def __contains__(self, cpv):
if not self.autocommits:
try:
self.commit()
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
try:
self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
(self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
return self.con.rowcount > 0
def __iter__(self):
if not self.autocommits:
try:
self.commit()
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
try:
self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
(self.SCHEMA_PACKAGE_NAME, self.label))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
# return [ row[0] for row in self.con.fetchall() ]
for x in self.con.fetchall():
yield x[0]
def iteritems(self):
try:
self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
"WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
self.label))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
oldcpv = None
l = []
for x, y, v in self.con.fetchall():
if oldcpv != x:
if oldcpv != None:
d = dict(l)
if "_eclasses_" in d:
d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
else:
d["_eclasses_"] = {}
yield oldcpv, d
del l[:]
oldcpv = x
l.append((y,v))
if oldcpv != None:
d = dict(l)
if "_eclasses_" in d:
d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
else:
d["_eclasses_"] = {}
yield oldcpv, d
def commit(self):
self.db.commit()
def get_matches(self,match_dict):
query_list = []
for k,v in match_dict.items():
if k not in self._known_keys:
raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
v = v.replace("%","\\%")
v = v.replace(".*","%")
query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
if len(query_list):
query = " AND "+" AND ".join(query_list)
else:
query = ''
print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query))
try:
self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
(self.label, query))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
return [ row[0] for row in self.con.fetchall() ]
if sys.hexversion >= 0x3000000:
items = iteritems
keys = __iter__
|
gpl-2.0
| 8,476,169,925,880,248,000
| 30.434343
| 114
| 0.678877
| false
| 3.053974
| true
| false
| false
|
Ryex/Rabbyt
|
rabbyt/sprites.py
|
1
|
6567
|
from rabbyt._sprites import cBaseSprite, cSprite
from rabbyt._rabbyt import pick_texture_target
from rabbyt.anims import anim_slot, swizzle, Animable
from rabbyt.primitives import Quad
class BaseSprite(cBaseSprite, Animable):
"""
``BaseSprite(...)``
This class provides some basic functionality for sprites:
* transformations (x, y, rot, scale)
* color (red, green, blue, alpha)
* bounding_radius (for collision detection)
``BaseSprite`` doesn't render anything itself. You'll want to subclass it
and override either ``render()`` or ``render_after_transform()``.
You can pass any of the ``BaseSprite`` properties as keyword arguments.
(``x``, ``y``, ``xy``, etc.)
"""
x = anim_slot(default=0, index=0, doc="x coordinate of the sprite")
y = anim_slot(default=0, index=1, doc="y coordinate of the sprite")
rot = anim_slot(default=0, index=2, doc="rotation angle in degrees.")
red = anim_slot(default=1, index=3, doc="red color component")
green = anim_slot(default=1, index=4, doc="green color component")
blue = anim_slot(default=1, index=5, doc="blue color component")
alpha = anim_slot(default=1, index=6, doc="alpha color component")
scale_x = anim_slot(default=1, index=7, doc="x component of ``scale``")
scale_y = anim_slot(default=1, index=8, doc="y component of ``scale``")
xy = swizzle("x", "y")
rgb = swizzle("red", "green", "blue")
rgba = swizzle("red", "green", "blue", "alpha")
def _get_scale(self):
if self.scale_x == self.scale_y:
return self.scale_x
else:
return (self.scale_x, self.scale_y)
def _set_scale(self, s):
if hasattr(s, "__len__"):
self.scale_x, self.scale_y = s
else:
self.scale_x = self.scale_y = s
scale = property(_get_scale, _set_scale, doc=
"""
scale
``1.0`` is normal size; ``0.5`` is half size, ``2.0`` is double
size... you get the point.
You can scale the x and y axes independently by assigning a tuple with
a length of two.
""")
class Sprite(cSprite, BaseSprite):
"""
``Sprite(texture=None, shape=None, tex_shape=(0,1,1,0), ...)``
This class provides a basic, four point, textured sprite.
All arguments are optional.
``texture`` should be an image filename, a pyglet texture object, or
an OpenGL texture id. (See ``Sprite.texture`` for more information.)
If ``shape`` is not given it will default to the dimensions of the
texture if they are available. For more information on ``shape`` and
``tex_shape`` read the docstrings for ``Sprite.shape`` and
``Sprite.tex_shape``
Additionally, you can pass values for most of the properties as keyword
arguments. (``x``, ``y``, ``xy``, ``u``, ``v``, ``uv``, etc...)
"""
u = anim_slot(default=0, index=9, doc="texture offset")
v = anim_slot(default=0, index=10, doc="texture offset")
uv = swizzle("u", "v")
def __init__(self, texture=None, shape=None, tex_shape=None,
**kwargs):
BaseSprite.__init__(self)
cSprite.__init__(self)
self.red = self.green = self.blue = self.alpha = 1
self.x = self.y = 0
self.scale = 1
self.rot = 0
self.texture_id = -1
# If no shape or tex_shape was given, we want to have useful defaults
# in case the texture doesn't set them.
if shape is None:
s = 10.
self.shape = [s, s, -s, -s]
if tex_shape is None:
self.tex_shape = (0,1,1,0)
self.texture = texture
# If shape or tex_shape were given, we want them to override the
# values set when we set the texture.
if shape is not None:
self.shape = shape
if tex_shape is not None:
self.tex_shape = tex_shape
for name, value in list(kwargs.items()):
if hasattr(self.__class__, name) and isinstance(
getattr(self.__class__, name),
(swizzle, anim_slot, property)):
setattr(self, name, value)
else:
raise ValueError("unexpected keyword argument %r" % name)
def ensure_target(self):
if not self.texture_target:
target = pick_texture_target()
self.texture_target = target
def _get_texture(self):
return self._tex_obj
def _set_texture(self, texture):
self._tex_obj = texture
tex_size = None
if isinstance(texture, str):
from rabbyt._rabbyt import load_texture_file_hook
res = load_texture_file_hook(texture)
if isinstance(res, tuple) and len(res) == 2:
self.texture_id, tex_size = res
else:
self.texture = res # Recursive
elif isinstance(texture, int):
self.texture_id = texture
elif hasattr(texture, "id"):
if hasattr(texture, "target"):
self.texture_target = texture.target
self.texture_id = texture.id
if hasattr(texture, "tex_coords"):
self.tex_shape = texture.tex_coords
self.uv = 0,0
elif hasattr(texture, "tex_shape"):
self.tex_shape = texture.tex_shape
if hasattr(texture, "width") and hasattr(texture, "height"):
tex_size = (texture.width, texture.height)
elif texture is None:
self.texture_id = 0
else:
raise ValueError("texture should be either an int or str.")
if tex_size:
w, h = tex_size
self.shape = [-w/2, h/2, w/2, -h/2]
texture = property(_get_texture, _set_texture, doc=
"""
``Sprite.texture``
The texture used for this sprite.
The value can be in a variety of formats:
If it's a string, it will be used as a filename to load the
texture.
If it's an integer, it will be used as an OpenGL texture id.
If it's an object with an ``id`` attribute, it will be treated
as a pyglet texture object. (The ``width``, ``height``, and
``tex_coords`` attributes will set the sprite's ``shape`` and
``tex_shape`` properties.)
""")
__docs_all__ = ["BaseSprite", "Sprite"]
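# Usage sketch (comment only; 'ball.png' is a hypothetical texture file and
# the attribute values are arbitrary):
#   sprite = Sprite(texture="ball.png", xy=(100, 50), rot=45, alpha=0.8)
#   sprite.render()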
|
mit
| -8,949,477,883,439,346,000
| 35.525714
| 78
| 0.559769
| false
| 3.908929
| false
| false
| false
|
aronasorman/kolibri
|
kolibri/logger/serializers.py
|
1
|
2680
|
from kolibri.logger.models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, MasteryLog, UserSessionLog
from rest_framework import serializers
class ContentSessionLogSerializer(serializers.ModelSerializer):
class Meta:
model = ContentSessionLog
fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',
'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')
class MasteryLogSerializer(serializers.ModelSerializer):
pastattempts = serializers.SerializerMethodField()
totalattempts = serializers.SerializerMethodField()
class Meta:
model = MasteryLog
fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts',
'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')
def get_pastattempts(self, obj):
# will return the 'correct' and 'hinted' fields of the latest 10 attempts.
return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]
def get_totalattempts(self, obj):
return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()
class AttemptLogSerializer(serializers.ModelSerializer):
class Meta:
model = AttemptLog
fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',
'end_timestamp', 'completion_timestamp', 'item', 'time_spent',
'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
class ContentSummaryLogSerializer(serializers.ModelSerializer):
currentmasterylog = serializers.SerializerMethodField()
class Meta:
model = ContentSummaryLog
fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',
'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')
def get_currentmasterylog(self, obj):
try:
current_log = obj.masterylogs.latest('end_timestamp')
return MasteryLogSerializer(current_log).data
except MasteryLog.DoesNotExist:
return None
class ContentRatingLogSerializer(serializers.ModelSerializer):
class Meta:
model = ContentRatingLog
fields = ('pk', 'user', 'content_id', 'channel_id', 'quality', 'ease', 'learning', 'feedback')
class UserSessionLogSerializer(serializers.ModelSerializer):
class Meta:
model = UserSessionLog
fields = ('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')
|
mit
| 401,927,789,942,079,940
| 39.606061
| 141
| 0.680597
| false
| 4.135802
| false
| false
| false
|
SAAVY/magpie
|
client/blacklist.py
|
1
|
1546
|
from flask import current_app
from netaddr import IPNetwork, IPAddress
from netaddr.core import AddrFormatError
bl_website_ip = [] # array of tuples (network mask, port)
def build_website_blacklist(logger):
with open("config/blacklist_website_ip.txt") as f:
for line in f:
network_address = line.strip()
ip, separator, port = network_address.rpartition(':')
if not separator:
address = (network_address, '')
else:
address = (ip, port)
if not port:
logger.error("check blacklist_website_ip.txt: must specify port number after ':' in ip")
continue
try:
IPNetwork(address[0])
bl_website_ip.append(address)
except AddrFormatError as e:
logger.error("Format error. check blacklist_website_ip.txt: %s" % str(e))
def is_website_blacklisted(website_ip, website_port):
logger = current_app.logger
logger.debug("FUNC: is_website_blacklisted ip_address: %s port: %s" % (website_ip, website_port))
for network_mask, port in bl_website_ip:
try:
if IPAddress(website_ip) in IPNetwork(network_mask):
if port and website_port == port:
return True
elif port:
return False
return True
except Exception as e:
logger.exception("FUNC: is_website_blacklisted Exception: %s" % str(e))
return False
|
mit
| -8,088,540,729,857,940,000
| 36.707317
| 108
| 0.574386
| false
| 4.270718
| false
| false
| false
|
pycrystem/pycrystem
|
pyxem/tests/test_signals/test_power2d.py
|
1
|
3662
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 The pyXem developers
#
# This file is part of pyXem.
#
# pyXem is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyXem is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyXem. If not, see <http://www.gnu.org/licenses/>.
import pytest
import numpy as np
import dask.array as da
from hyperspy.signals import Signal2D
from pyxem.signals.power2d import Power2D, LazyPower2D
class TestComputeAndAsLazy2D:
def test_2d_data_compute(self):
dask_array = da.random.random((100, 150), chunks=(50, 50))
s = LazyPower2D(dask_array)
scale0, scale1, metadata_string = 0.5, 1.5, "test"
s.axes_manager[0].scale = scale0
s.axes_manager[1].scale = scale1
s.metadata.Test = metadata_string
s.compute()
assert s.__class__ == Power2D
assert not hasattr(s.data, "compute")
assert s.axes_manager[0].scale == scale0
assert s.axes_manager[1].scale == scale1
assert s.metadata.Test == metadata_string
assert dask_array.shape == s.data.shape
def test_4d_data_compute(self):
dask_array = da.random.random((4, 4, 10, 15), chunks=(1, 1, 10, 15))
s = LazyPower2D(dask_array)
s.compute()
assert s.__class__ == Power2D
assert dask_array.shape == s.data.shape
def test_2d_data_as_lazy(self):
data = np.random.random((100, 150))
s = Power2D(data)
scale0, scale1, metadata_string = 0.5, 1.5, "test"
s.axes_manager[0].scale = scale0
s.axes_manager[1].scale = scale1
s.metadata.Test = metadata_string
s_lazy = s.as_lazy()
assert s_lazy.__class__ == LazyPower2D
assert hasattr(s_lazy.data, "compute")
assert s_lazy.axes_manager[0].scale == scale0
assert s_lazy.axes_manager[1].scale == scale1
assert s_lazy.metadata.Test == metadata_string
assert data.shape == s_lazy.data.shape
def test_4d_data_as_lazy(self):
data = np.random.random((4, 10, 15))
s = Power2D(data)
s_lazy = s.as_lazy()
assert s_lazy.__class__ == LazyPower2D
assert data.shape == s_lazy.data.shape
class TestPower:
@pytest.fixture
def flat_pattern(self):
pd = Power2D(data=np.ones(shape=(2, 2, 5, 5)))
return pd
@pytest.mark.parametrize("k_region", [None, [2.0, 4.0]])
@pytest.mark.parametrize("sym", [None, 4, [2, 4]])
def test_power_signal_get_map(self, flat_pattern, k_region, sym):
flat_pattern.get_map(k_region=k_region, symmetry=sym)
@pytest.mark.parametrize("k_region", [None, [2.0, 4.0]])
@pytest.mark.parametrize("sym", [[2, 4]])
def test_power_signal_plot_symmetries(self, flat_pattern, k_region, sym):
flat_pattern.plot_symmetries(k_region=k_region, symmetry=sym)
class TestDecomposition:
def test_decomposition_is_performed(self, diffraction_pattern):
s = Power2D(diffraction_pattern)
s.decomposition()
assert s.learning_results is not None
def test_decomposition_class_assignment(self, diffraction_pattern):
s = Power2D(diffraction_pattern)
s.decomposition()
assert isinstance(s, Power2D)
|
gpl-3.0
| 3,197,397,849,401,754,000
| 35.62
| 77
| 0.647733
| false
| 3.326067
| true
| false
| false
|
McGillX/edx_data_research
|
edx_data_research/parsing/parse_course_structure.py
|
1
|
4815
|
import json
from edx_data_research.parsing.parse import Parse
class CourseStructure(Parse):
def __init__(self, args):
super(CourseStructure, self).__init__(args)
self.collections = ['course_structure']
self.course_structure_file = args.course_structure_file
self.drop = args.drop
def migrate(self):
if self.drop:
self.collections['course_structure'].drop()
json_data = self._load_json_data(self.course_structure_file)
json_data = self._parse_key_names(json_data)
json_data = self._delete_category(json_data, 'conditional')
json_data = self._delete_category(json_data, 'wrapper')
json_data = self._build_parent_data(json_data)
json_data = self._update_parent_data(json_data)
for key in json_data:
self.collections['course_structure'].insert(json_data[key])
def _load_json_data(self, file_name):
'''Retrieve data from the json file'''
with open(file_name) as file_handler:
json_data = json.load(file_handler)
return json_data
def _parse_key_names(self, json_data):
'''Parse key names'''
new_json_data = {}
for key in json_data:
new_key = key.split('/')[-1]
json_data[key]['_id'] = new_key
if json_data[key]['children']:
for index, child in enumerate(json_data[key]['children']):
json_data[key]['children'][index] = child.split('/')[-1]
new_json_data[new_key] = json_data[key]
return new_json_data
def _delete_category(self, json_data, category):
'''Delete data with given category from json_data '''
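# For illustration only (hypothetical ids, not from any real course): if a
# parent's children are ['a', 'wrap', 'b'] and 'wrap' has the given category
# with children ['c', 'd'], the parent's children become ['a', 'c', 'd', 'b']
# and 'wrap' itself is deleted, as done by the splice below.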
for key in json_data.keys():
if json_data[key]['category'] == category:
for item in json_data.keys():
if json_data[item]['children'] and key in json_data[item]['children']:
parent_id = item
index_child = json_data[parent_id]['children'].index(key)
left_list = json_data[parent_id]['children'][:index_child]
right_list = json_data[parent_id]['children'][index_child + 1:]
json_data[parent_id]['children'] = left_list + json_data[key]['children'] + right_list
del json_data[key]
return json_data
def _build_parent_data(self, json_data):
'''Build parent data'''
error_count = 0
for key in json_data:
if json_data[key]['children']:
for index, child_key in enumerate(json_data[key]['children']):
try:
json_data[child_key]['parent_data'] = {}
except:
error_count += 1
continue
parent_category = json_data[key]['category']
parent_order_key = parent_category + '_order'
parent_id_key = parent_category + '_id'
parent_display_name_key = parent_category + '_display_name'
json_data[child_key]['parent_data'][parent_order_key] = index
json_data[child_key]['parent_data'][parent_id_key] = json_data[key]['_id']
json_data[child_key]['parent_data'][parent_display_name_key] = json_data[key]['metadata']['display_name']
print "Number of errors when building parent data: {0}".format(error_count)
return json_data
def _update_parent_data(self, json_data):
for key in json_data:
if json_data[key]['category'] == 'sequential':
chapter_id = json_data[key]['parent_data']['chapter_id']
chapter_parent_data = json_data[chapter_id]['parent_data']
json_data[key]['parent_data'].update(chapter_parent_data)
for key in json_data:
if json_data[key]['category'] == 'vertical':
sequential_id = json_data[key]['parent_data']['sequential_id']
sequential_parent_data = json_data[sequential_id]['parent_data']
json_data[key]['parent_data'].update(sequential_parent_data)
for key in json_data:
if json_data[key]['category'] not in set(['vertical', 'sequential', 'chapter', 'course']):
try:
vertical_id = json_data[key]['parent_data']['vertical_id']
vertical_parent_data = json_data[vertical_id]['parent_data']
json_data[key]['parent_data'].update(vertical_parent_data)
except:
print "ERROR: {0}".format(json_data[key])
return json_data
|
mit
| 5,415,329,089,375,148,000
| 46.636364
| 125
| 0.538525
| false
| 4.143718
| false
| false
| false
|
lizardsystem/threedilib
|
threedilib/modeling/convert.py
|
1
|
8275
|
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""
Convert shapefiles with z coordinates. Choose from the following formats:
'inp' to create an inp file, 'img' to create an image with a plot of the
feature, or 'shp' to output a shapefile with the average height of a
feature stored in an extra attribute.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import argparse
import math
import os
import shutil
import tempfile
from matplotlib.backends import backend_agg
from matplotlib import figure
from osgeo import gdal
from osgeo import ogr
from PIL import Image
ogr.UseExceptions()
def get_parser():
""" Return argument parser. """
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument('source_path',
metavar='SOURCE',
help=('Path to source shapefile.'))
parser.add_argument('target_path',
metavar='TARGET',
help=('Path to target file.'))
parser.add_argument('-of', '--output-format',
metavar='FORMAT',
choices=['inp', 'img', 'shp'],
default='shp',
help=("Path to output."))
return parser
class InputFileWriter(object):
""" Writer for input files. """
def __init__(self, path):
"""
Init the counters and tmpdirs
"""
self.path = path
self.node_count = 0
self.link_count = 0
def __enter__(self):
""" Setup tempfiles. """
self.temp_directory = tempfile.mkdtemp()
self.node_file = open(
os.path.join(self.temp_directory, 'nodes'), 'a+',
)
self.link_file = open(
os.path.join(self.temp_directory, 'links'), 'a+',
)
return self
def __exit__(self, type, value, traceback):
""" Write 'inputfile' at path. """
with open(self.path, 'w') as input_file:
self.node_file.seek(0)
input_file.write(self.node_file.read())
input_file.write('-1\n')
self.link_file.seek(0)
input_file.write(self.link_file.read())
self.node_file.close()
self.link_file.close()
shutil.rmtree(self.temp_directory)
def _write_node(self, node):
""" Write a node. """
self.node_count += 1
self.node_file.write('{} {} {} {}\n'.format(
self.node_count, node[0], node[1], -node[2] # Depth, not height!
))
def _write_link(self):
""" Write a link between previous node and next node."""
self.link_count += 1
self.link_file.write('{} {} {}\n'.format(
self.link_count, self.node_count, self.node_count + 1,
))
def _add_wkb_line_string(self, wkb_line_string):
""" Add linestring as nodes and links. """
nodes = [wkb_line_string.GetPoint(i)
for i in range(wkb_line_string.GetPointCount())]
# Add nodes and links up to the last node
for i in range(len(nodes) - 1):
self._write_node(nodes[i])
self._write_link()
# Add last node, link already covered.
self._write_node(nodes[-1])
def add_feature(self, feature):
""" Add feature as nodes and links. """
geometry = feature.geometry()
geometry_type = geometry.GetGeometryType()
if geometry_type == ogr.wkbLineString25D:
self._add_wkb_line_string(geometry)
elif geometry_type == ogr.wkbMultiLineString25D:
for wkb_line_string in geometry:
self._add_wkb_line_string(wkb_line_string)
class ImageWriter(object):
""" Writer for images. """
def __init__(self, path):
self.count = 0
self.path = path
def __enter__(self):
return self
def _add_wkb_line_string(self, wkb_line_string, label):
""" Plot linestring as separate image. """
# Get data
x, y, z = zip(*[wkb_line_string.GetPoint(i)
for i in range(wkb_line_string.GetPointCount())])
# Determine distance along line
l = [0]
for i in range(len(z) - 1):
l.append(l[-1] + math.sqrt(
(x[i + 1] - x[i]) ** 2 + (y[i + 1] - y[i]) ** 2,
))
# Plot in matplotlib
fig = figure.Figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(l, z, label=label)
axes.legend(loc='best', frameon=False)
# Write to image
backend_agg.FigureCanvasAgg(fig)
buf, size = fig.canvas.print_to_buffer()
image = Image.fromstring('RGBA', size, buf)
root, ext = os.path.splitext(self.path)
image.save(root + '{:00.0f}'.format(self.count) + ext)
self.count += 1
def add_feature(self, feature):
""" Currently saves every feature in a separate image. """
# Plotlabel
label = '\n'.join([': '.join(str(v) for v in item)
for item in feature.items().items()])
# Plot according to geometry type
geometry = feature.geometry()
geometry_type = geometry.GetGeometryType()
if geometry_type == ogr.wkbLineString25D:
self._add_wkb_line_string(geometry, label=label)
elif geometry_type == ogr.wkbMultiLineString25D:
for wkb_line_string in geometry:
self._add_wkb_line_string(wkb_line_string, label=label)
def __exit__(self, type, value, traceback):
pass
class ShapefileWriter(object):
""" Writer for shapefiles. """
ATTRIBUTE = b'kruinhoogt'
def __init__(self, path):
self.count = 0
self.path = path
self.datasource = None
self.layer = None
def __enter__(self):
return self
def create_datasource(self, feature):
""" Create a datasource based on feature. """
root, ext = os.path.splitext(os.path.basename(self.path))
driver = ogr.GetDriverByName(b'ESRI Shapefile')
datasource = driver.CreateDataSource(self.path)
layer = datasource.CreateLayer(root)
for i in range(feature.GetFieldCount()):
layer.CreateField(feature.GetFieldDefnRef(i))
field_defn = ogr.FieldDefn(self.ATTRIBUTE, ogr.OFTReal)
layer.CreateField(field_defn)
self.datasource = datasource
self.layer = layer
def add_feature(self, feature):
""" Currently saves every feature in a separate image. """
if self.layer is None:
self.create_datasource(feature)
layer_defn = self.layer.GetLayerDefn()
# elevation
geometry = feature.geometry().Clone()
geometry_type = geometry.GetGeometryType()
if geometry_type == ogr.wkbLineString25D:
elevation = min([p[2] for p in geometry.GetPoints()])
else:
# multilinestring
elevation = min([p[2]
for g in geometry
for p in g.GetPoints()])
geometry.FlattenTo2D()
new_feature = ogr.Feature(layer_defn)
new_feature.SetGeometry(geometry)
for k, v in feature.items().items():
new_feature[k] = v
new_feature[self.ATTRIBUTE] = elevation
self.layer.CreateFeature(new_feature)
def __exit__(self, type, value, traceback):
pass
def convert(source_path, target_path, output_format):
""" Convert shapefile to inp file."""
source_dataset = ogr.Open(str(source_path))
writers = dict(
inp=InputFileWriter,
img=ImageWriter,
shp=ShapefileWriter,
)
with writers[output_format](target_path) as writer:
for source_layer in source_dataset:
total = source_layer.GetFeatureCount()
for count, source_feature in enumerate(source_layer, 1):
writer.add_feature(source_feature)
gdal.TermProgress_nocb(count / total)
def main():
""" Call convert() with commandline args. """
convert(**vars(get_parser().parse_args()))
if __name__ == '__main__':
exit(main())
|
gpl-3.0
| 6,584,589,084,668,582,000
| 32.1
| 77
| 0.570997
| false
| 3.927385
| false
| false
| false
|
holzenburg/feedshare
|
feedshare/feedlists/migrations/0002_auto__add_feedlistfeed__del_field_feed_feedlist__del_field_feed_tags__.py
|
1
|
8074
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeedListFeed'
db.create_table(u'feedlists_feedlistfeed', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('feedlist', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feedlists.FeedList'])),
('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feedlists.Feed'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal(u'feedlists', ['FeedListFeed'])
# Deleting field 'Feed.feedlist'
db.delete_column(u'feedlists_feed', 'feedlist_id')
# Deleting field 'Feed.tags'
db.delete_column(u'feedlists_feed', 'tags')
# Adding field 'Feed.site_url'
db.add_column(u'feedlists_feed', 'site_url',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
keep_default=False)
# Changing field 'Feed.description'
db.alter_column(u'feedlists_feed', 'description', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'Feed.title'
db.alter_column(u'feedlists_feed', 'title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'FeedList.description'
db.alter_column(u'feedlists_feedlist', 'description', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'FeedList.title'
db.alter_column(u'feedlists_feedlist', 'title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'FeedList.author_email'
db.alter_column(u'feedlists_feedlist', 'author_email', self.gf('django.db.models.fields.EmailField')(max_length=255, null=True))
# Changing field 'FeedList.url'
db.alter_column(u'feedlists_feedlist', 'url', self.gf('django.db.models.fields.URLField')(max_length=255, null=True))
# Changing field 'FeedList.author'
db.alter_column(u'feedlists_feedlist', 'author', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'FeedList.file'
db.alter_column(u'feedlists_feedlist', 'file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
def backwards(self, orm):
# Deleting model 'FeedListFeed'
db.delete_table(u'feedlists_feedlistfeed')
# User chose to not deal with backwards NULL issues for 'Feed.feedlist'
raise RuntimeError("Cannot reverse this migration. 'Feed.feedlist' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Adding field 'Feed.feedlist'
db.add_column(u'feedlists_feed', 'feedlist',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feedlists.FeedList']),
keep_default=False)
# Adding field 'Feed.tags'
db.add_column(u'feedlists_feed', 'tags',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Deleting field 'Feed.site_url'
db.delete_column(u'feedlists_feed', 'site_url')
# Changing field 'Feed.description'
db.alter_column(u'feedlists_feed', 'description', self.gf('django.db.models.fields.TextField')(default=''))
# Changing field 'Feed.title'
db.alter_column(u'feedlists_feed', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
# Changing field 'FeedList.description'
db.alter_column(u'feedlists_feedlist', 'description', self.gf('django.db.models.fields.TextField')(default=''))
# Changing field 'FeedList.title'
db.alter_column(u'feedlists_feedlist', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
# Changing field 'FeedList.author_email'
db.alter_column(u'feedlists_feedlist', 'author_email', self.gf('django.db.models.fields.EmailField')(default='', max_length=255))
# Changing field 'FeedList.url'
db.alter_column(u'feedlists_feedlist', 'url', self.gf('django.db.models.fields.URLField')(default='', max_length=255))
# Changing field 'FeedList.author'
db.alter_column(u'feedlists_feedlist', 'author', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
# User chose to not deal with backwards NULL issues for 'FeedList.file'
raise RuntimeError("Cannot reverse this migration. 'FeedList.file' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'FeedList.file'
db.alter_column(u'feedlists_feedlist', 'file', self.gf('django.db.models.fields.files.FileField')(max_length=100))
models = {
u'feedlists.feed': {
'Meta': {'object_name': 'Feed'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.TextField', [], {})
},
u'feedlists.feedlist': {
'Meta': {'object_name': 'FeedList'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feedlists.Feed']", 'through': u"orm['feedlists.FeedListFeed']", 'symmetrical': 'False'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processing_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'feedlists.feedlistfeed': {
'Meta': {'object_name': 'FeedListFeed'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedlists.Feed']"}),
'feedlist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedlists.FeedList']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['feedlists']
|
mit
| 8,905,969,177,572,606,000
| 56.269504
| 180
| 0.614194
| false
| 3.607685
| false
| false
| false
|
RealP/Everpy
|
examples.py
|
1
|
1776
|
"""Some examples of how to use modules."""
# from everpy_extras import EverPyExtras
from everpy_pro import EverPyPro
import everpy_utilities
PATH_TO_ENSCRIPT = r"C:\Program Files (x86)\Evernote\Evernote\ENScript.exe"
def createnote(epy):
"""Example of how to make a note from python."""
content = open("README.md", "r").read()
notebook = "_INBOX"
title = "Everpy Generated Note"
tags = ["everpy"]
attachments = ["README.md"]
epy.create_note_from_content(content, notebook_name=notebook, title=title, tags=tags, file_attachments=attachments)
def main():
"""Example usages."""
dev_token = everpy_utilities.get_token()
try:
my_evernote = EverPyPro(dev_token, PATH_TO_ENSCRIPT)
except:
everpy_utilities.refresh_token()
my_evernote = EverPyPro(dev_token, PATH_TO_ENSCRIPT)
# Find and replace
# my_evernote.find_and_replace("evernote", "Evernote", "any:")
# Creating a note.
# createnote(my_evernote)
# Opening client with specific search attributes
# my_evernote.get_notes_to_manage()
# or
# my_evernote.search_notes("stack:Work intitle:\"new employee\"")
# Creating a note from an hmtl template
# my_evernote.create_note(open("Templates/testnote.html", "r").read(), title="testnote", notebook="_INBOX", tags=["everpy"], attachments=["Templates/testnote.html"])
##############################
# VVVV Tests may not work VVVV.
# my_evernote.create_template("Templates/simple_sections.txt")
my_evernote.create_template("Templates/card_template.txt")
# my_evernote.create_textnote_from_file("template.html", notebook_name="_INBOX")
# my_evernote.learn_notebooks()
# print(my_evernote.note_book_dict)
if __name__ == '__main__':
main()
|
gpl-3.0
| -8,499,846,340,086,754,000
| 33.823529
| 169
| 0.662162
| false
| 3.338346
| false
| false
| false
|
Stemer114/Reprap_KTY-84-130
|
repetier/KTY84-130_repetier.py
|
1
|
1879
|
# based on python script from
# http://diyhpl.us/reprap/trunk/users/wizard23/python/lookupTables/KTY84-130.py
#
# adapted by Stemer114 for usage with 4.7k pull-up resistor
# table format for repetier firmware
# https://github.com/Stemer114/Reprap_KTY-84-130
#
# generates a lookup table for the following thermistor
# KTY 84-130
# http://www.datasheetcatalog.org/datasheet/philips/KTY84_SERIES_5.pdf
# usage:
# python KTY84-130.py >ThermistorTable.h
# copy ThermistorTable.h into your firmware dir
# enable the lookup table in firmware config.h (depends on firmware)
# resistor values are taken from data sheet page 4, table 1
# temperature range is 0C to 300C in steps of 10K
# the negative temperature entries and the entry for 25C are omitted
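# Worked example (for illustration only, derived from the formula used in the
# loop below): at 0 C the KTY84-130 is about 498 ohm, so with the 4.7k pull-up
# to 5 V the divider gives
#     current = 5.0 / (4700 + 498)        ~ 0.962 mA
#     voltage = current * 498             ~ 0.479 V
#     adValue = round(0.479 * 1023 / 5.0) ~ 98
# which is the raw 10-bit ADC reading written (scaled) into the table entry.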
resistorValues = [
498,
538,
581,
626,
672,
722,
773,
826,
882,
940,
1000,
1062,
1127,
1194,
1262,
1334,
1407,
1482,
1560,
1640,
1722,
1807,
1893,
1982,
2073,
2166,
2261,
2357,
2452,
2542,
2624]
tempValues = range(0, 301, 10)
if len(tempValues) != len(resistorValues):
print "Length of temValues %d and resistorValues %d does not match" % (len(tempValues), len(resistorValues))
else:
print "// reprap thermistor table for KTY 84-130 temperature sensor"
print "// adapted for repetier firmware user thermistortable 1 format"
print "// for further details see https://github.com/Stemer114/Reprap_KTY-84-130"
print ""
print "// consult the readme for how to insert the table into"
print "// repetier Configuration.h"
print "#define NUM_TEMPS_USERTHERMISTOR1 %d" % (len(tempValues))
print "#define USER_THERMISTORTABLE1 {\ "
suffix = ","
for i in range(0, len(tempValues)):
current = 5.0/(4700.0+resistorValues[i])
voltage = current*resistorValues[i]
adValue = round(voltage*1023.0/5.0)
if i == len(tempValues)-1:
suffix = ""
print " {%d*4, %d*8}%s \ " % (adValue, tempValues[i], suffix)
print "};"
|
mit
| 9,178,785,433,663,180,000
| 24.391892
| 109
| 0.722193
| false
| 2.727141
| false
| false
| false
|
cs411-entree-app/entree
|
entree_project/entree_project/urls.py
|
1
|
1445
|
"""entree_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
from entree import views
handler400 = 'entree.views.bad_request'
handler403 = 'entree.views.permission_denied'
handler404 = 'entree.views.page_not_found'
handler500 = 'entree.views.server_error'
urlpatterns = [
url(r'^$', RedirectView.as_view(url='entree/')),
url(r'^entree/', include('entree.urls', namespace='entree')),
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += [
url(r'^400/$', views.bad_request),
url(r'^403/$', views.permission_denied),
url(r'^404/$', views.page_not_found),
url(r'^500/$', views.server_error),
]
|
apache-2.0
| -1,928,786,586,874,997,500
| 35.125
| 79
| 0.692734
| false
| 3.424171
| false
| false
| false
|
Ruide/angr-dev
|
angr/angr/blade.py
|
1
|
12198
|
import networkx
import pyvex
from .slicer import SimSlicer
class Blade(object):
"""
Blade is a light-weight program slicer that works with networkx DiGraph containing CFGNodes.
It is meant to be used in angr for small or on-the-fly analyses.
"""
def __init__(self, graph, dst_run, dst_stmt_idx, direction='backward', project=None, cfg=None, ignore_sp=False,
ignore_bp=False, ignored_regs=None, max_level=3):
"""
:param networkx.DiGraph graph: A graph representing the control flow graph. Note that it does not take
angr.analyses.CFGAccurate or angr.analyses.CFGFast.
:param int dst_run: An address specifying the target SimRun.
:param int dst_stmt_idx: The target statement index. -1 means executing until the last statement.
:param str direction: 'backward' or 'forward' slicing. Forward slicing is not yet supported.
:param angr.Project project: The project instance.
:param angr.analyses.CFGBase cfg: the CFG instance. It will be made mandatory later.
:param bool ignore_sp: Whether the stack pointer should be ignored in dependency tracking. Any
dependency from/to stack pointers will be ignored if this options is True.
:param bool ignore_bp: Whether the base pointer should be ignored or not.
:param int max_level: The maximum number of blocks that we trace back for.
:return: None
"""
self._graph = graph
self._dst_run = dst_run
self._dst_stmt_idx = dst_stmt_idx
self._ignore_sp = ignore_sp
self._ignore_bp = ignore_bp
self._max_level = max_level
self._slice = networkx.DiGraph()
self.project = project
self._cfg = cfg
if self._cfg is None:
# `cfg` is made optional only for compatibility concern. It will be made a positional parameter later.
raise AngrBladeError('"cfg" must be specified.')
if not self._in_graph(self._dst_run):
raise AngrBladeError("The specified SimRun %s doesn't exist in graph." % self._dst_run)
self._ignored_regs = set()
if ignored_regs:
for r in ignored_regs:
if isinstance(r, (int, long)):
self._ignored_regs.add(r)
else:
self._ignored_regs.add(self.project.arch.registers[r][0])
self._run_cache = { }
self._traced_runs = set()
if direction == 'backward':
self._backward_slice()
elif direction == 'forward':
raise AngrBladeError('Forward slicing is not implemented yet')
else:
raise AngrBladeError("Unknown slicing direction %s", direction)
#
# Properties
#
@property
def slice(self):
return self._slice
#
# Public methods
#
def dbg_repr(self, arch=None):
if arch is None and self.project is not None:
arch = self.project.arch
s = ""
block_addrs = list(set([ a for a, _ in self.slice.nodes_iter() ]))
for block_addr in block_addrs:
block_str = "IRSB %#x\n" % block_addr
block = self.project.factory.block(block_addr).vex
included_stmts = set([ stmt for _, stmt in self.slice.nodes_iter() if _ == block_addr ])
for i, stmt in enumerate(block.statements):
if arch is not None:
if isinstance(stmt, pyvex.IRStmt.Put):
reg_name = arch.translate_register_name(stmt.offset)
stmt_str = stmt.__str__(reg_name=reg_name)
elif isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Get):
reg_name = arch.translate_register_name(stmt.data.offset)
stmt_str = stmt.__str__(reg_name=reg_name)
else:
stmt_str = str(stmt)
else:
stmt_str = str(stmt)
block_str += "%02s: %s\n" % ("+" if i in included_stmts else "-",
stmt_str
)
s += block_str
s += "\n"
return s
#
# Private methods
#
def _get_irsb(self, v):
"""
Get the IRSB object from an address, a SimRun, or a CFGNode.
:param v: Can be one of the following: an address, or a CFGNode.
:return: The IRSB instance.
:rtype: pyvex.IRSB
"""
if isinstance(v, CFGNode):
v = v.addr
if type(v) in (int, long):
# Generate an IRSB from self._project
if v in self._run_cache:
return self._run_cache[v]
if self.project:
irsb = self.project.factory.block(v).vex
self._run_cache[v] = irsb
return irsb
else:
raise AngrBladeError("Project must be specified if you give me all addresses for SimRuns")
else:
raise AngrBladeError('Unsupported SimRun argument type %s' % type(v))
def _get_cfgnode(self, thing):
"""
Get the CFGNode corresponding to the specific address.
:param thing: Can be anything that self._normalize() accepts. Usually it's the address of the node
:return: the CFGNode instance
:rtype: CFGNode
"""
return self._cfg.get_any_node(self._get_addr(thing))
def _get_addr(self, v):
"""
Get address of the basic block or CFG node specified by v.
:param v: Can be one of the following: a CFGNode, or an address.
:return: The address.
:rtype: int
"""
if isinstance(v, CFGNode):
return v.addr
elif type(v) in (int, long):
return v
else:
raise AngrBladeError('Unsupported SimRun argument type %s' % type(v))
def _in_graph(self, v):
return self._get_cfgnode(v) in self._graph
def _inslice_callback(self, stmt_idx, stmt, infodict): # pylint:disable=unused-argument
tpl = (infodict['irsb_addr'], stmt_idx)
if 'prev' in infodict and infodict['prev']:
prev = infodict['prev']
self._slice.add_edge(tpl, prev)
else:
self._slice.add_node(tpl)
infodict['prev'] = tpl
infodict['has_statement'] = True
def _backward_slice(self):
"""
Backward slicing.
We support the following IRStmts:
# WrTmp
# Put
We support the following IRExprs:
# Get
# RdTmp
# Const
:return:
"""
temps = set()
regs = set()
# Retrieve the target: are we slicing from a register(IRStmt.Put), or a temp(IRStmt.WrTmp)?
stmts = self._get_irsb(self._dst_run).statements
if self._dst_stmt_idx != -1:
dst_stmt = stmts[self._dst_stmt_idx]
if type(dst_stmt) is pyvex.IRStmt.Put:
regs.add(dst_stmt.offset)
elif type(dst_stmt) is pyvex.IRStmt.WrTmp:
temps.add(dst_stmt.tmp)
else:
raise AngrBladeError('Incorrect type of the specified target statement. We only support Put and WrTmp.')
prev = (self._get_addr(self._dst_run), self._dst_stmt_idx)
else:
next_expr = self._get_irsb(self._dst_run).next
if type(next_expr) is pyvex.IRExpr.RdTmp:
temps.add(next_expr.tmp)
elif type(next_expr) is pyvex.IRExpr.Const:
# A const doesn't rely on anything else!
pass
else:
raise AngrBladeError('Unsupported type for irsb.next: %s' % type(next_expr))
# Then we gotta start from the very last statement!
self._dst_stmt_idx = len(stmts) - 1
prev = (self._get_addr(self._dst_run), 'default')
slicer = SimSlicer(self.project.arch, stmts,
target_tmps=temps,
target_regs=regs,
target_stack_offsets=None,
inslice_callback=self._inslice_callback,
inslice_callback_infodict={
'irsb_addr': self._get_irsb(self._dst_run)._addr,
'prev': prev,
})
regs = slicer.final_regs
if self._ignore_sp and self.project.arch.sp_offset in regs:
regs.remove(self.project.arch.sp_offset)
if self._ignore_bp and self.project.arch.bp_offset in regs:
regs.remove(self.project.arch.bp_offset)
for offset in self._ignored_regs:
if offset in regs:
regs.remove(offset)
stack_offsets = slicer.final_stack_offsets
prev = slicer.inslice_callback_infodict['prev']
if regs or stack_offsets:
cfgnode = self._get_cfgnode(self._dst_run)
in_edges = self._graph.in_edges(cfgnode, data=True)
for pred, _, data in in_edges:
if 'jumpkind' in data and data['jumpkind'] == 'Ijk_FakeRet':
continue
self._backward_slice_recursive(self._max_level - 1, pred, regs, stack_offsets, prev,
data.get('stmt_idx', None)
)
def _backward_slice_recursive(self, level, run, regs, stack_offsets, prev, exit_stmt_idx):
if level <= 0:
return
temps = set()
regs = regs.copy()
stmts = self._get_irsb(run).statements
if exit_stmt_idx is None or exit_stmt_idx == 'default':
# Initialize the temps set with whatever in the `next` attribute of this irsb
next_expr = self._get_irsb(run).next
if type(next_expr) is pyvex.IRExpr.RdTmp:
temps.add(next_expr.tmp)
else:
exit_stmt = self._get_irsb(run).statements[exit_stmt_idx]
if type(exit_stmt.guard) is pyvex.IRExpr.RdTmp:
temps.add(exit_stmt.guard.tmp)
# Put it in our slice
irsb_addr = self._get_addr(run)
self._inslice_callback(exit_stmt_idx, exit_stmt, {'irsb_addr': irsb_addr, 'prev': prev})
prev = (irsb_addr, exit_stmt_idx)
infodict = {'irsb_addr' : self._get_addr(run),
'prev' : prev,
'has_statement': False
}
slicer = SimSlicer(self.project.arch, stmts,
target_tmps=temps,
target_regs=regs,
target_stack_offsets=stack_offsets,
inslice_callback=self._inslice_callback,
inslice_callback_infodict=infodict
)
if not infodict['has_statement']:
# put this block into the slice
self._inslice_callback(0, None, infodict)
if run in self._traced_runs:
return
self._traced_runs.add(run)
regs = slicer.final_regs
if self._ignore_sp and self.project.arch.sp_offset in regs:
regs.remove(self.project.arch.sp_offset)
if self._ignore_bp and self.project.arch.bp_offset in regs:
regs.remove(self.project.arch.bp_offset)
stack_offsets = slicer.final_stack_offsets
prev = slicer.inslice_callback_infodict['prev']
if regs or stack_offsets:
in_edges = self._graph.in_edges(self._get_cfgnode(run), data=True)
for pred, _, data in in_edges:
if 'jumpkind' in data and data['jumpkind'] == 'Ijk_FakeRet':
continue
self._backward_slice_recursive(level - 1, pred, regs, stack_offsets, prev, data.get('stmt_idx', None))
from .errors import AngrBladeError, AngrBladeSimProcError
from .analyses.cfg.cfg_node import CFGNode
|
bsd-2-clause
| 6,807,302,043,918,270,000
| 34.876471
| 120
| 0.542712
| false
| 3.948851
| false
| false
| false
|
blomquisg/heat
|
heat/common/client.py
|
1
|
21833
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# HTTPSClientAuthConnection code comes courtesy of ActiveState website:
# http://code.activestate.com/recipes/
# 577548-https-httplib-client-connection-with-certificate-v/
import collections
import errno
import functools
import httplib
import logging
import os
import urllib
import urlparse
try:
from eventlet.green import socket, ssl
except ImportError:
import socket
import ssl
try:
import sendfile
SENDFILE_SUPPORTED = True
except ImportError:
SENDFILE_SUPPORTED = False
from heat.common import auth
from heat.common import exception, utils
# common chunk size for get and put
CHUNKSIZE = 65536
def handle_unauthorized(func):
"""
Wrap a function to re-authenticate and retry.
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except exception.NotAuthorized:
self._authenticate(force_reauth=True)
return func(self, *args, **kwargs)
return wrapped
def handle_redirects(func):
"""
Wrap the _do_request function to handle HTTP redirects.
"""
MAX_REDIRECTS = 5
@functools.wraps(func)
def wrapped(self, method, url, body, headers):
for _ in xrange(MAX_REDIRECTS):
try:
return func(self, method, url, body, headers)
except exception.RedirectException as redirect:
if redirect.url is None:
raise exception.InvalidRedirect()
url = redirect.url
raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
return wrapped
class ImageBodyIterator(object):
"""
A class that acts as an iterator over an image file's
chunks of data. This is returned as part of the result
tuple from `heat.client.Client.get_image`
"""
def __init__(self, source):
"""
Constructs the object from a readable image source
(such as an HTTPResponse or file-like object)
"""
self.source = source
def __iter__(self):
"""
Exposes an iterator over the chunks of data in the
image file.
"""
while True:
chunk = self.source.read(CHUNKSIZE)
if chunk:
yield chunk
else:
break
class SendFileIterator:
"""
Emulate iterator pattern over sendfile, in order to allow
send progress be followed by wrapping the iteration.
"""
def __init__(self, connection, body):
self.connection = connection
self.body = body
self.offset = 0
self.sending = True
def __iter__(self):
class OfLength:
def __init__(self, len):
self.len = len
def __len__(self):
return self.len
while self.sending:
sent = sendfile.sendfile(self.connection.sock.fileno(),
self.body.fileno(),
self.offset,
CHUNKSIZE)
self.sending = (sent != 0)
self.offset += sent
yield OfLength(sent)
class HTTPSClientAuthConnection(httplib.HTTPSConnection):
"""
Class to make a HTTPS connection, with support for
full client-based SSL Authentication
:see http://code.activestate.com/recipes/
577548-https-httplib-client-connection-with-certificate-v/
"""
def __init__(self, host, port, key_file, cert_file,
ca_file, timeout=None, insecure=False):
httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file,
cert_file=cert_file)
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
self.timeout = timeout
self.insecure = insecure
def connect(self):
"""
Connect to a host on a given (SSL) port.
If ca_file is pointing somewhere, use it to check Server Certificate.
Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to
ssl.wrap_socket(), which forces SSL to check server certificate against
our client certificate.
"""
sock = socket.create_connection((self.host, self.port), self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
# Check CA file unless 'insecure' is specificed
if self.insecure is True:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=ssl.CERT_NONE)
else:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ca_certs=self.ca_file,
cert_reqs=ssl.CERT_REQUIRED)
class BaseClient(object):
"""A base client class"""
DEFAULT_PORT = 80
DEFAULT_DOC_ROOT = None
# Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
# Suse, FreeBSD/OpenBSD
DEFAULT_CA_FILE_PATH = '/etc/ssl/certs/ca-certificates.crt:'\
'/etc/pki/tls/certs/ca-bundle.crt:'\
'/etc/ssl/ca-bundle.pem:'\
'/etc/ssl/cert.pem'
OK_RESPONSE_CODES = (
httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.NO_CONTENT,
)
REDIRECT_RESPONSE_CODES = (
httplib.MOVED_PERMANENTLY,
httplib.FOUND,
httplib.SEE_OTHER,
httplib.USE_PROXY,
httplib.TEMPORARY_REDIRECT,
)
def __init__(self, host, port=None, use_ssl=False, auth_tok=None,
creds=None, doc_root=None, key_file=None,
cert_file=None, ca_file=None, insecure=False,
configure_via_auth=True):
"""
Creates a new client to some service.
:param host: The host where service resides
:param port: The port where service resides
:param use_ssl: Should we use HTTPS?
:param auth_tok: The auth token to pass to the server
:param creds: The credentials to pass to the auth plugin
:param doc_root: Prefix for all URLs we request from host
:param key_file: Optional PEM-formatted file that contains the private
key.
If use_ssl is True, and this param is None (the
default), then an environ variable
heat_CLIENT_KEY_FILE is looked for. If no such
environ variable is found, ClientConnectionError
will be raised.
:param cert_file: Optional PEM-formatted certificate chain file.
If use_ssl is True, and this param is None (the
default), then an environ variable
heat_CLIENT_CERT_FILE is looked for. If no such
environ variable is found, ClientConnectionError
will be raised.
:param ca_file: Optional CA cert file to use in SSL connections
If use_ssl is True, and this param is None (the
default), then an environ variable
heat_CLIENT_CA_FILE is looked for.
:param insecure: Optional. If set then the server's certificate
will not be verified.
"""
self.host = host
self.port = port or self.DEFAULT_PORT
self.use_ssl = use_ssl
self.auth_tok = auth_tok
self.creds = creds or {}
self.connection = None
self.configure_via_auth = configure_via_auth
# doc_root can be a nullstring, which is valid, and why we
# cannot simply do doc_root or self.DEFAULT_DOC_ROOT below.
self.doc_root = (doc_root if doc_root is not None
else self.DEFAULT_DOC_ROOT)
self.auth_plugin = self.make_auth_plugin(self.creds)
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
self.insecure = insecure
self.connect_kwargs = self.get_connect_kwargs()
def get_connect_kwargs(self):
connect_kwargs = {}
if self.use_ssl:
if self.key_file is None:
self.key_file = os.environ.get('heat_CLIENT_KEY_FILE')
if self.cert_file is None:
self.cert_file = os.environ.get('heat_CLIENT_CERT_FILE')
if self.ca_file is None:
self.ca_file = os.environ.get('heat_CLIENT_CA_FILE')
# Check that key_file/cert_file are either both set or both unset
if self.cert_file is not None and self.key_file is None:
msg = _("You have selected to use SSL in connecting, "
"and you have supplied a cert, "
"however you have failed to supply either a "
"key_file parameter or set the "
"heat_CLIENT_KEY_FILE environ variable")
raise exception.ClientConnectionError(msg)
if self.key_file is not None and self.cert_file is None:
msg = _("You have selected to use SSL in connecting, "
"and you have supplied a key, "
"however you have failed to supply either a "
"cert_file parameter or set the "
"heat_CLIENT_CERT_FILE environ variable")
raise exception.ClientConnectionError(msg)
if (self.key_file is not None and
not os.path.exists(self.key_file)):
msg = _("The key file you specified %s does not "
"exist") % self.key_file
raise exception.ClientConnectionError(msg)
connect_kwargs['key_file'] = self.key_file
if (self.cert_file is not None and
not os.path.exists(self.cert_file)):
msg = _("The cert file you specified %s does not "
"exist") % self.cert_file
raise exception.ClientConnectionError(msg)
connect_kwargs['cert_file'] = self.cert_file
if (self.ca_file is not None and
not os.path.exists(self.ca_file)):
msg = _("The CA file you specified %s does not "
"exist") % self.ca_file
raise exception.ClientConnectionError(msg)
if self.ca_file is None:
for ca in self.DEFAULT_CA_FILE_PATH.split(":"):
if os.path.exists(ca):
self.ca_file = ca
break
connect_kwargs['ca_file'] = self.ca_file
connect_kwargs['insecure'] = self.insecure
return connect_kwargs
def set_auth_token(self, auth_tok):
"""
Updates the authentication token for this client connection.
"""
# FIXME(sirp): Nova image/heat.py currently calls this. Since this
# method isn't really doing anything useful[1], we should go ahead and
# rip it out, first in Nova, then here. Steps:
#
# 1. Change auth_tok in heat to auth_token
# 2. Change image/heat.py in Nova to use client.auth_token
# 3. Remove this method
#
# [1] http://mail.python.org/pipermail/tutor/2003-October/025932.html
self.auth_tok = auth_tok
def configure_from_url(self, url):
"""
Sets up the connection based on the given url.
The form is:
<http|https>://<host>:port/doc_root
"""
parsed = urlparse.urlparse(url)
self.use_ssl = parsed.scheme == 'https'
self.host = parsed.hostname
self.port = parsed.port or 80
self.doc_root = parsed.path
# ensure connection kwargs are re-evaluated after the service catalog
# publicURL is parsed for potential SSL usage
self.connect_kwargs = self.get_connect_kwargs()
def make_auth_plugin(self, creds):
"""
Returns an instantiated authentication plugin.
"""
strategy = creds.get('strategy', 'noauth')
plugin = auth.get_plugin_from_strategy(strategy, creds)
return plugin
def get_connection_type(self):
"""
Returns the proper connection type
"""
if self.use_ssl:
return HTTPSClientAuthConnection
else:
return httplib.HTTPConnection
def _authenticate(self, force_reauth=False):
"""
Use the authentication plugin to authenticate and set the auth token.
:param force_reauth: For re-authentication to bypass cache.
"""
auth_plugin = self.auth_plugin
if not auth_plugin.is_authenticated or force_reauth:
auth_plugin.authenticate()
self.auth_tok = auth_plugin.auth_token
management_url = auth_plugin.management_url
if management_url and self.configure_via_auth:
self.configure_from_url(management_url)
@handle_unauthorized
def do_request(self, method, action, body=None, headers=None,
params=None):
"""
Make a request, returning an HTTP response object.
:param method: HTTP verb (GET, POST, PUT, etc.)
:param action: Requested path to append to self.doc_root
:param body: Data to send in the body of the request
:param headers: Headers to send with the request
:param params: Key/value pairs to use in query string
:returns: HTTP response object
"""
if not self.auth_tok:
self._authenticate()
url = self._construct_url(action, params)
return self._do_request(method=method, url=url, body=body,
headers=headers)
def _construct_url(self, action, params=None):
"""
Create a URL object we can use to pass to _do_request().
"""
path = '/'.join([self.doc_root or '', action.lstrip('/')])
scheme = "https" if self.use_ssl else "http"
netloc = "%s:%d" % (self.host, self.port)
if isinstance(params, dict):
for (key, value) in params.items():
if value is None:
del params[key]
query = urllib.urlencode(params)
else:
query = None
return urlparse.ParseResult(scheme, netloc, path, '', query, '')
@handle_redirects
def _do_request(self, method, url, body, headers):
"""
Connects to the server and issues a request. Handles converting
any returned HTTP error status codes to OpenStack/heat exceptions
and closing the server connection. Returns the result data, or
raises an appropriate exception.
:param method: HTTP method ("GET", "POST", "PUT", etc...)
:param url: urlparse.ParsedResult object with URL information
:param body: data to send (as string, filelike or iterable),
or None (default)
:param headers: mapping of key/value pairs to add as headers
:note
If the body param has a read attribute, and method is either
POST or PUT, this method will automatically conduct a chunked-transfer
encoding and use the body as a file object or iterable, transferring
chunks of data using the connection's send() method. This allows large
objects to be transferred efficiently without buffering the entire
body in memory.
"""
if url.query:
path = url.path + "?" + url.query
else:
path = url.path
try:
connection_type = self.get_connection_type()
headers = headers or {}
if 'x-auth-token' not in headers and self.auth_tok:
headers['x-auth-token'] = self.auth_tok
c = connection_type(url.hostname, url.port, **self.connect_kwargs)
def _pushing(method):
return method.lower() in ('post', 'put')
def _simple(body):
return body is None or isinstance(body, basestring)
def _filelike(body):
return hasattr(body, 'read')
def _sendbody(connection, iter):
connection.endheaders()
for sent in iter:
# iterator has done the heavy lifting
pass
def _chunkbody(connection, iter):
connection.putheader('Transfer-Encoding', 'chunked')
connection.endheaders()
for chunk in iter:
connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
connection.send('0\r\n\r\n')
# Do a simple request or a chunked request, depending
# on whether the body param is file-like or iterable and
# the method is PUT or POST
#
if not _pushing(method) or _simple(body):
# Simple request...
c.request(method, path, body, headers)
elif _filelike(body) or self._iterable(body):
c.putrequest(method, path)
for header, value in headers.items():
c.putheader(header, value)
iter = self.image_iterator(c, headers, body)
if self._sendable(body):
# send actual file without copying into userspace
_sendbody(c, iter)
else:
# otherwise iterate and chunk
_chunkbody(c, iter)
else:
raise TypeError('Unsupported image type: %s' % body.__class__)
res = c.getresponse()
status_code = self.get_status_code(res)
if status_code in self.OK_RESPONSE_CODES:
return res
elif status_code in self.REDIRECT_RESPONSE_CODES:
raise exception.RedirectException(res.getheader('Location'))
elif status_code == httplib.UNAUTHORIZED:
raise exception.NotAuthorized(res.read())
elif status_code == httplib.FORBIDDEN:
raise exception.NotAuthorized(res.read())
elif status_code == httplib.NOT_FOUND:
raise exception.NotFound(res.read())
elif status_code == httplib.CONFLICT:
raise exception.Duplicate(res.read())
elif status_code == httplib.BAD_REQUEST:
raise exception.Invalid(res.read())
elif status_code == httplib.MULTIPLE_CHOICES:
raise exception.MultipleChoices(body=res.read())
elif status_code == httplib.INTERNAL_SERVER_ERROR:
raise Exception("Internal Server error: %s" % res.read())
else:
raise Exception("Unknown error occurred! %s" % res.read())
except (socket.error, IOError), e:
raise exception.ClientConnectionError(e)
def _seekable(self, body):
# pipes are not seekable, avoids sendfile() failure on e.g.
# cat /path/to/image | heat add ...
# or where add command is launched via popen
try:
os.lseek(body.fileno(), 0, os.SEEK_SET)
return True
except OSError as e:
return (e.errno != errno.ESPIPE)
def _sendable(self, body):
return (SENDFILE_SUPPORTED and
hasattr(body, 'fileno') and
self._seekable(body) and
not self.use_ssl)
def _iterable(self, body):
return isinstance(body, collections.Iterable)
def image_iterator(self, connection, headers, body):
if self._sendable(body):
return SendFileIterator(connection, body)
elif self._iterable(body):
return utils.chunkreadable(body)
else:
return ImageBodyIterator(body)
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a Webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
else:
return response.status
def _extract_params(self, actual_params, allowed_params):
"""
Extract a subset of keys from a dictionary. The filters key
will also be extracted, and each of its values will be returned
as an individual param.
:param actual_params: dict of keys to filter
:param allowed_params: list of keys that 'actual_params' will be
reduced to
:retval subset of 'params' dict
"""
result = {}
for param in actual_params:
if param in allowed_params:
result[param] = actual_params[param]
elif 'Parameters.member.' in param:
result[param] = actual_params[param]
return result
|
apache-2.0
| -8,148,688,654,965,902,000
| 35.880068
| 79
| 0.573032
| false
| 4.439406
| true
| false
| false
|
Kriechi/mitmproxy
|
mitmproxy/addons/tlsconfig.py
|
1
|
12516
|
import os
from pathlib import Path
from typing import List, Optional, TypedDict, Any
from OpenSSL import SSL
from mitmproxy import certs, ctx, exceptions, connection
from mitmproxy.net import tls as net_tls
from mitmproxy.options import CONF_BASENAME
from mitmproxy.proxy import context
from mitmproxy.proxy.layers import tls
# We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default.
# https://ssl-config.mozilla.org/#config=old
DEFAULT_CIPHERS = (
'ECDHE-ECDSA-AES128-GCM-SHA256', 'ECDHE-RSA-AES128-GCM-SHA256', 'ECDHE-ECDSA-AES256-GCM-SHA384',
'ECDHE-RSA-AES256-GCM-SHA384', 'ECDHE-ECDSA-CHACHA20-POLY1305', 'ECDHE-RSA-CHACHA20-POLY1305',
'DHE-RSA-AES128-GCM-SHA256', 'DHE-RSA-AES256-GCM-SHA384', 'DHE-RSA-CHACHA20-POLY1305', 'ECDHE-ECDSA-AES128-SHA256',
'ECDHE-RSA-AES128-SHA256', 'ECDHE-ECDSA-AES128-SHA', 'ECDHE-RSA-AES128-SHA', 'ECDHE-ECDSA-AES256-SHA384',
'ECDHE-RSA-AES256-SHA384', 'ECDHE-ECDSA-AES256-SHA', 'ECDHE-RSA-AES256-SHA', 'DHE-RSA-AES128-SHA256',
'DHE-RSA-AES256-SHA256', 'AES128-GCM-SHA256', 'AES256-GCM-SHA384', 'AES128-SHA256', 'AES256-SHA256', 'AES128-SHA',
'AES256-SHA', 'DES-CBC3-SHA'
)
class AppData(TypedDict):
server_alpn: Optional[bytes]
http2: bool
def alpn_select_callback(conn: SSL.Connection, options: List[bytes]) -> Any:
app_data: AppData = conn.get_app_data()
server_alpn = app_data["server_alpn"]
http2 = app_data["http2"]
if server_alpn and server_alpn in options:
return server_alpn
http_alpns = tls.HTTP_ALPNS if http2 else tls.HTTP1_ALPNS
for alpn in options: # client sends in order of preference, so we are nice and respect that.
if alpn in http_alpns:
return alpn
else:
return SSL.NO_OVERLAPPING_PROTOCOLS
class TlsConfig:
"""
This addon supplies the proxy core with the desired OpenSSL connection objects to negotiate TLS.
"""
certstore: certs.CertStore = None # type: ignore
# TODO: We should support configuring TLS 1.3 cipher suites (https://github.com/mitmproxy/mitmproxy/issues/4260)
# TODO: We should re-use SSL.Context options here, if only for TLS session resumption.
# This may require patches to pyOpenSSL, as some functionality is only exposed on contexts.
# TODO: This addon should manage the following options itself, which are current defined in mitmproxy/options.py:
# - upstream_cert
# - add_upstream_certs_to_client_chain
# - ciphers_client
# - ciphers_server
# - key_size
# - certs
# - cert_passphrase
# - ssl_verify_upstream_trusted_ca
# - ssl_verify_upstream_trusted_confdir
def load(self, loader):
loader.add_option(
name="tls_version_client_min",
typespec=str,
default=net_tls.DEFAULT_MIN_VERSION.name,
choices=[x.name for x in net_tls.Version],
help=f"Set the minimum TLS version for client connections.",
)
loader.add_option(
name="tls_version_client_max",
typespec=str,
default=net_tls.DEFAULT_MAX_VERSION.name,
choices=[x.name for x in net_tls.Version],
help=f"Set the maximum TLS version for client connections.",
)
loader.add_option(
name="tls_version_server_min",
typespec=str,
default=net_tls.DEFAULT_MIN_VERSION.name,
choices=[x.name for x in net_tls.Version],
help=f"Set the minimum TLS version for server connections.",
)
loader.add_option(
name="tls_version_server_max",
typespec=str,
default=net_tls.DEFAULT_MAX_VERSION.name,
choices=[x.name for x in net_tls.Version],
help=f"Set the maximum TLS version for server connections.",
)
def tls_clienthello(self, tls_clienthello: tls.ClientHelloData):
conn_context = tls_clienthello.context
only_non_http_alpns = (
conn_context.client.alpn_offers and
all(x not in tls.HTTP_ALPNS for x in conn_context.client.alpn_offers)
)
tls_clienthello.establish_server_tls_first = conn_context.server.tls and (
ctx.options.connection_strategy == "eager" or
ctx.options.add_upstream_certs_to_client_chain or
ctx.options.upstream_cert and (
only_non_http_alpns or
not conn_context.client.sni
)
)
def tls_start(self, tls_start: tls.TlsStartData):
if tls_start.conn == tls_start.context.client:
self.create_client_proxy_ssl_conn(tls_start)
else:
self.create_proxy_server_ssl_conn(tls_start)
def create_client_proxy_ssl_conn(self, tls_start: tls.TlsStartData) -> None:
client: connection.Client = tls_start.context.client
server: connection.Server = tls_start.context.server
entry = self.get_cert(tls_start.context)
if not client.cipher_list and ctx.options.ciphers_client:
client.cipher_list = ctx.options.ciphers_client.split(":")
# don't assign to client.cipher_list, doesn't need to be stored.
cipher_list = client.cipher_list or DEFAULT_CIPHERS
if ctx.options.add_upstream_certs_to_client_chain: # pragma: no cover
# exempted from coverage until https://bugs.python.org/issue18233 is fixed.
extra_chain_certs = server.certificate_list
else:
extra_chain_certs = []
ssl_ctx = net_tls.create_client_proxy_context(
min_version=net_tls.Version[ctx.options.tls_version_client_min],
max_version=net_tls.Version[ctx.options.tls_version_client_max],
cipher_list=cipher_list,
cert=entry.cert,
key=entry.privatekey,
chain_file=entry.chain_file,
request_client_cert=False,
alpn_select_callback=alpn_select_callback,
extra_chain_certs=extra_chain_certs,
dhparams=self.certstore.dhparams,
)
tls_start.ssl_conn = SSL.Connection(ssl_ctx)
tls_start.ssl_conn.set_app_data(AppData(
server_alpn=server.alpn,
http2=ctx.options.http2,
))
tls_start.ssl_conn.set_accept_state()
def create_proxy_server_ssl_conn(self, tls_start: tls.TlsStartData) -> None:
client: connection.Client = tls_start.context.client
server: connection.Server = tls_start.context.server
assert server.address
if ctx.options.ssl_insecure:
verify = net_tls.Verify.VERIFY_NONE
else:
verify = net_tls.Verify.VERIFY_PEER
if server.sni is True:
server.sni = client.sni or server.address[0]
if not server.alpn_offers:
if client.alpn_offers:
if ctx.options.http2:
server.alpn_offers = tuple(client.alpn_offers)
else:
server.alpn_offers = tuple(x for x in client.alpn_offers if x != b"h2")
elif client.tls_established:
# We would perfectly support HTTP/1 -> HTTP/2, but we want to keep things on the same protocol version.
# There are some edge cases where we want to mirror the regular server's behavior accurately,
# for example header capitalization.
server.alpn_offers = []
elif ctx.options.http2:
server.alpn_offers = tls.HTTP_ALPNS
else:
server.alpn_offers = tls.HTTP1_ALPNS
if not server.cipher_list and ctx.options.ciphers_server:
server.cipher_list = ctx.options.ciphers_server.split(":")
# don't assign to server.cipher_list, doesn't need to be stored.
cipher_list = server.cipher_list or DEFAULT_CIPHERS
client_cert: Optional[str] = None
if ctx.options.client_certs:
client_certs = os.path.expanduser(ctx.options.client_certs)
if os.path.isfile(client_certs):
client_cert = client_certs
else:
server_name: str = server.sni or server.address[0]
p = os.path.join(client_certs, f"{server_name}.pem")
if os.path.isfile(p):
client_cert = p
ssl_ctx = net_tls.create_proxy_server_context(
min_version=net_tls.Version[ctx.options.tls_version_server_min],
max_version=net_tls.Version[ctx.options.tls_version_server_max],
cipher_list=cipher_list,
verify=verify,
sni=server.sni,
ca_path=ctx.options.ssl_verify_upstream_trusted_confdir,
ca_pemfile=ctx.options.ssl_verify_upstream_trusted_ca,
client_cert=client_cert,
alpn_protos=server.alpn_offers,
)
tls_start.ssl_conn = SSL.Connection(ssl_ctx)
if server.sni:
tls_start.ssl_conn.set_tlsext_host_name(server.sni.encode())
tls_start.ssl_conn.set_connect_state()
def running(self):
# FIXME: We have a weird bug where the contract for configure is not followed and it is never called with
# confdir or command_history as updated.
self.configure("confdir") # pragma: no cover
def configure(self, updated):
if "confdir" not in updated and "certs" not in updated:
return
certstore_path = os.path.expanduser(ctx.options.confdir)
self.certstore = certs.CertStore.from_store(
path=certstore_path,
basename=CONF_BASENAME,
key_size=ctx.options.key_size,
passphrase=ctx.options.cert_passphrase.encode("utf8") if ctx.options.cert_passphrase else None,
)
if self.certstore.default_ca.has_expired():
ctx.log.warn(
"The mitmproxy certificate authority has expired!\n"
"Please delete all CA-related files in your ~/.mitmproxy folder.\n"
"The CA will be regenerated automatically after restarting mitmproxy.\n"
"See https://docs.mitmproxy.org/stable/concepts-certificates/ for additional help.",
)
for certspec in ctx.options.certs:
parts = certspec.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = Path(parts[1]).expanduser()
if not cert.exists():
raise exceptions.OptionsError(f"Certificate file does not exist: {cert}")
try:
self.certstore.add_cert_file(
parts[0],
cert,
passphrase=ctx.options.cert_passphrase.encode("utf8") if ctx.options.cert_passphrase else None,
)
except ValueError as e:
raise exceptions.OptionsError(f"Invalid certificate format for {cert}: {e}") from e
def get_cert(self, conn_context: context.Context) -> certs.CertStoreEntry:
"""
This function determines the Common Name (CN), Subject Alternative Names (SANs) and Organization Name
our certificate should have and then fetches a matching cert from the certstore.
"""
altnames: List[str] = []
organization: Optional[str] = None
# Use upstream certificate if available.
if conn_context.server.certificate_list:
upstream_cert = conn_context.server.certificate_list[0]
if upstream_cert.cn:
altnames.append(upstream_cert.cn)
altnames.extend(upstream_cert.altnames)
if upstream_cert.organization:
organization = upstream_cert.organization
# Add SNI. If not available, try the server address as well.
if conn_context.client.sni:
altnames.append(conn_context.client.sni)
elif conn_context.server.address:
altnames.append(conn_context.server.address[0])
# As a last resort, add *something* so that we have a certificate to serve.
if not altnames:
altnames.append("mitmproxy")
# only keep first occurrence of each hostname
altnames = list(dict.fromkeys(altnames))
# RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity.
# In other words, the Common Name is irrelevant then.
return self.certstore.get_cert(altnames[0], altnames, organization)
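# Minimal illustration (not part of the addon) of the SAN handling above; the
# hostnames are made up. Duplicates are dropped while order is preserved, and
# the first remaining entry is used as the certificate's Common Name:
#
#     altnames = ["example.com", "www.example.com", "example.com"]
#     altnames = list(dict.fromkeys(altnames))  # ["example.com", "www.example.com"]
#     # self.certstore.get_cert(altnames[0], altnames, organization)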
|
mit
| -6,556,963,244,387,718,000
| 42.762238
| 119
| 0.622803
| false
| 3.723892
| true
| false
| false
|
Carreau/difflib2.py
|
examples/lcs_cutmodule.py
|
1
|
5199
|
from __future__ import print_function
from array import array
from itertools import islice
def lcs_cut2(s1, s2, lcs_low_bound=0, bg=None, debug=False):
"""Compule the length of the LCS 2 sequences s1 and s2.
lcs_low_bound : (int), hint of lower bound for the lenght of the lcs
to search for. Default to 0.
Algorithmic description:
This is a derivation of Hirschberg's algorithm which include some
optimisation for specific case.
This shoudl use an O(n) memory (n = len(s1)) and should have a worse
case scenario time complexity of O(n**2).
In the best case scenario, (l ~ n) the time complexity is closer to O(n*l)
where l is the lenght of the longest common subsequence.
Though, detail of implementaiton of s1 and s2 object slicing will
affect the optimal performace.
bg is four debug purpose, to see how the algorithme behave visually
using iptyhonblocks. uncomment bg lines below to use.
"""
m = len(s1)
n = len(s2)
if n==0 or m==0:
return 0
# rng is for row "rang" in french, "c" is for current and "p" for previous.
# arrays are n+1 so that the last element is 0. This allows
# us to avoid special casing j=0 as j-1 will wrap around.
# The alternative is to offset all indexes by 1, which becomes hard to
# track.
rngc = array('i',[0 for x in range(n+1)]) ## current row
rngp = array('i',[0 for x in range(n+1)]) ## previous row
# current max value of the LCS during the search.
currentmax = lcs_low_bound
# corresponds to rngc[j-1], used to avoid a lookup in the array
# through the loop to shave off some execution time.
rngcjm = None
# lower and upper bound for current loop on s2/j
limm,limp = 0,0
# lower bound for iteration on s1/i and
# another lower bound s2/j
mini,minj = 0,0
if debug:
import pdb; pdb.set_trace()
for i,c1 in enumerate(s1):
# current row become previous, and we reuse previous to avoid
# creating a new empty list.
rngc, rngp = rngp, rngc
limm,limp= max(i-m+currentmax,0,minj-1),min(i+n-currentmax+1,n)
rngcjm = rngc[limm-1]
if i < mini:
print('continue')
continue
isl = islice(s2,limm,limp)
rsl = range(limm,limp)
zsl = zip(rsl,isl)
for j,c2 in zsl:
# if bg:
# bg[i,j].green=255
if c1 == c2 :
if i == 0 or j == 0:
newval = 1
else:
newval = rngp[j-1]+1
# here we will peek ahead as far as possible
# while the two strings are matching,
# for strings with high similarity
# this will give us hints on which parts of the
# lcs matrix we do not need to explore.
#
# we do this only once, if we are at
# the beginning of the matching streak.
if s1[i-1] != s2[j-1] or i==0 or j==0:
lookahead = -1
k = min(m-i,n-j)
for cx,cy in zip(s1[i:i+k],s2[j:j+k]):
if cx==cy:
lookahead +=1
else:
break
# if bg:
# for xx in range(0,lookahead):
# bg[i+xx,j+xx].blue=255
tmp = rngc[j]+lookahead
# if we are on i,j and have a value M
# then it is useless to process columns that have :
# - a j value lower than M-j
# - a i value lower than M-i
lminj=tmp-j
lmini=tmp-i
if lmini > mini:
mini=lmini
if lminj > minj:
minj=lminj
for xx in range(0,minj):
rngp[xx]=tmp-1
rngc[xx]=tmp-1
# if bg:
# for xx in range(0,lminj):
# for lh in range(i,m):
# bg[lh,xx].red =255
# for xx in range(0,lmini):
# for lh in range(j,n):
# bg[xx,lh].red =255
# bg[i+lookahead,j+lookahead].red =255
if j >= limp+1:
break
if tmp > currentmax:
currentmax = tmp
assert(currentmax <=m)
assert(currentmax <=n)
limp= min(i+n-currentmax+1,n)
if newval > currentmax:
currentmax = newval
else :
b = rngp[j]
newval = rngcjm if rngcjm > b else b
# assert(newval <= i+1)
# assert(newval <= j+1)
rngc[j] = rngcjm = newval
print(rngc)
print('==',rngc)
return rngc[-2]
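# Minimal usage sketch (not part of the original example module); the input
# strings are arbitrary. Note that this experimental version also prints
# intermediate rows and "continue" markers while it runs.
if __name__ == '__main__':
    length = lcs_cut2("ABCBDAB", "BDCABA")
    print("computed lcs length:", length)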
|
bsd-3-clause
| -6,169,857,475,565,440,000
| 36.956204
| 79
| 0.476053
| false
| 3.8369
| false
| false
| false
|
myriadrf/pyLMS7002M
|
pyLMS7002M/LimeSDRMini.py
|
1
|
11270
|
#***************************************************************
#* Name: LimeSDRMini.py
#* Purpose: Class implementing LimeSDRMini functions
#* Author: Lime Microsystems ()
#* Created: 2018-04-16
#* Copyright: Lime Microsystems (limemicro.com)
#* License:
#**************************************************************
from weakproxy import *
from copy import copy
from LMS7002 import *
from timeit import default_timer as timer
import atexit
from cyLimeLib import *
class LimeSDRMini(object):
def __init__(self, fRef = 40.0e6, verbose=0):
"""
Initialize communication with LimeSDRMini.
"""
boards = cyLimeLib.getDeviceList()
if len(boards)==0:
raise ValueError("LimeSDR not found")
self.cyDev = None
for i in range(0,len(boards)):
if "LimeSDR Mini" in boards[i]:
self.cyDev = cyLimeLib(boards[i])
break
if self.cyDev==None:
raise ValueError("LimeSDRMini not found")
self.usb = self.cyDev
# http://stackoverflow.com/questions/8907905/del-myclass-doesnt-call-object-del
# https://docs.python.org/3/reference/datamodel.html#object.__del__
# solution is to avoid __del__, define an explict close() and call it atexit
atexit.register(self.close)
#self.usb.setConfiguration()
self.verbose = verbose
self.bulkControl = False
self.fRef = fRef # reference frequency
FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD = self.getInfo()
if DEV_TYPE!=17:
ret = "FW_VER : "+str(FW_VER)+"\n"
ret += "DEV_TYPE : "+str(DEV_TYPE)+"\n"
ret += "LMS_PROTOCOL_VER : " + str(LMS_PROTOCOL_VER)+"\n"
ret += "HW_VER : " + str(HW_VER)+"\n"
ret += "EXP_BOARD : " + str(EXP_BOARD)+"\n"
raise ValueError("The board is not LimeSDR.\nBoard info:\n"+ret)
if verbose>0:
self.printInfo()
#
# Initialize on-board chips
#
self.LMS7002 = LMS7002(SPIwriteFn=Proxy(self.LMS7002_Write), SPIreadFn=Proxy(self.LMS7002_Read)
, verbose=verbose, MCUProgram=Proxy(self.MCUProgram), fRef = self.fRef)
self.LMS7002.MIMO = 'MIMO'
def close(self):
"""
Close communication with LimeSDR
"""
del self.cyDev
@staticmethod
def findLMS7002(backend="PyUSB"):
return cyLimeLib.getDeviceList()
def log(self, logMsg):
print logMsg
def getCommandNumber(self, cmdName):
if cmdName == "CMD_GET_INFO":
return 0x00
elif cmdName == "CMD_LMS7002_RST":
return 0x20
elif cmdName == "LMS_RST_DEACTIVATE":
return 0x00
elif cmdName == "LMS_RST_ACTIVATE":
return 0x01
elif cmdName == "LMS_RST_PULSE":
return 0x02
elif cmdName == "CMD_LMS7002_WR":
return 0x21
elif cmdName == "CMD_LMS7002_RD":
return 0x22
elif cmdName == "CMD_PROG_MCU":
return 0x2C
else:
raise ValueError("Unknown command "+cmdName)
def getLMS7002(self):
return self.LMS7002
#
# Low level communication
#
@staticmethod
def bytes2string(bytes):
"""
Convert the byte array to string.
Used for serial communication.
"""
s = ""
for i in range(0,len(bytes)):
s += chr(bytes[i])
return s
@staticmethod
def string2bytes(string):
"""
Convert the string to byte array.
Used for serial communication.
"""
bytes = [0]*int(len(string))
for i in range(0, len(string)):
bytes[i] = ord(string[i])
return bytes
def sendCommand(self, command, nDataBlocks=0, periphID=0, data=[]):
"""
Send the command to LimeSDR.
Function returns (status, data)
"""
nData = len(data)
if nData>56:
raise ValueError("Length of data must be less than 56, "+str(nData)+" bytes given")
return self.cyDev.transferLMS64C(command, data)
#
# Utility functions
#
def getInfo(self):
"""
Get the information about LimeSDR.
Function returns
(FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD)
"""
command = self.getCommandNumber("CMD_GET_INFO")
status, rxData = self.sendCommand(command)
if status != 1:
raise IOError("Command returned with status "+str(status))
FW_VER = rxData[0]
DEV_TYPE = rxData[1]
LMS_PROTOCOL_VER = rxData[2]
HW_VER = rxData[3]
EXP_BOARD = rxData[4]
return (FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD)
def printInfo(self):
"""
Print info about LimeSDR
"""
FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD = self.getInfo()
self.log("FW_VER : "+str(FW_VER))
self.log("DEV_TYPE : "+str(DEV_TYPE))
self.log("LMS_PROTOCOL_VER : " + str(LMS_PROTOCOL_VER))
self.log("HW_VER : " + str(HW_VER))
self.log("EXP_BOARD : " + str(EXP_BOARD))
def LMS7002_Reset(self, rstType="pulse"):
"""
Reset LMS7002.
rstType specifies the type of reset:
pulse - activate and deactivate reset
activate - activate reset
deactivate - deactivate reset
"""
command = self.getCommandNumber("CMD_LMS7002_RST")
if rstType=="pulse":
data = [self.getCommandNumber("LMS_RST_PULSE")]
elif rstType=="activate":
data = [self.getCommandNumber("LMS_RST_ACTIVATE")]
elif rstType=="deactivate":
data = [self.getCommandNumber("LMS_RST_DEACTIVATE")]
else:
raise ValueError("Invalid reset type "+str(rstType))
rxStatus, rxData = self.sendCommand(command, data=data)
if rxStatus != 1:
raise IOError("Command returned with status "+str(status))
self.LMS7002.loadResetValues()
self.cyDev.LMSInit()
def LMS7002_Write(self, regList, packetSize=14):
"""
Write the data to LMS7002 via SPI interface.
regList is a list of registers to write in the format:
[ (regAddr, regData), (regAddr, regData), ...]
packetSize controls the number of register writes in a single USB transfer
"""
command = self.getCommandNumber("CMD_LMS7002_WR")
nDataBlocks = len(regList)
toSend = copy(regList)
while len(toSend)>0:
nPackets = 0
data = []
while nPackets<packetSize and len(toSend)>0:
regAddr, regData = toSend[0]
toSend.pop(0)
regAddrH = regAddr >> 8
regAddrL = regAddr % 256
regDataH = regData >> 8
regDataL = regData % 256
data += [regAddrH, regAddrL, regDataH, regDataL]
nPackets += 1
rxStatus, rxData = self.sendCommand(command, nDataBlocks = nPackets, data=data)
if rxStatus != 1:
raise IOError("Command returned with status "+str(rxStatus))
def LMS7002_Read(self, regList, packetSize=14):
"""
Read the data from LMS7002 via SPI interface.
regList is a list of registers to read in the format:
[ regAddr, regAddr, ...]
packetSize controls the number of register writes in a single USB transfer
"""
command = self.getCommandNumber("CMD_LMS7002_RD")
nDataBlocks = len(regList)
toRead = copy(regList)
regData = []
while len(toRead)>0:
nPackets = 0
data = []
while nPackets<packetSize and len(toRead)>0:
regAddr = toRead[0]
toRead.pop(0)
regAddrH = regAddr >> 8
regAddrL = regAddr % 256
data += [regAddrH, regAddrL]
nPackets += 1
rxStatus, rxData = self.sendCommand(command, nDataBlocks = nPackets, data=data)
if rxStatus != 1:
raise IOError("Command returned with status "+str(rxStatus))
for i in range(0, nPackets):
regDataH = rxData[i*4+2]
regDataL = rxData[i*4+3]
regData.append( (regDataH << 8) + regDataL)
return regData
#
# LMS7002 MCU program
#
def MCUProgram(self, mcuProgram, Mode):
ver, rev, mask = self.getLMS7002().chipInfo
if mask==1:
# MCU has 16k RAM
if len(mcuProgram)>16384:
raise ValueError("MCU program for mask 1 chips must be less than 16 kB. Given program size:"+str(len(mcuProgram)))
if len(mcuProgram)==8192: # Check if program is 8k
mcuProgram += [0]*8192 # Extend it to 16k
self._MCUProgram_Direct(mcuProgram, Mode)
else:
# MCU has 8k RAM
if len(mcuProgram)>8192:
raise ValueError("MCU program for mask 0 chips must be less than 8 kB. Given program size:"+str(len(mcuProgram)))
self._MCUProgram_Direct(mcuProgram, Mode)
def _MCUProgram_Direct(self, mcuProgram, Mode):
"""
Write the data to LMS7002 MCU via SPI interface.
MCU is programmed directly by using bulk interface MCU commands.
mcuProgram is 8192 or 16384 bytes long array holding the MCU program.
mode selects the MCU programming mode.
"""
if Mode not in [0, 1,2,3, 'EEPROM_AND_SRAM', 'SRAM', 'SRAM_FROM_EEPROM']:
raise ValueError("Mode should be [1,2,3, 'EEPROM_AND_SRAM', 'SRAM', 'SRAM_FROM_EEPROM']")
if Mode==0:
return
elif Mode==1 or Mode=='EEPROM_AND_SRAM':
mode = 1
elif Mode==2 or Mode=='SRAM':
mode = 2
else:
mode = 3
if len(mcuProgram)!=8192 and len(mcuProgram)!=16384:
raise ValueError("MCU program should be 8192 or 16384 bytes long")
toSend = [ (2, 0), (2, mode)] # Write 0 to address 2, write mode to address 2 (mSPI_CTRL)
self.LMS7002_Write(toSend)
lms7002 = self.getLMS7002()
pos = 0
while pos<len(mcuProgram):
startTime = timer()
while lms7002.mSPI.EMPTY_WRITE_BUFF==0:
if timer()-startTime>1:
raise IOError("MCU programming timeout")
for j in range(0, 4):
toSend = []
for i in range(0, 8):
toSend.append( (4, mcuProgram[pos]) )
pos += 1
self.LMS7002_Write(toSend)
if mode==3:
break
startTime = timer()
while lms7002.mSPI.PROGRAMMED==0:
if timer()-startTime>1:
raise IOError("MCU programming timeout")
|
apache-2.0
| -7,336,002,023,781,494,000
| 34.329154
| 130
| 0.536823
| false
| 3.78569
| false
| false
| false
|
rgblabs/rgbTools
|
rgbTools/utils/filesystem.py
|
1
|
2692
|
import maya.cmds as cmds
def which (program):
'''
If application is found, returns path.
This works with both full application paths, and applications available
within the OS's defined PATH
'''
import os
def is_exe (fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
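# Illustrative usage (the executable name is just an example):
# which('python') -> '/usr/bin/python' on a typical Linux install, or None
# if the name is neither an executable path nor found on the PATH.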
def getOSPaths ():
import os
paths = []
for path in os.environ["PATH"].split(os.pathsep):
paths.append(path.strip('"'))
return paths
def getPythonPaths ():
import sys
paths = []
for pythonPath in sys.path:
paths.append(pythonPath)
return paths
def getUserPaths ():
upaths = {}
upaths['userAppDir'] = cmds.internalVar(userAppDir=1)
upaths['userScriptDir'] = cmds.internalVar(userScriptDir=1)
upaths['userPrefDir'] = cmds.internalVar(userPrefDir=1)
upaths['userPresetsDir'] = cmds.internalVar(userPresetsDir=1)
upaths['userShelfDir'] = cmds.internalVar(userShelfDir=1)
upaths['userMarkingMenuDir'] = cmds.internalVar(userMarkingMenuDir=1)
upaths['userBitmapsDir'] = cmds.internalVar(userBitmapsDir=1)
upaths['userTmpDir'] = cmds.internalVar(userTmpDir=1)
upaths['userWorkspaceDir'] = cmds.internalVar(userWorkspaceDir=1)
return upaths
def getEnvPaths():
import os
import sys
import maya.mel as mel
scriptPaths = mel.eval("getenv \"MAYA_SCRIPT_PATH\"")
plugInPaths = mel.eval("getenv \"MAYA_PLUG_IN_PATH\"")
pythonPaths = mel.eval("getenv \"PYTHONPATH\"")
iconPaths = mel.eval("getenv \"XBMLANGPATH\"")
pathPaths = mel.eval("getenv \"PATH\"")
sysPaths = sys.path
return {
'MAYA_SCRIPT_PATH' : scriptPaths.split(os.pathsep),
'MAYA_PLUG_IN_PATH' : plugInPaths.split(os.pathsep),
'PYTHONPATH' : pythonPaths.split(os.pathsep),
'XBMLANGPATH' : iconPaths.split(os.pathsep),
'PATH' : pathPaths.split(os.pathsep),
'sys' : sysPaths
}
def getCurrentFilePath ():
return cmds.file(query=True, sceneName=True)
def crashRecoverDialog ():
dirpath = cmds.internalVar(userTmpDir=1)
mask = dirpath+'*.ma'
filepath = cmds.fileDialog(title='Recover Crash File...', directoryMask=mask)
if filepath != '':
cmds.file(filepath, open=True)
cmds.file(renameToSave=True)
|
mit
| -6,761,105,692,595,262,000
| 28.911111
| 81
| 0.635587
| false
| 3.403287
| false
| false
| false
|
sevagas/macro_pack
|
src/vbLib/Base64ToText.py
|
1
|
1559
|
VBA = \
r"""
Function Base64ToText(ByVal vCode)
Dim oXML, oNode
Dim tempString As String
tempString = "Msxm"
tempString = tempString & "l2.DO"
tempString = tempString & "MDoc"
tempString = tempString & "ument.3.0"
Set oXML = CreateObject(tempString)
Set oNode = oXML.CreateElement("base64")
oNode.DataType = "bin.base64"
oNode.Text = vCode
Base64ToText = Stream_BinaryToString(oNode.nodeTypedValue)
Set oNode = Nothing
Set oXML = Nothing
End Function
'Stream_BinaryToString Function
'2003 Antonin Foller, http://www.motobit.com
'Binary - VT_UI1 | VT_ARRAY data To convert To a string
Private Function Stream_BinaryToString(Binary)
Const adTypeText = 2
Const adTypeBinary = 1
'Create Stream object
Dim BinaryStream 'As New Stream
Dim tmpString As String
tmpString = "ADO"
tmpString = tmpString & "DB.St"
tmpString = tmpString & "ream"
Set BinaryStream = CreateObject(tmpString)
'Specify stream type - we want To save binary data.
BinaryStream.Type = adTypeBinary
'Open the stream And write binary data To the object
BinaryStream.Open
BinaryStream.Write Binary
'Change stream type To text/string
BinaryStream.Position = 0
BinaryStream.Type = adTypeText
'Specify charset For the output text (unicode) data.
BinaryStream.Charset = "us-ascii"
'Open the stream And get text/string data from the object
Stream_BinaryToString = BinaryStream.ReadText
Set BinaryStream = Nothing
End Function
"""
|
apache-2.0
| -7,680,315,020,665,851,000
| 27.87037
| 62
| 0.695959
| false
| 3.676887
| false
| false
| false
|
wevoice/wesub
|
apps/socialauth/views.py
|
1
|
12656
|
from django.shortcuts import render_to_response, redirect
from django.contrib import messages
from django.template import RequestContext
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import logout
from django.utils.http import urlencode
from auth.backends import OpenIdBackend
from socialauth.models import AuthMeta
from socialauth.forms import EditProfileForm
from thirdpartyaccounts.models import TwitterAccount
"""
from socialauth.models import YahooContact, TwitterContact, FacebookContact,\
SocialProfile, GmailContact
"""
from openid_consumer.views import begin
from socialauth.lib import oauthtwitter2 as oauthtwitter
from socialauth.lib.facebook import get_facebook_signature
from oauth import oauth
from datetime import datetime
from django.utils.http import urlquote
from utils.translation import get_user_languages_from_cookie
from auth.models import UserLanguage
TWITTER_CONSUMER_KEY = getattr(settings, 'TWITTER_CONSUMER_KEY', '')
TWITTER_CONSUMER_SECRET = getattr(settings, 'TWITTER_CONSUMER_SECRET', '')
def get_url_host(request):
# FIXME: Duplication
if request.is_secure():
protocol = 'https'
else:
protocol = 'http'
host = request.get_host()
return '%s://%s' % (protocol, host)
def login_page(request):
payload = {'fb_api_key':settings.FACEBOOK_API_KEY,}
return render_to_response('socialauth/login_page.html', payload, RequestContext(request))
def twitter_login(request, next=None):
callback_url = None
if next is not None:
callback_url = '%s%s?next=%s' % \
(get_url_host(request),
reverse("socialauth_twitter_login_done"),
urlquote(next))
twitter = oauthtwitter.TwitterOAuthClient(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
request_token = twitter.fetch_request_token(callback_url)
request.session['request_token'] = request_token.to_string()
signin_url = twitter.authorize_token_url(request_token)
return HttpResponseRedirect(signin_url)
def twitter_login_done(request):
request_token = request.session.get('request_token', None)
oauth_verifier = request.GET.get("oauth_verifier", None)
# If there is no request_token for session,
# Means we didn't redirect user to twitter
if not request_token:
# Redirect the user to the login page,
# So the user can click on the sign-in with twitter button
return HttpResponse("We didn't redirect you to twitter...")
token = oauth.OAuthToken.from_string(request_token)
# If the token from session and token from twitter does not match
# means something bad happened to tokens
if token.key != request.GET.get('oauth_token', 'no-token'):
del request.session['request_token']
if request.GET.get('denied', None) is not None:
messages.info(request, "Twitter authorization cancelled.")
return redirect('profiles:account')
messages.error(request, "Something wrong! Tokens do not match...")
# Redirect the user to the login page
return redirect('auth:login')
twitter = oauthtwitter.TwitterOAuthClient(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
access_token = twitter.fetch_access_token(token, oauth_verifier)
request.session['access_token'] = access_token.to_string()
if request.session.get('no-login', False):
# The user is trying to link a Twitter account to their Amara account.
if not request.user.is_authenticated():
messages.error(request, 'You must be logged in.')
return redirect('auth:login')
try:
from socialauth.lib.oauthtwitter import OAuthApi
twitter = OAuthApi(TWITTER_CONSUMER_KEY,
TWITTER_CONSUMER_SECRET, access_token)
userinfo = twitter.GetUserInfo()
except Exception, e:
# TODO: Raise something more useful here
raise e
username = userinfo.screen_name
try:
account = TwitterAccount.objects.get(username=username)
if request.user.pk != account.user.pk:
messages.error(request, 'Account already linked')
return redirect('profiles:account')
except TwitterAccount.DoesNotExist:
TwitterAccount.objects.create(user=request.user,
username=username, access_token=access_token.to_string())
del request.session['no-login']
messages.info(request, 'Successfully linked a Twitter account')
return redirect('profiles:account')
request.session['access_token'] = access_token.to_string()
user = authenticate(access_token=access_token)
# if user is authenticated then login user
if user:
if not user.userlanguage_set.exists():
langs = get_user_languages_from_cookie(request)
for l in langs:
UserLanguage.objects.get_or_create(user=user, language=l)
login(request, user)
else:
# We were not able to authenticate user
# Redirect to login page
del request.session['access_token']
del request.session['request_token']
return HttpResponseRedirect(reverse('socialauth_login_page'))
# authentication was successful, use is now logged in
return HttpResponseRedirect(request.GET.get('next', settings.LOGIN_REDIRECT_URL))
def openid_login(request, confirmed=True):
if 'openid_identifier' in request.GET:
user_url = request.GET.get('openid_identifier')
request.session['openid_provider'] = user_url
return begin(request, user_url = user_url, confirmed=confirmed)
else:
if 'google.com' in request.POST.get('openid_url', ''):
request.session['openid_provider'] = 'Google'
return begin(request, user_url='https://www.google.com/accounts/o8/id', confirmed=confirmed)
elif 'yahoo.com' in request.POST.get('openid_url', ''):
request.session['openid_provider'] = 'Yahoo'
else:
request.session['openid_provider'] = 'Openid'
return begin(request, confirmed=confirmed)
def gmail_login(request):
request.session['openid_provider'] = 'Google'
return begin(request, user_url='https://www.google.com/accounts/o8/id')
def udacity_login(request, confirmed=True):
request.session['openid_provider'] = 'Udacity'
return begin(request, user_url='https://www.udacity.com/openid/server', confirmed=confirmed)
def gmail_login_complete(request):
pass
def yahoo_login(request):
request.session['openid_provider'] = 'Yahoo'
return begin(request, user_url='http://yahoo.com/')
def openid_done(request, provider=None, confirmed=True):
"""
When the request reaches here, the user has completed the Openid
authentication flow. He has authorised us to login via Openid, so
request.openid is populated.
After coming here, we want to check if we are seeing this openid first time.
If we are, we will create a new Django user for this Openid, else login the
existing openid.
"""
if not provider:
provider = request.session.get('openid_provider', '')
if request.openid:
#check for already existing associations
openid_key = str(request.openid)
#authenticate and login
if not confirmed:
(existing, suggested_email) = OpenIdBackend.pre_authenticate(openid_key=openid_key, request=request, provider=provider)
if not existing:
if provider == 'Udacity':
return redirect('auth:confirm_create_user', 'udacity', suggested_email)
elif provider == 'Openid':
openid_url = request.GET.get('openid_url', '')
response = redirect('auth:confirm_create_user', 'openid', suggested_email)
if openid_url:
response['Location'] += '?' + urlencode({'openid_url': openid_url})
return response
else:
return redirect('auth:confirm_create_user', provider, suggested_email)
email = request.GET.get('email', None)
user = authenticate(openid_key=openid_key, request=request, provider=provider, email=email)
if user:
if not user.userlanguage_set.exists():
langs = get_user_languages_from_cookie(request)
for l in langs:
UserLanguage.objects.get_or_create(user=user, language=l)
login(request, user)
next = None
if 'openid_next' in request.session:
next = request.session.get('openid_next')
if 'next' in request.GET:
next = request.GET['next']
if next is not None and len(next.strip()) > 0 :
return HttpResponseRedirect(next)
redirect_url = reverse('profiles:profile', args=(user,))
return HttpResponseRedirect(redirect_url)
else:
return HttpResponseRedirect(settings.LOGIN_URL)
else:
return HttpResponseRedirect(settings.LOGIN_URL)
def facebook_login_done(request):
API_KEY = settings.FACEBOOK_API_KEY
API_SECRET = settings.FACEBOOK_SECRET_KEY
REST_SERVER = 'http://api.facebook.com/restserver.php'
# FB Connect will set a cookie with a key == FB App API Key if the user has been authenticated
if API_KEY in request.COOKIES:
signature_hash = get_facebook_signature(API_KEY, API_SECRET, request.COOKIES, True)
# The hash of the values in the cookie to make sure they're not forged
# AND If session hasn't expired
if(signature_hash == request.COOKIES[API_KEY]) and (datetime.fromtimestamp(float(request.COOKIES[API_KEY+'_expires'])) > datetime.now()):
#Log the user in now.
user = authenticate(cookies=request.COOKIES)
if user:
# if user is authenticated then login user
login(request, user)
return HttpResponseRedirect(reverse('socialauth_signin_complete'))
else:
#Delete cookies and redirect to main Login page.
del request.COOKIES[API_KEY + '_session_key']
del request.COOKIES[API_KEY + '_user']
return HttpResponseRedirect(reverse('socialauth_login_page'))
return HttpResponseRedirect(reverse('socialauth_login_page'))
def openid_login_page(request):
return render_to_response('openid/index.html', {}, RequestContext(request))
def signin_complete(request):
payload = {}
return render_to_response('socialauth/signin_complete.html', payload, RequestContext(request))
@login_required
def editprofile(request):
if request.method == 'POST':
edit_form = EditProfileForm(user=request.user, data=request.POST)
if edit_form.is_valid():
user = edit_form.save()
try:
user.authmeta.is_profile_modified = True
user.authmeta.save()
except AuthMeta.DoesNotExist:
pass
if user.openidprofile_set.all().count():
openid_profile = user.openidprofile_set.all()[0]
openid_profile.is_valid_username = True
openid_profile.save()
try:
#If there is a profile. notify that we have set the username
profile = user.get_profile()
profile.is_valid_username = True
profile.save()
except:
pass
request.user.message_set.create(message='Your profile has been updated.')
return HttpResponseRedirect('.')
if request.method == 'GET':
edit_form = EditProfileForm(user = request.user)
payload = {'edit_form':edit_form}
return render_to_response('socialauth/editprofile.html', payload, RequestContext(request))
def social_logout(request):
# Todo
# still need to handle FB cookies, session etc.
# let the openid_consumer app handle openid-related cleanup
from openid_consumer.views import signout as oid_signout
oid_signout(request)
# normal logout
logout_response = logout(request)
if getattr(settings, 'LOGOUT_REDIRECT_URL', None):
return HttpResponseRedirect(settings.LOGOUT_REDIRECT_URL)
else:
return logout_response
|
agpl-3.0
| 7,060,478,658,976,568,000
| 41.469799
| 145
| 0.656606
| false
| 4.200465
| false
| false
| false
|
itu-oss-project-team/oss-github-analysis-project
|
github_analysis_tool/analyzer/tf-idf.py
|
1
|
3362
|
import os.path
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from math import log10
import yaml
from github_analysis_tool.services.database_service import DatabaseService
class Tfidf:
def __init__(self, secret_config):
# Generate a github_requester with imported GitHub tokens
self.__databaseService = DatabaseService(secret_config['mysql'])
self.__commits = []
def addCommitToDictionary(self, commit_sha, commit_message):
commit_msg = str(commit_message).encode('utf-8')
commit_msg = str(commit_msg)
#sha, message, tf-idf
self.__commits.append([commit_sha, commit_msg, 0])
def printValues(self, commitList):
print("size: " + str(len(commitList)) + "\n")
for commit in commitList:
commit_msg = str(commit[1])
print(commit_msg + " tf-idf: " + str(commit[2]))
def generateContainer(self):
repos = self.__databaseService.getAllRepos(get_only_ids=True)
for repo_id in repos:
commits = self.__databaseService.getCommitsOfRepo(repo_id, get_only_shas=False)
for commit in commits:
self.addCommitToDictionary(commit["sha"], commit["message"])
return
def tf_idf(self, keywords, threshold_value=0):
scored_commits = []
count_of_all_occurances=0
print("Total number of commits: " + str(len(self.__commits)))
#idf calculation
for commit in self.__commits:
commit_msg = commit[1]
for word in commit_msg.split():
for keyword in keywords:
if word == keyword:
count_of_all_occurances += 1
break
idf = log10(len(self.__commits)/count_of_all_occurances)
print("idf: " + str(idf))
#tf calculation for each commit message
for commit in self.__commits:
commit_msg = commit[1]
count_of_similarities_in_msg=0
for word in commit_msg.split():
for keyword in keywords:
if word == keyword:
count_of_similarities_in_msg += 1
score = count_of_similarities_in_msg / len(commit_msg.split())
score = score * idf
commit[2] = score
if score > threshold_value:
#sha, message, score
scored_commits.append([commit[0], commit[1], commit[2]])
scored_commits.sort(key=lambda x:x[2])
return scored_commits
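# Worked example (illustrative numbers): with 1000 commits and 100 keyword
# occurrences, idf = log10(1000/100) = 1.0; a 10-word message containing a
# keyword twice gets tf = 2/10 = 0.2, so its score is 0.2 * 1.0 = 0.2.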
def main():
with open(os.path.join(os.path.dirname(__file__), os.pardir, 'config_secret.yaml'), 'r') as ymlfile:
secret_config = yaml.load(ymlfile)
tfidf = Tfidf(secret_config)
tfidf.generateContainer()
print("\nBUG-FIX COMMITS\n")
bugfix_commits = tfidf.tf_idf(["Fix", "fixed", "edit", "edited", "modify", "modified", "correct", "corrected"], 0.0)
tfidf.printValues(bugfix_commits)
print("\nADD NEW FEATURE COMMITS\n")
add_commits = tfidf.tf_idf(["add", "added", "implement", "implemented", "feat", "feature"], 0.0)
tfidf.printValues(add_commits)
print("\nREMOVE COMMITS\n")
remove_commits = tfidf.tf_idf(["delete", "deleted", "remove", "removed"], 0.0)
tfidf.printValues(remove_commits)
return
main()
|
mit
| 8,553,894,782,115,481,000
| 34.020833
| 120
| 0.594289
| false
| 3.735556
| false
| false
| false
|
hirokihamasaki/irma
|
frontend/frontend/api/v1_1/controllers/files.py
|
1
|
7314
|
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
from bottle import response, request
from frontend.api.v1_1.errors import process_error
from frontend.helpers.utils import guess_hash_type
from frontend.models.sqlobjects import FileWeb, File
from frontend.api.v1_1.schemas import FileWebSchema_v1_1, ScanSchema_v1_1, \
FileSchema_v1_1
from lib.common.utils import decode_utf8
from lib.irma.common.exceptions import IrmaDatabaseResultNotFound
file_web_schema = FileWebSchema_v1_1()
scan_schema = ScanSchema_v1_1()
file_web_schema.context = {'formatted': True}
log = logging.getLogger(__name__)
file_web_schema_lite = FileWebSchema_v1_1(exclude=['probe_results'])
file_web_schema_lite.context = {'formatted': True}
def list(db):
""" Search a file using query filters (tags + hash or name). Support
pagination.
:param all params are sent using query method
:rtype: dict of 'total': int, 'offset': int, 'limit': int,
'items': list of file(s) found
:return:
on success 'items' contains a list of files found
on error 'msg' gives reason message
"""
try:
name = None
if 'name' in request.query:
name = decode_utf8(request.query['name'])
h_value = request.query.get('hash')
search_tags = request.query.get('tags')
if search_tags is not None:
search_tags = search_tags.split(',')
log.debug("name %s h_value %s search_tags %s",
name, h_value, search_tags)
if name is not None and h_value is not None:
raise ValueError("Can't find using both name and hash")
# Get values from query or default
offset = request.query.get("offset", default=0)
offset = int(offset)
limit = request.query.get("limit", default=25)
limit = int(limit)
if name is not None:
base_query = FileWeb.query_find_by_name(name, search_tags, db)
elif h_value is not None:
h_type = guess_hash_type(h_value)
if h_type is None:
raise ValueError("Hash not supported")
base_query = FileWeb.query_find_by_hash(
h_type, h_value, search_tags, db)
else:
# FIXME this is just a temporary way to output
# all files, need a dedicated
# file route and controller
base_query = FileWeb.query_find_by_name("", search_tags, db)
# TODO: Find a way to move pagination as a BaseQuery like in
# flask_sqlalchemy.
# https://github.com/mitsuhiko/flask-sqlalchemy/blob/master/flask_sqlalchemy/__init__.py#L422
items = base_query.limit(limit).offset(offset).all()
if offset == 0 and len(items) < limit:
total = len(items)
else:
total = base_query.count()
log.debug("Found %s results", total)
response.content_type = "application/json; charset=UTF-8"
return {
'total': total,
'offset': offset,
'limit': limit,
'items': file_web_schema_lite.dump(items, many=True).data,
}
except Exception as e:
log.exception(e)
process_error(e)
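# Example query string (illustrative values) handled above:
# ?name=report&tags=1,2&offset=0&limit=25
# 'name' and 'hash' are mutually exclusive; 'tags' is a comma-separated list.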
def get(sha256, db):
""" Detail about one file and all known scans summary where file was
present (identified by sha256). Support pagination.
:param all params are sent using query method
:param if alt parameter is "media", response will contains the binary data
:rtype: dict of 'total': int, 'offset': int, 'limit': int,
:return:
on success fileinfo contains file information
on success 'items' contains a list of files found
on error 'msg' gives reason message
"""
try:
log.debug("h_value %s", sha256)
# Check whether it's a download attempt or not
if request.query.alt == "media":
return _download(sha256, db)
# Get values from query or default
offset = request.query.get("offset", default=0)
offset = int(offset)
limit = request.query.get("limit", default=25)
limit = int(limit)
file = File.load_from_sha256(sha256, db)
# query all known results not only those with different names
base_query = FileWeb.query_find_by_hash("sha256", sha256, None, db,
distinct_name=False)
# TODO: Find a way to move pagination as a BaseQuery like in
# flask_sqlalchemy.
# https://github.com/mitsuhiko/flask-sqlalchemy/blob/master/flask_sqlalchemy/__init__.py#L422
items = base_query.limit(limit).offset(offset).all()
if offset == 0 and len(items) < limit:
total = len(items)
else:
total = base_query.count()
log.debug("offset %d limit %d total %d", offset, limit, total)
file_web_schema = FileWebSchema_v1_1(exclude=('probe_results',
'file_infos'))
fileinfo_schema = FileSchema_v1_1()
# TODO: allow formatted to be a parameter
formatted = True
fileinfo_schema.context = {'formatted': formatted}
response.content_type = "application/json; charset=UTF-8"
return {
'file_infos': fileinfo_schema.dump(file).data,
'total': total,
'offset': offset,
'limit': limit,
'items': file_web_schema.dump(items, many=True).data,
}
except Exception as e:
log.exception(e)
process_error(e)
def add_tag(sha256, tagid, db):
""" Attach a tag to a file.
"""
try:
log.debug("h_value %s tagid %s", sha256, tagid)
fobj = File.load_from_sha256(sha256, db)
fobj.add_tag(tagid, db)
db.commit()
except Exception as e:
log.exception(e)
process_error(e)
def remove_tag(sha256, tagid, db):
""" Remove a tag attached to a file.
"""
try:
log.debug("h_value %s tagid %s", sha256, tagid)
fobj = File.load_from_sha256(sha256, db)
fobj.remove_tag(tagid, db)
db.commit()
except Exception as e:
log.exception(e)
process_error(e)
# called by get
def _download(sha256, db):
"""Retrieve a file based on its sha256"""
log.debug("h_value %s", sha256)
fobj = File.load_from_sha256(sha256, db)
# check if file is still present
if fobj.path is None:
raise IrmaDatabaseResultNotFound("downloading a removed file")
# Force download
ctype = 'application/octet-stream; charset=UTF-8'
# Suggest Filename to sha256
cdisposition = "attachment; filename={}".format(sha256)
response.headers["Content-Type"] = ctype
response.headers["Content-Disposition"] = cdisposition
return open(fobj.path).read()
|
apache-2.0
| 4,231,076,620,459,806,000
| 34.852941
| 101
| 0.613071
| false
| 3.76622
| false
| false
| false
|
qisanstudio/qsapp-suibe
|
src/suibe/models/channel.py
|
1
|
3718
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from jinja2 import Markup
from flask import url_for
from studio.core.engines import db
from sqlalchemy.ext.hybrid import hybrid_property
from suibe.models.article import ArticleModel
__all__ = [
'NaviChannelModel',
'ChannelModel',
'ChannelSummaryModel',
'NaviModel',
]
def articles_order_by():
return [db.desc(ArticleModel.is_sticky),
db.desc(ArticleModel.date_published)]
class NaviChannelModel(db.Model):
__tablename__ = 'navi_channel'
navi_id = db.Column(db.Integer(), db.ForeignKey('navi.id'),
primary_key=True, index=True)
channel_id = db.Column(db.Integer(), db.ForeignKey('channel.id'),
primary_key=True, index=True)
class ChannelModel(db.Model):
__tablename__ = 'channel'
id = db.Column(db.Integer(), nullable=False, primary_key=True)
parent_id = db.Column(db.Integer(),
db.ForeignKey('channel.id'),
index=True)
name = db.Column(db.Unicode(256), nullable=False, unique=True, index=True)
date_created = db.Column(db.DateTime(timezone=True),
nullable=False, index=True,
server_default=db.func.current_timestamp())
_summary = db.relationship(
'ChannelSummaryModel',
backref=db.backref('channel', lazy='joined', innerjoin=True),
primaryjoin='ChannelModel.id==ChannelSummaryModel.id',
foreign_keys='[ChannelSummaryModel.id]',
uselist=False, cascade='all, delete-orphan')
@hybrid_property
def summary(self):
return self._summary.content
@summary.setter
def summary_setter(self, value):
if not self._summary:
self._summary = ChannelSummaryModel(id=self.id, content=value)
self._summary.content = value
@property
def html(self):
return Markup(self.summary)
parent = db.relationship('ChannelModel',
remote_side=[id],
backref='channels')
articles = db.relationship(
'ArticleModel',
primaryjoin='and_(ChannelModel.id==ArticleModel.cid,'
'ArticleModel.date_published<=func.now())',
order_by=articles_order_by,
foreign_keys='[ArticleModel.cid]',
passive_deletes='all', lazy='dynamic')
all_articles = db.relationship(
'ArticleModel',
primaryjoin='ChannelModel.id==ArticleModel.cid',
order_by=articles_order_by,
foreign_keys='[ArticleModel.cid]',
backref=db.backref(
'channel', lazy='joined', innerjoin=True),
passive_deletes='all', lazy='dynamic')
@property
def url(self):
return url_for("views.channel", cid=self.id)
def __str__(self):
return self.name
class ChannelSummaryModel(db.Model):
__tablename__ = 'channel_summary'
id = db.Column(db.Integer(), db.ForeignKey('channel.id'),
nullable=False, primary_key=True)
content = db.Column(db.UnicodeText(), nullable=False)
class NaviModel(db.Model):
__tablename__ = 'navi'
id = db.Column(db.Integer(), nullable=False, primary_key=True)
name = db.Column(db.Unicode(256), nullable=False, unique=True, index=True)
date_created = db.Column(db.DateTime(timezone=True),
nullable=False, index=True,
server_default=db.func.current_timestamp())
channels = db.relationship('ChannelModel',
secondary=NaviChannelModel.__table__)
def __str__(self):
return self.name
|
mit
| -6,052,231,150,220,093,000
| 31.060345
| 78
| 0.600861
| false
| 4.050109
| false
| false
| false
|
martinohanlon/pgzero-pong
|
pong.py
|
1
|
3151
|
from math import sin, cos, radians
from time import sleep
#setup the constants
WIDTH = 500
HEIGHT = 300
BALLSPEED = 10
PADDLESPEED = 5
MAXBOUNCEANGLE = 75
def reset_game(angle):
#setup ball properties
ball.pos = WIDTH / 2, HEIGHT / 2
ball.x_float = float(ball.x)
ball.y_float = float(ball.y)
ball.angle = angle
ball.x_vel = BALLSPEED * cos(radians(ball.angle))
ball.y_vel = BALLSPEED * sin(radians(ball.angle))
#position the paddles
pad1.pos = 10, HEIGHT / 2
pad2.pos = WIDTH - 10, HEIGHT / 2
#create a rectangle of the playing area
screenRect = Rect(10,0,WIDTH - 10,HEIGHT)
#create ball
ball = Actor('ball')
#create paddles
pad1 = Actor('paddle')
pad2 = Actor('paddle')
#reset the game
reset_game(180)
#setup the goals
goals = [0, 0]
def draw():
screen.clear()
ball.draw()
pad1.draw()
pad2.draw()
def update():
#move the paddles
if keyboard.q:
pad1.top -= PADDLESPEED
if keyboard.a:
pad1.top += PADDLESPEED
if keyboard.k:
pad2.top -= PADDLESPEED
if keyboard.m:
pad2.top += PADDLESPEED
#move the ball
ball_old_x = ball.x_float
ball_old_y = ball.y_float
ball.x_float = ball.x_float + ball.x_vel
ball.y_float = ball.y_float + ball.y_vel
ball.x = int(round(ball.x_float))
ball.y = int(round(ball.y_float))
#move the ball back to where it was?
reset_ball = False
#has the ball left the screen?
if not screenRect.contains(ball):
#did it hit the top or bottom?
if ball.top < 0 or ball.bottom > HEIGHT:
ball.y_vel *= -1
reset_ball = True
#it must have hit the side
else:
if ball.left < 10:
print("Player 2 goal")
goals[1] += 1
reset_game(180)
sleep(2)
print("Score {} : {}".format(goals[0], goals[1]))
elif ball.right > WIDTH - 10:
print("player 1 goal")
goals[1] += 1
reset_game(0)
sleep(2)
print("Score {} : {}".format(goals[0], goals[1]))
#has the ball hit a paddle
if pad1.colliderect(ball):
#work out the bounce angle
bounce_angle = ((ball.y - pad1.y) / (pad1.height / 2)) * MAXBOUNCEANGLE
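#e.g. a hit at the paddle centre (ball.y == pad1.y) gives 0 degrees, the very
#top edge gives -MAXBOUNCEANGLE (-75) and the bottom edge +75 degrees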
ball.angle = max(0 - MAXBOUNCEANGLE, min(MAXBOUNCEANGLE, bounce_angle))
#work out the ball velocity
ball.x_vel = BALLSPEED * cos(radians(ball.angle))
ball.y_vel = BALLSPEED * sin(radians(ball.angle))
reset_ball = True
elif pad2.colliderect(ball):
bounce_angle = 180 - (((ball.y - pad2.y) / (pad2.height / 2)) * MAXBOUNCEANGLE)
ball.angle = max(180 - MAXBOUNCEANGLE, min(180 + MAXBOUNCEANGLE, bounce_angle))
ball.x_vel = BALLSPEED * cos(radians(ball.angle))
ball.y_vel = BALLSPEED * sin(radians(ball.angle))
reset_ball = True
if reset_ball:
ball.x_float = ball_old_x
ball.y_float = ball_old_y
ball.x = int(round(ball.x_float))
ball.y = int(round(ball.y_float))
|
mit
| 6,057,346,098,873,700,000
| 25.478992
| 87
| 0.569026
| false
| 3.026897
| false
| false
| false
|
zqqf16/SYM
|
SYM/Models/models.py
|
1
|
1880
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
import sys
import json
import requests
def get_raw(url):
r = requests.get(url)
if r.status_code != 200:
return None
return r.text
def parse_models(regex, text):
result = []
lastModel = ""
model_regex = re.compile(r'.*\d,\d')
for item in regex.findall(text):
if model_regex.match(item):
result.append([item, lastModel])
else:
lastModel = item
return result
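# Illustrative pairing (values chosen to match typical wiki rows): a marketing
# name such as "iPhone 8" sets lastModel, and the following identifier row
# "iPhone10,1" is appended as ["iPhone10,1", "iPhone 8"].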
def get_all_models(url):
text = get_raw(url)
if not text:
print("Connect to url failed")
return
results = [
["i386", "Simulator"],
["x86_64", "Simulator"],
]
ipad = re.compile(r'rowspan.*(iPad[\w \(\)-.]*)')
results += parse_models(ipad, text)
iPhone = re.compile(r'rowspan.*(iPhone[\w \(\)-.]*)')
results += parse_models(iPhone, text)
iPod = re.compile(r'rowspan.*(iPod[\w \(\)-.]*)')
results += parse_models(iPod, text)
watch = re.compile(r'rowspan.*(Watch[\w \(\)-.]*)')
results += parse_models(watch, text)
return results
def json_output(results):
json_dict = { m[0]: m[1] for m in results }
print(json.dumps(json_dict, indent=4))
def nsdict_output(results):
print("@{")
for m in results:
print(' @"{}": @"{}",'.format(m[0], m[1]))
print('}')
def text_output(results):
for m in results:
print('{}:{}'.format(*m))
def pretty(results, fmt='json'):
if fmt == 'nsdict':
nsdict_output(results)
elif fmt == 'json':
json_output(results)
else:
text_output(results)
if __name__ == '__main__':
results = get_all_models('https://www.theiphonewiki.com/w/index.php?title=Models&action=edit')
fmt = 'text'
if len(sys.argv) > 1:
fmt = sys.argv[1]
pretty(results, fmt)
|
mit
| 1,235,590,171,324,613,600
| 22.5125
| 98
| 0.55
| false
| 3.208191
| false
| false
| false
|
iamsteadman/bambu-api
|
bambu_api/__init__.py
|
1
|
1314
|
"""
Quickly expose your models to a JSON or XML API, authenticated via HTTP or
OAuth.
"""
__version__ = '2.0.1'
from bambu_api.options import *
from bambu_api.sites import APISite
from bambu_api.exceptions import APIException
from bambu_api.decorators import argument, returns, named
from django.conf import settings
from datetime import datetime
default_app_config = 'bambu_api.apps.APIConfig'
site = APISite()
def autodiscover():
"""
Works like ``django.contrib.admin.autodiscover``, running through each of the packages within a
project's ``INSTALLED_APPS`` setting, to find instances of an ``api`` module which might contain
calls to ``bambu_api.site.register``.
Unlike ``django.contrib.admin.autodiscover``, you do not need to call this function manually.
"""
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from copy import copy, deepcopy
from bambu_api.endpoints import *
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy(site._registry)
import_module('%s.api' % app)
except:
site._registry = before_import_registry
if module_has_submodule(mod, 'api'):
raise
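# Illustrative app-side module (not part of this file) that autodiscover picks
# up; the model name and the exact register() signature are assumptions:
#
# myapp/api.py
# from bambu_api import site
# from myapp.models import Article
# site.register(Article)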
|
apache-2.0
| -4,812,282,785,063,941,000
| 31.04878
| 100
| 0.690259
| false
| 3.910714
| false
| false
| false
|
azumimuo/family-xbmc-addon
|
plugin.video.bubbles/resources/lib/sources/german/hoster/open/moviesever.py
|
1
|
5229
|
# -*- coding: utf-8 -*-
"""
Bubbles Addon
Copyright (C) 2016 Viper2k4
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import base64
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['moviesever.com/']
self.base_link = 'http://moviesever.com/'
self.search_link = '/?s=%s'
self.get_link = 'http://play.seriesever.net/me/moviesever.php'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
rels = dom_parser.parse_dom(rels, 'li')
rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']
r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
r = [(re.findall('link"?\s*:\s*"(.+?)"', ''.join([x.content for x in i])), dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r]
r = [i[0][0] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]]
for i in r:
try:
i = re.sub('\[.+?\]|\[/.+?\]', '', i)
i = client.replaceHTMLCodes(i)
if not i.startswith('http'): i = self.__decode_hash(i)
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i)
elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
else: direct = False; urls = [{'quality': 'SD', 'url': i}]
for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
if url.startswith('/'): url = 'http:%s' % url
return url
def __decode_hash(self, hash):
hash = hash.replace("!BeF", "R")
hash = hash.replace("@jkp", "Ax")
hash += '=' * (-len(hash) % 4)
try: return base64.b64decode(hash)
except: return
def __search(self, titles, year):
try:
query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}), dom_parser.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), i[1][0].content) for i in r if i[0] and i[1]]
r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r if i[0]]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
|
gpl-2.0
| -8,893,008,589,265,289,000
| 40.84
| 174
| 0.552113
| false
| 3.469808
| false
| false
| false
|
ShashaQin/frappe
|
frappe/email/bulk.py
|
1
|
11335
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import HTMLParser
import smtplib
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html
from frappe.utils.verified_command import get_signed_params, verify_request
from html2text import html2text
from frappe.utils import get_url, nowdate, encode, now_datetime, add_days, split_emails, cstr, cint
class BulkLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, subject=None, message=None, reference_doctype=None,
reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, reply_to=None, cc=(), show_as_cc=(), message_id=None, in_reply_to=None, send_after=None,
expose_recipients=False, bulk_priority=1, communication=None):
"""Add email to sending queue (Bulk Email)
:param recipients: List of recipients.
:param sender: Email sender.
:param subject: Email subject.
:param message: Email message.
:param reference_doctype: Reference DocType of caller document.
:param reference_name: Reference name of caller document.
:param bulk_priority: Priority for bulk email, default 1.
:param unsubscribe_method: URL method for unsubscribe. Default is `/api/method/frappe.email.bulk.unsubscribe`.
:param unsubscribe_params: additional params for unsubscribed links. default are name, doctype, email
:param attachments: Attachments to be sent.
:param reply_to: Reply to be captured here (default inbox)
:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
:param send_after: Send this email after the given datetime. If value is in integer, then `send_after` will be the automatically set to no of days from current date.
:param communication: Communication link to be set in Bulk Email record
"""
if not unsubscribe_method:
unsubscribe_method = "/api/method/frappe.email.bulk.unsubscribe"
if not recipients:
return
if isinstance(recipients, basestring):
recipients = split_emails(recipients)
if isinstance(send_after, int):
send_after = add_days(nowdate(), send_after)
email_account = get_outgoing_email_account(True, append_to=reference_doctype)
if not sender or sender == "Administrator":
sender = email_account.default_sender
check_bulk_limit(recipients)
formatted = get_formatted_html(subject, message, email_account=email_account)
try:
text_content = html2text(formatted)
except HTMLParser.HTMLParseError:
text_content = "See html attachment"
if reference_doctype and reference_name:
unsubscribed = [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"reference_doctype": reference_doctype, "reference_name": reference_name})]
unsubscribed += [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"global_unsubscribe": 1})]
else:
unsubscribed = []
recipients = [r for r in list(set(recipients)) if r and r not in unsubscribed]
for email in recipients:
email_content = formatted
email_text_context = text_content
if reference_doctype:
unsubscribe_link = get_unsubscribe_link(
reference_doctype=reference_doctype,
reference_name=reference_name,
email=email,
recipients=recipients,
expose_recipients=expose_recipients,
unsubscribe_method=unsubscribe_method,
unsubscribe_params=unsubscribe_params,
unsubscribe_message=unsubscribe_message,
show_as_cc=show_as_cc
)
email_content = email_content.replace("<!--unsubscribe link here-->", unsubscribe_link.html)
email_text_context += unsubscribe_link.text
# show as cc
cc_message = ""
if email in show_as_cc:
cc_message = _("This email was sent to you as CC")
email_content = email_content.replace("<!-- cc message -->", cc_message)
email_text_context = cc_message + "\n" + email_text_context
# add to queue
add(email, sender, subject, email_content, email_text_context, reference_doctype,
reference_name, attachments, reply_to, cc, message_id, in_reply_to, send_after, bulk_priority,
email_account=email_account, communication=communication)
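# Illustrative call (not part of the original module); addresses and reference
# values are placeholders. Queued mails are sent later by flush().
#
# send(recipients=["user@example.com"], sender="noreply@example.com",
# subject="Welcome", message="<p>Hello</p>",
# reference_doctype="User", reference_name="user@example.com")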
def add(email, sender, subject, formatted, text_content=None,
reference_doctype=None, reference_name=None, attachments=None, reply_to=None,
cc=(), message_id=None, in_reply_to=None, send_after=None, bulk_priority=1,
email_account=None, communication=None):
"""add to bulk mail queue"""
e = frappe.new_doc('Bulk Email')
e.recipient = email
e.priority = bulk_priority
try:
mail = get_email(email, sender=sender, formatted=formatted, subject=subject,
text_content=text_content, attachments=attachments, reply_to=reply_to, cc=cc, email_account=email_account)
mail.set_message_id(message_id)
if in_reply_to:
mail.set_in_reply_to(in_reply_to)
e.message = cstr(mail.as_string())
e.sender = mail.sender
except frappe.InvalidEmailAddressError:
# bad email id - don't add to queue
return
e.reference_doctype = reference_doctype
e.reference_name = reference_name
e.communication = communication
e.send_after = send_after
e.insert(ignore_permissions=True)
def check_bulk_limit(recipients):
# get count of mails sent this month
this_month = frappe.db.sql("""select count(name) from `tabBulk Email` where
status='Sent' and MONTH(creation)=MONTH(CURDATE())""")[0][0]
# if using settings from site_config.json, check bulk limit
# No limit for own email settings
smtp_server = SMTPServer()
if (smtp_server.email_account
and getattr(smtp_server.email_account, "from_site_config", False)
or frappe.flags.in_test):
monthly_bulk_mail_limit = frappe.conf.get('monthly_bulk_mail_limit') or 500
if (this_month + len(recipients)) > monthly_bulk_mail_limit:
throw(_("Cannot send this email. You have crossed the sending limit of {0} emails for this month.").format(monthly_bulk_mail_limit),
BulkLimitCrossedError)
def get_unsubscribe_link(reference_doctype, reference_name,
email, recipients, expose_recipients, show_as_cc,
unsubscribe_method, unsubscribe_params, unsubscribe_message):
email_sent_to = recipients if expose_recipients else [email]
email_sent_cc = ", ".join([e for e in email_sent_to if e in show_as_cc])
email_sent_to = ", ".join([e for e in email_sent_to if e not in show_as_cc])
if email_sent_cc:
email_sent_message = _("This email was sent to {0} and copied to {1}").format(email_sent_to, email_sent_cc)
else:
email_sent_message = _("This email was sent to {0}").format(email_sent_to)
if not unsubscribe_message:
unsubscribe_message = _("Unsubscribe from this list")
unsubscribe_url = get_unsubcribed_url(reference_doctype, reference_name, email,
unsubscribe_method, unsubscribe_params)
html = """<div style="margin: 15px auto; padding: 0px 7px; text-align: center; color: #8d99a6;">
{email}
<p style="margin: 15px auto;">
<a href="{unsubscribe_url}" style="color: #8d99a6; text-decoration: underline;
target="_blank">{unsubscribe_message}
</a>
</p>
</div>""".format(
unsubscribe_url = unsubscribe_url,
email=email_sent_message,
unsubscribe_message=unsubscribe_message
)
text = "\n{email}\n\n{unsubscribe_message}: {unsubscribe_url}".format(
email=email_sent_message,
unsubscribe_message=unsubscribe_message,
unsubscribe_url=unsubscribe_url
)
return frappe._dict({
"html": html,
"text": text
})
def get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params):
params = {"email": email.encode("utf-8"),
"doctype": reference_doctype.encode("utf-8"),
"name": reference_name.encode("utf-8")}
if unsubscribe_params:
params.update(unsubscribe_params)
query_string = get_signed_params(params)
# for test
frappe.local.flags.signed_query_string = query_string
	return get_url(unsubscribe_method + "?" + query_string)
@frappe.whitelist(allow_guest=True)
def unsubscribe(doctype, name, email):
	# unsubscribe from comments and communications
if not verify_request():
return
try:
frappe.get_doc({
"doctype": "Email Unsubscribe",
"email": email,
"reference_doctype": doctype,
"reference_name": name
}).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
frappe.db.rollback()
else:
frappe.db.commit()
return_unsubscribed_page(email, doctype, name)
def return_unsubscribed_page(email, doctype, name):
frappe.respond_as_web_page(_("Unsubscribed"), _("{0} has left the conversation in {1} {2}").format(email, _(doctype), name))
def flush(from_test=False):
"""flush email queue, every time: called from scheduler"""
smtpserver = SMTPServer()
auto_commit = not from_test
# additional check
check_bulk_limit([])
if frappe.are_emails_muted():
msgprint(_("Emails are muted"))
from_test = True
frappe.db.sql("""update `tabBulk Email` set status='Expired'
where datediff(curdate(), creation) > 3 and status='Not Sent'""", auto_commit=auto_commit)
for i in xrange(500):
if cint(frappe.defaults.get_defaults().get("hold_bulk")):
break
email = frappe.db.sql("""select * from `tabBulk Email` where
status='Not Sent' and ifnull(send_after, "2000-01-01 00:00:00") < %s
order by priority desc, creation asc limit 1 for update""", now_datetime(), as_dict=1)
if email:
email = email[0]
else:
break
frappe.db.sql("""update `tabBulk Email` set status='Sending' where name=%s""",
(email["name"],), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
try:
if not from_test:
smtpserver.setup_email_account(email.reference_doctype)
smtpserver.sess.sendmail(email["sender"], email["recipient"], encode(email["message"]))
frappe.db.sql("""update `tabBulk Email` set status='Sent' where name=%s""",
(email["name"],), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
except (smtplib.SMTPServerDisconnected,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
smtplib.SMTPAuthenticationError,
frappe.ValidationError):
# bad connection, retry later
frappe.db.sql("""update `tabBulk Email` set status='Not Sent' where name=%s""",
(email["name"],), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# no need to attempt further
return
except Exception, e:
frappe.db.sql("""update `tabBulk Email` set status='Error', error=%s
where name=%s""", (unicode(e), email["name"]), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# NOTE: removing commit here because we pass auto_commit
# finally:
# frappe.db.commit()
def clear_outbox():
"""Remove mails older than 31 days in Outbox. Called daily via scheduler."""
frappe.db.sql("""delete from `tabBulk Email` where
datediff(now(), creation) > 31""")
|
mit
| 8,331,891,317,508,942,000
| 34.870253
| 166
| 0.727305
| false
| 3.308523
| true
| false
| false
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMaddertranslatesCom.py
|
1
|
1779
|
def extractMaddertranslatesCom(item):
'''
Parser for 'maddertranslates.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Form A Slaves Only Harem Guild', 'An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'translated'),
('IT IS A DIFFERENT WORLD AND YET I AM CULTIVATING MONSTERS', 'It Is A Different World And Yet I Am Cultivating Monsters', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('The Bloodshot One-Eyed Zombie Emperor ', 'The Bloodshot One-Eyed Zombie Emperor', 'translated'),
('An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
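# Illustrative input (a hypothetical RSS item; assumes the shared title parser
# extracts the chapter number from "Chapter 12"):
#   extractMaddertranslatesCom({'title': 'Tensei Shoujo no Rirekisho Chapter 12',
#                               'tags': []})
# would match the 'Tensei Shoujo no Rirekisho' titlemap entry and return a
# translated-type release message.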
|
bsd-3-clause
| 8,159,679,789,587,798,000
| 52.939394
| 217
| 0.589657
| false
| 3.325234
| false
| false
| false
|
chincisan/google-python-exercises
|
basic/string1.py
|
1
|
3560
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
message = 'Number of donuts: '
if count < 10:
return message + str(count)
else:
return message + 'many'
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  if len(s) < 2:
    return ''
  else:
    return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
# inline-cool solution
return s[0] + s.replace(s[0],'*')[1:]
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
return b[:2] + a[2:] + ' ' + a[:2] + b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
|
apache-2.0
| -8,113,552,083,929,637,000
| 29.689655
| 78
| 0.666011
| false
| 3.114611
| true
| false
| false
|
WillianPaiva/1flow
|
oneflow/base/templatetags/base_utils.py
|
1
|
6677
|
# -*- coding: utf-8 -*-
"""
Copyright 2012-2014 Olivier Cortès <oc@1flow.io>
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import re
from django.template import Library, Node, TemplateSyntaxError
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from sparks.foundations import utils as sfu
register = Library()
def get_view_name(context):
# context['request'].resolver_match.func
# context['request'].resolver_match.args
# context['request'].resolver_match.kwargs
# context['request'].resolver_match.view_name
try:
return context['request'].resolver_match.view_name
except AttributeError:
# Happens on / when the request is a
# WSGIRequest and not an HttpRequest.
return u'home'
@register.simple_tag(takes_context=True)
def reverse_active(context, views_names, return_value=None):
""" In the template:
class="{% reverse_active "view_name" %}"
class="{% reverse_active "view_name1,view_name2" "my-active" %}"
Taken from http://gnuvince.wordpress.com/2007/09/14/a-django-template-tag-for-the-current-active-page/ #NOQA
and extended a lot to simplify template calls…
"""
for view_name in views_names.split(','):
if reverse(view_name) == context['request'].path:
return return_value or u'active'
return u''
@register.simple_tag(takes_context=True)
def view_name_active(context, pattern, return_value=None):
""" Same as reverse active, but for URLs without any
view. :param:`pattern` must be a valid regular expression.
class="{% active "/help/" "top-menu-element-active" %}"
"""
view_name = get_view_name(context)
if re.search(pattern, view_name):
return return_value or u'active'
return u''
class CaptureasNode(Node):
def __init__(self, nodelist, varname):
self.nodelist = nodelist
self.varname = varname
def render(self, context):
output = self.nodelist.render(context)
context[self.varname] = output
return ''
class FirstOfAsNode(Node):
def __init__(self, args, variable_name=None):
self.vars = args
self.variable_name = variable_name
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
if self.variable_name:
context[self.variable_name] = value
break
else:
return smart_text(value)
return ''
@register.tag(name='captureas')
def do_captureas(parser, token):
""" Taken from http://djangosnippets.org/snippets/545/ verbatim. Handy!
Initial source: https://code.djangoproject.com/ticket/7239
"""
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
raise TemplateSyntaxError(
"'captureas' node requires a variable name.")
nodelist = parser.parse(('endcaptureas',))
parser.delete_first_token()
return CaptureasNode(nodelist, args)
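# Illustrative template usage of the tag registered above:
#   {% captureas greeting %}Hello {{ user.username }}{% endcaptureas %}
#   {{ greeting }}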
@register.tag
def firstofas(parser, token):
""" Original idea: https://code.djangoproject.com/ticket/12199 """
bits = token.split_contents()[1:]
variable_name = None
expecting_save_as = bits[-2] == 'as'
if expecting_save_as:
variable_name = bits.pop(-1)
bits = bits[:-1]
if len(bits) < 1:
raise TemplateSyntaxError(
"'firstofas' statement requires at least one argument")
return FirstOfAsNode([parser.compile_filter(bit) for bit in bits],
variable_name)
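# Illustrative template usage: store the first truthy value in a variable.
#   {% firstofas var_a var_b "fallback" as result %}
#   {{ result }}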
@register.inclusion_tag('snippets/countdown.html')
def countdown(value, redirect=None, limit=0, show_seconds=True,
format=None, spacer=None):
""" From http://www.plus2net.com/javascript_tutorial/countdown.php """
if redirect is None:
redirect = '/'
if limit > 0:
operation = '+'
round_value = 0
counter_test = '<='
else:
operation = '-'
round_value = 0 # WAS: 2
counter_test = '>='
if format is None or format == 'long':
separator = ', '
short = False
units = {
'day': _('day'),
'days': _('days'),
'hour': _('hour'),
'hours': _('hours'),
'minute': _('minute'),
'minutes': _('minutes'),
'second': _('second'),
'seconds': _('seconds'),
}
elif format == 'abbr':
separator = ' '
short = True
units = {
'day': _('day'),
'days': _('days'),
'hour': _('hour'),
'hours': _('hours'),
'minute': _('min'),
'minutes': _('mins'),
'second': _('sec'),
'seconds': _('secs'),
}
elif format == 'short':
separator = ' '
short = True
units = {
'day': _('d'),
'days': _('d'),
'hour': _('h'),
'hours': _('h'),
'minute': _('m'),
'minutes': _('m'),
'second': _('s'),
'seconds': _('s'),
}
else:
raise TemplateSyntaxError("'countdown' 'format' keyword argument "
"must be either 'short', 'abbr' or 'long'")
return {
'name': sfu.unique_hash(only_letters=True),
'units': units,
'short': short,
'value': value,
'limit': limit,
'unit_sep': ' ' if spacer is None else spacer,
'redirect': redirect,
'operation': operation,
'separator': separator,
'round_value': round_value,
'show_seconds': show_seconds,
'counter_test': counter_test,
}
@register.filter
def lookup(d, key):
return d[key]
|
agpl-3.0
| 7,640,967,080,721,701,000
| 27.279661
| 116
| 0.574318
| false
| 4.057143
| false
| false
| false
|
Keats/gutenberg
|
components/site/benches/gen.py
|
1
|
5070
|
"""
Generates test sites for use in benchmark.
Tested with python3 and probably does not work on Windows.
"""
import datetime
import os
import random
import shutil
TAGS = ["a", "b", "c", "d", "e", "f", "g"]
CATEGORIES = ["c1", "c2", "c3", "c4"]
PAGE = """
+++
title = "Hello"
date = REPLACE_DATE
[taxonomies]
tags = REPLACE_TAG
categories = ["REPLACE_CATEGORY"]
+++
# Modus cognitius profanam ne duae virtutis mundi
## Ut vita
Lorem markdownum litora, care ponto nomina, et ut aspicit gelidas sui et
purpureo genuit. Tamen colla venientis [delphina](http://nil-sol.com/ecquis)
Tusci et temptata citaeque curam isto ubi vult vulnere reppulit.
- Seque vidit flendoque de quodam
- Dabit minimos deiecto caputque noctis pluma
- Leti coniunx est Helicen
- Illius pulvereumque Icare inpositos
- Vivunt pereo pluvio tot ramos Olenios gelidis
- Quater teretes natura inde
### A subsection
Protinus dicunt, breve per, et vivacis genus Orphei munere. Me terram [dimittere
casside](http://corpus.org/) pervenit saxo primoque frequentat genuum sorori
praeferre causas Libys. Illud in serpit adsuetam utrimque nunc haberent,
**terrae si** veni! Hectoreis potes sumite [Mavortis retusa](http://tua.org/)
granum captantur potuisse Minervae, frugum.
> Clivo sub inprovisoque nostrum minus fama est, discordia patrem petebat precatur
absumitur, poena per sit. Foramina *tamen cupidine* memor supplex tollentes
dictum unam orbem, Anubis caecae. Viderat formosior tegebat satis, Aethiopasque
sit submisso coniuge tristis ubi!
## Praeceps Corinthus totidem quem crus vultum cape
```rs
#[derive(Debug)]
pub struct Site {
/// The base path of the zola site
pub base_path: PathBuf,
/// The parsed config for the site
pub config: Config,
pub pages: HashMap<PathBuf, Page>,
pub sections: HashMap<PathBuf, Section>,
pub tera: Tera,
live_reload: bool,
output_path: PathBuf,
static_path: PathBuf,
pub tags: Option<Taxonomy>,
pub categories: Option<Taxonomy>,
/// A map of all .md files (section and pages) and their permalink
/// We need that if there are relative links in the content that need to be resolved
pub permalinks: HashMap<String, String>,
}
```
## More stuff
And a shortcode:
{{ youtube(id="my_youtube_id") }}
### Another subsection
Gotta make the toc do a little bit of work
# A big title
- hello
- world
- !
```py
if __name__ == "__main__":
gen_site("basic-blog", [""], 250, paginate=True)
```
"""
def gen_skeleton(name, is_blog):
if os.path.exists(name):
shutil.rmtree(name)
os.makedirs(os.path.join(name, "content"))
os.makedirs(os.path.join(name, "static"))
with open(os.path.join(name, "config.toml"), "w") as f:
if is_blog:
f.write("""
title = "My site"
base_url = "https://replace-this-with-your-url.com"
theme = "sample"
taxonomies = [
{name = "tags", rss = true},
{name = "categories"}
]
[extra.author]
name = "Vincent Prouillet"
""")
else:
f.write("""
title = "My site"
base_url = "https://replace-this-with-your-url.com"
theme = "sample"
[extra.author]
name = "Vincent Prouillet"
""")
# Re-use the test templates
shutil.copytree("../../../test_site/templates", os.path.join(name, "templates"))
shutil.copytree("../../../test_site/themes", os.path.join(name, "themes"))
def gen_section(path, num_pages, is_blog):
with open(os.path.join(path, "_index.md"), "w") as f:
if is_blog:
f.write("""
+++
paginate_by = 5
sort_by = "date"
template = "section_paginated.html"
+++
""")
else:
f.write("+++\n+++\n")
day = datetime.date.today()
for (i, page) in enumerate(range(0, num_pages)):
with open(os.path.join(path, "page-{}.md".format(i)), "w") as f:
f.write(
PAGE
.replace("REPLACE_DATE", str(day + datetime.timedelta(days=1)))
.replace("REPLACE_CATEGORY", random.choice(CATEGORIES))
.replace("REPLACE_TAG", str([random.choice(TAGS), random.choice(TAGS)]))
)
def gen_site(name, sections, num_pages_per_section, is_blog=False):
gen_skeleton(name, is_blog)
for section in sections:
path = os.path.join(name, "content", section) if section else os.path.join(name, "content")
if section:
os.makedirs(path)
gen_section(path, num_pages_per_section, is_blog)
if __name__ == "__main__":
gen_site("small-blog", [""], 30, is_blog=True)
gen_site("medium-blog", [""], 250, is_blog=True)
gen_site("big-blog", [""], 1000, is_blog=True)
gen_site("huge-blog", [""], 10000, is_blog=True)
gen_site("extra-huge-blog", [""], 100000, is_blog=True)
gen_site("small-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 10)
gen_site("medium-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 100)
gen_site("huge-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 1000)
|
mit
| -6,960,706,027,240,699,000
| 27.806818
| 121
| 0.641223
| false
| 2.849916
| false
| false
| false
|
salcho/antares
|
ui/loggerWidget.py
|
1
|
1137
|
#!/usr/bin/env python
import gtk
import logging
import time
from ui.IWidget import IWidget
from core.data import logger
from core.log import addStreamHandler
#TODO: Implement file-like thread to show logging!
class loggerWidget(IWidget):
def __init__(self):
IWidget.__init__(self)
self.frame = gtk.Frame('Logger')
self.text_view = None
def start(self):
self.frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.text_view = gtk.TextView()
self.text_view.set_editable(False)
self.text_view.set_wrap_mode(gtk.WRAP_NONE)
self.text_view.set_justification(gtk.JUSTIFY_LEFT)
self.text_view.set_cursor_visible(True)
sw.add_with_viewport(self.text_view)
self.frame.add(sw)
# Add handler to the logger
handler = handlerClass()
addStreamHandler(handler)
def updateView(self, record):
buf = self.text_view.get_buffer()
buf.insert(buf.get_end_iter(), record)
def getWidget(self):
return self.frame
class handlerClass(logging.StreamHandler):
def emit(self, record):
loggerWidget.updateView(record )
self.flush()
|
mit
| 3,276,869,649,480,475,600
| 24.863636
| 59
| 0.737907
| false
| 2.930412
| false
| false
| false
|
zestyr/lbry
|
lbrynet/dht/msgtypes.py
|
1
|
1593
|
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
from lbrynet.core.utils import generate_id
class Message(object):
""" Base class for messages - all "unknown" messages use this class """
def __init__(self, rpcID, nodeID):
self.id = rpcID
self.nodeID = nodeID
class RequestMessage(Message):
""" Message containing an RPC request """
def __init__(self, nodeID, method, methodArgs, rpcID=None):
if rpcID is None:
rpcID = generate_id()
Message.__init__(self, rpcID, nodeID)
self.request = method
self.args = methodArgs
class ResponseMessage(Message):
""" Message containing the result from a successful RPC request """
def __init__(self, rpcID, nodeID, response):
Message.__init__(self, rpcID, nodeID)
self.response = response
class ErrorMessage(ResponseMessage):
""" Message containing the error from an unsuccessful RPC request """
def __init__(self, rpcID, nodeID, exceptionType, errorMessage):
ResponseMessage.__init__(self, rpcID, nodeID, errorMessage)
if isinstance(exceptionType, type):
self.exceptionType = '%s.%s' % (exceptionType.__module__, exceptionType.__name__)
else:
self.exceptionType = exceptionType
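# Minimal sketch of how these message classes fit together (the method name and
# arguments below are illustrative; real messages are built by the protocol layer):
if __name__ == '__main__':
    _request = RequestMessage(generate_id(), 'ping', {})
    _response = ResponseMessage(_request.id, generate_id(), 'pong')
    assert _response.id == _request.id  # a response echoes the request's rpc ID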
|
mit
| 4,668,161,594,579,503,000
| 32.1875
| 93
| 0.670433
| false
| 4.148438
| false
| false
| false
|
shalzuth/BraveHaxvius
|
IDAScripts/GetNetworkKeys.py
|
1
|
2891
|
from idautils import *
from idaapi import *
def get_string(addr):
out = ""
while True:
if Byte(addr) != 0:
out += chr(Byte(addr))
else:
break
addr += 1
return out
def get_string_from_head(head):
refs = DataRefsFrom(head)
for ref in refs:
refs2 = DataRefsFrom(ref)
for ref2 in refs2:
stringval = get_string(ref2)
return stringval
def dumpkvp(functionName, addr, key):
if key in functionName and 'Request' in functionName:
functionName = functionName[3:]
functionName = functionName[:functionName.index(key)]
functionName = ''.join([i for i in functionName if not i.isdigit()])
functionName = functionName[:len(functionName)-7]
for (startea, endea) in Chunks(addr):
for head in Heads(startea, endea):
operand = GetDisasm(head)
if 'R0, [PC,R0]' in operand:
#if ', =(' in operand:
stringval = get_string_from_head(head)
					if key == 'getUrl':
stringval = stringval[14:22]
if 'action' in stringval:
stringval = 'action'
if not (functionName in requests):
requests[functionName] = {}
requests[functionName][key[3:]] = stringval
if 'aActionsymbol' in operand:
stringval = get_string_from_head(head)
					if key == 'getUrl':
stringval = stringval[14:22]
if 'action' in stringval:
stringval = 'action'
if not (functionName in requests):
requests[functionName] = {}
requests[functionName][key[3:]] = stringval
def dumpbody(functionName, addr, key):
if key in functionName and 'Request' in functionName:
functionName = functionName[3:]
functionName = functionName[:functionName.index(key)]
functionName = ''.join([i for i in functionName if not i.isdigit()])
functionName = functionName[:len(functionName)-7]
stringval = ""
basenode = ""
for (startea, endea) in Chunks(addr):
for head in Heads(startea, endea):
operand = GetDisasm(head)
if 'mov' in operand and 'ds:(off' in operand:
stringval = get_string_from_head(head)
if '_ZN9JsonGroup7addNodeEv' in operand:
if not (functionName in requests):
requests[functionName] = {}
if not ("Parameters" in requests[functionName]):
requests[functionName]["Parameters"] = {}
basenode = stringval
requests[functionName]["Parameters"][basenode] = {}
if '_ZN8JsonNode8addParamEPK' in operand:
requests[functionName]["Parameters"][basenode] = stringval
requests = {}
for funcea in Functions(0x100000, 0x14ea010):
functionName = GetFunctionName(funcea)
dumpkvp(functionName, funcea, 'getUrl')
dumpkvp(functionName, funcea, 'getRequestID')
dumpkvp(functionName, funcea, 'getEncodeKey')
#dumpbody(functionName, funcea, 'createBody')
print requests
import json
filename = os.path.expanduser("~/OneDrive/Documents/GitHub/BraveHaxvius/DataExtractor/network2.json")
with open(filename, 'w') as fp:
json.dump(requests, fp)
|
mit
| -4,050,094,355,223,870,000
| 31.863636
| 101
| 0.684192
| false
| 3.105263
| false
| false
| false
|
eRestin/MezzGIS
|
mezzanine/conf/forms.py
|
1
|
3040
|
from __future__ import unicode_literals
from future.builtins import int
from collections import defaultdict
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import urlize
from mezzanine.conf import settings, registry
from mezzanine.conf.models import Setting
FIELD_TYPES = {
bool: forms.BooleanField,
int: forms.IntegerField,
float: forms.FloatField,
}
class SettingsForm(forms.Form):
"""
Form for settings - creates a field for each setting in
``mezzanine.conf`` that is marked as editable.
"""
def __init__(self, *args, **kwargs):
super(SettingsForm, self).__init__(*args, **kwargs)
settings.use_editable()
# Create a form field for each editable setting's from its type.
for name in sorted(registry.keys()):
setting = registry[name]
if setting["editable"]:
field_class = FIELD_TYPES.get(setting["type"], forms.CharField)
kwargs = {
"label": setting["label"] + ":",
"required": setting["type"] in (int, float),
"initial": getattr(settings, name),
"help_text": self.format_help(setting["description"]),
}
if setting["choices"]:
field_class = forms.ChoiceField
kwargs["choices"] = setting["choices"]
self.fields[name] = field_class(**kwargs)
css_class = field_class.__name__.lower()
self.fields[name].widget.attrs["class"] = css_class
def __iter__(self):
"""
Calculate and apply a group heading to each field and order by the
heading.
"""
fields = list(super(SettingsForm, self).__iter__())
group = lambda field: field.name.split("_", 1)[0].title()
misc = _("Miscellaneous")
groups = defaultdict(int)
for field in fields:
groups[group(field)] += 1
for (i, field) in enumerate(fields):
setattr(fields[i], "group", group(field))
if groups[fields[i].group] == 1:
fields[i].group = misc
return iter(sorted(fields, key=lambda x: (x.group == misc, x.group)))
def save(self):
"""
Save each of the settings to the DB.
"""
for (name, value) in self.cleaned_data.items():
setting_obj, created = Setting.objects.get_or_create(name=name)
setting_obj.value = value
setting_obj.save()
def format_help(self, description):
"""
Format the setting's description into HTML.
"""
for bold in ("``", "*"):
parts = []
for i, s in enumerate(description.split(bold)):
parts.append(s if i % 2 == 0 else "<b>%s</b>" % s)
description = "".join(parts)
return mark_safe(urlize(description).replace("\n", "<br>"))
|
bsd-2-clause
| 8,096,700,865,729,011,000
| 35.190476
| 79
| 0.565789
| false
| 4.293785
| false
| false
| false
|
volab/pyvorcv
|
setup.py
|
1
|
3971
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# VoR-CV
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
import sys
from vorcv import __version__ as VERSION
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence']
KEYWORDS = 'opencv image recognition robotics'
# You can either specify manually the list of packages to include in the
# distribution or use "setuptools.find_packages()" to include them
# automatically with a recursive search (from the root directory of the
# project).
#PACKAGES = find_packages()
PACKAGES = ['vorcv']
# The following list contains all dependencies that Python will try to
# install with this project
INSTALL_REQUIRES = ['numpy']
#INSTALL_REQUIRES = []
SCRIPTS = ["scripts/vorcv-demo",
"scripts/vorcv-circle-detection-calibration"]
# Entry point can be used to create plugins or to automatically generate
# system commands to call specific functions.
# Syntax: "name_of_the_command_to_make = package.module:function".
ENTRY_POINTS = {}
#ENTRY_POINTS = {
# 'console_scripts': [
# 'vorcv-demo = vorcv.demo:main',
# ],
#}
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='jd.jdhp@gmail.com',
maintainer='Jeremie DECOCK',
maintainer_email='jd.jdhp@gmail.com',
name='pyvorcv',
description="The PyVoR-CV project, a computer vision library made for some VoRobotics projects (VoR11, VoR12, ...).",
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# Where the package can be downloaded
classifiers=CLASSIFIERS,
#license='MIT', # Useless if license is already in CLASSIFIERS
keywords=KEYWORDS,
packages=PACKAGES,
include_package_data=True, # Use the MANIFEST.in file
install_requires=INSTALL_REQUIRES,
#platforms=['Linux'],
#requires=['pyserial'],
scripts=SCRIPTS,
entry_points=ENTRY_POINTS,
version=VERSION)
|
mit
| 7,094,162,803,281,906,000
| 32.940171
| 123
| 0.684211
| false
| 3.927794
| false
| false
| false
|
robertnishihara/ray
|
python/ray/dashboard/node_stats.py
|
1
|
13362
|
from collections import defaultdict
from ray.dashboard.util import to_unix_time, format_reply_id
from base64 import b64decode
import ray
import threading
import json
import traceback
import copy
import logging
from datetime import datetime
import time
from typing import Dict
import re
from operator import itemgetter
logger = logging.getLogger(__name__)
PYCLASSNAME_RE = re.compile(r"(.+?)\(")
def _group_actors_by_python_class(actors):
groups = defaultdict(list)
for actor in actors.values():
actor_title = actor.get("actorTitle")
if not actor_title:
groups["Unknown Class"].append(actor)
else:
match = PYCLASSNAME_RE.search(actor_title)
if match:
# Catches case of actorTitle like
# Foo(bar, baz, [1,2,3]) -> Foo
class_name = match.groups()[0]
groups[class_name].append(actor)
else:
# Catches case of e.g. just Foo
# in case of actor task
groups[actor_title].append(actor)
return groups
def _get_actor_group_stats(group):
state_to_count = defaultdict(lambda: 0)
executed_tasks = 0
min_timestamp = None
num_timestamps = 0
sum_timestamps = 0
now = time.time() * 1000 # convert S -> MS
for actor in group:
state_to_count[actor["state"]] += 1
if "timestamp" in actor:
if not min_timestamp or actor["timestamp"] < min_timestamp:
min_timestamp = actor["timestamp"]
num_timestamps += 1
sum_timestamps += now - actor["timestamp"]
if "numExecutedTasks" in actor:
executed_tasks += actor["numExecutedTasks"]
if num_timestamps > 0:
avg_lifetime = int((sum_timestamps / num_timestamps) / 1000)
max_lifetime = int((now - min_timestamp) / 1000)
else:
avg_lifetime = 0
max_lifetime = 0
return {
"stateToCount": state_to_count,
"avgLifetime": avg_lifetime,
"maxLifetime": max_lifetime,
"numExecutedTasks": executed_tasks,
}
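# Worked example (hypothetical group): for two actors whose "timestamp" fields
# are 10s and 30s in the past (timestamps are in milliseconds), the result has
# avgLifetime == 20 and maxLifetime == 30 (both in seconds), numExecutedTasks
# summed across the group, and stateToCount tallying actors by their raw state.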
class NodeStats(threading.Thread):
def __init__(self, redis_address, redis_password=None):
self.redis_key = "{}.*".format(ray.gcs_utils.REPORTER_CHANNEL)
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self._node_stats = {}
self._ip_to_hostname = {}
self._addr_to_owner_addr = {}
self._addr_to_actor_id = {}
self._addr_to_extra_info_dict = {}
self._node_stats_lock = threading.Lock()
self._default_info = {
"actorId": "",
"children": {},
"currentTaskFuncDesc": [],
"ipAddress": "",
"jobId": "",
"numExecutedTasks": 0,
"numLocalObjects": 0,
"numObjectRefsInScope": 0,
"port": 0,
"state": 0,
"taskQueueLength": 0,
"usedObjectStoreMemory": 0,
"usedResources": {},
}
# Mapping from IP address to PID to list of log lines
self._logs = defaultdict(lambda: defaultdict(list))
# Mapping from IP address to PID to list of error messages
self._errors = defaultdict(lambda: defaultdict(list))
ray.state.state._initialize_global_state(
redis_address=redis_address, redis_password=redis_password)
super().__init__()
def _insert_log_counts(self):
for ip, logs_by_pid in self._logs.items():
hostname = self._ip_to_hostname.get(ip)
if not hostname or hostname not in self._node_stats:
continue
logs_by_pid = {pid: len(logs) for pid, logs in logs_by_pid.items()}
self._node_stats[hostname]["log_count"] = logs_by_pid
def _insert_error_counts(self):
for ip, errs_by_pid in self._errors.items():
hostname = self._ip_to_hostname.get(ip)
if not hostname or hostname not in self._node_stats:
continue
errs_by_pid = {pid: len(errs) for pid, errs in errs_by_pid.items()}
self._node_stats[hostname]["error_count"] = errs_by_pid
def _purge_outdated_stats(self):
def current(then, now):
if (now - then) > 5:
return False
return True
now = to_unix_time(datetime.utcnow())
self._node_stats = {
k: v
for k, v in self._node_stats.items() if current(v["now"], now)
}
def get_node_stats(self):
with self._node_stats_lock:
self._purge_outdated_stats()
self._insert_error_counts()
self._insert_log_counts()
node_stats = sorted(
(v for v in self._node_stats.values()),
key=itemgetter("boot_time"))
return {"clients": node_stats}
# Gets actors in a flat way to allow for grouping by actor type.
def get_actors(self, workers_info_by_node, infeasible_tasks, ready_tasks):
now = time.time()
actors: Dict[str, Dict[str, any]] = {}
# construct flattened actor tree
with self._node_stats_lock:
for addr, actor_id in self._addr_to_actor_id.items():
actors[actor_id] = copy.deepcopy(self._default_info)
actors[actor_id].update(self._addr_to_extra_info_dict[addr])
for node_id, workers_info in workers_info_by_node.items():
for worker_info in workers_info:
if "coreWorkerStats" in worker_info:
core_worker_stats = worker_info["coreWorkerStats"]
addr = (core_worker_stats["ipAddress"],
str(core_worker_stats["port"]))
if addr in self._addr_to_actor_id:
actor_info = actors[self._addr_to_actor_id[addr]]
format_reply_id(core_worker_stats)
actor_info.update(core_worker_stats)
actor_info["averageTaskExecutionSpeed"] = round(
actor_info["numExecutedTasks"] /
(now - actor_info["timestamp"] / 1000), 2)
actor_info["nodeId"] = node_id
actor_info["pid"] = worker_info["pid"]
def _update_from_actor_tasks(task, task_spec_type,
invalid_state_type):
actor_id = ray.utils.binary_to_hex(
b64decode(task[task_spec_type]["actorId"]))
if invalid_state_type == "pendingActor":
task["state"] = -1
elif invalid_state_type == "infeasibleActor":
task["state"] = -2
else:
raise ValueError(f"Invalid argument"
"invalid_state_type={invalid_state_type}")
task["actorTitle"] = task["functionDescriptor"][
"pythonFunctionDescriptor"]["className"]
format_reply_id(task)
actors[actor_id] = task
for infeasible_task in infeasible_tasks:
_update_from_actor_tasks(infeasible_task,
"actorCreationTaskSpec",
"infeasibleActor")
for ready_task in ready_tasks:
_update_from_actor_tasks(ready_task, "actorCreationTaskSpec",
"pendingActor")
actor_groups = _group_actors_by_python_class(actors)
stats_by_group = {
name: _get_actor_group_stats(group)
for name, group in actor_groups.items()
}
response_data = {}
for name, group in actor_groups.items():
response_data[name] = {
"entries": group,
"summary": stats_by_group[name]
}
return response_data
def get_logs(self, hostname, pid):
ip = self._node_stats.get(hostname, {"ip": None})["ip"]
logs = self._logs.get(ip, {})
if pid:
logs = {pid: logs.get(pid, [])}
return logs
def get_errors(self, hostname, pid):
ip = self._node_stats.get(hostname, {"ip": None})["ip"]
errors = self._errors.get(ip, {})
if pid:
errors = {pid: errors.get(pid, [])}
return errors
def run(self):
p = self.redis_client.pubsub(ignore_subscribe_messages=True)
p.psubscribe(self.redis_key)
logger.info("NodeStats: subscribed to {}".format(self.redis_key))
log_channel = ray.gcs_utils.LOG_FILE_CHANNEL
p.subscribe(log_channel)
logger.info("NodeStats: subscribed to {}".format(log_channel))
error_channel = ray.gcs_utils.RAY_ERROR_PUBSUB_PATTERN
p.psubscribe(error_channel)
logger.info("NodeStats: subscribed to {}".format(error_channel))
actor_channel = ray.gcs_utils.RAY_ACTOR_PUBSUB_PATTERN
p.psubscribe(actor_channel)
logger.info("NodeStats: subscribed to {}".format(actor_channel))
current_actor_table = ray.actors()
with self._node_stats_lock:
for actor_data in current_actor_table.values():
addr = (actor_data["Address"]["IPAddress"],
str(actor_data["Address"]["Port"]))
owner_addr = (actor_data["OwnerAddress"]["IPAddress"],
str(actor_data["OwnerAddress"]["Port"]))
self._addr_to_owner_addr[addr] = owner_addr
self._addr_to_actor_id[addr] = actor_data["ActorID"]
self._addr_to_extra_info_dict[addr] = {
"jobId": actor_data["JobID"],
"state": actor_data["State"],
"timestamp": actor_data["Timestamp"]
}
for x in p.listen():
try:
with self._node_stats_lock:
channel = ray.utils.decode(x["channel"])\
if "pattern" not in x or x["pattern"] is None\
else x["pattern"]
data = x["data"]
if channel == log_channel:
data = json.loads(ray.utils.decode(data))
ip = data["ip"]
pid = str(data["pid"])
self._logs[ip][pid].extend(data["lines"])
elif channel == str(error_channel):
pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(
data)
error_data = ray.gcs_utils.ErrorTableData.FromString(
pubsub_msg.data)
message = error_data.error_message
message = re.sub(r"\x1b\[\d+m", "", message)
match = re.search(r"\(pid=(\d+), ip=(.*?)\)", message)
if match:
pid = match.group(1)
ip = match.group(2)
self._errors[ip][pid].append({
"message": message,
"timestamp": error_data.timestamp,
"type": error_data.type
})
elif channel == actor_channel:
pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(
data)
actor_data = ray.gcs_utils.ActorTableData.FromString(
pubsub_msg.data)
addr = (actor_data.address.ip_address,
str(actor_data.address.port))
owner_addr = (actor_data.owner_address.ip_address,
str(actor_data.owner_address.port))
self._addr_to_owner_addr[addr] = owner_addr
self._addr_to_actor_id[addr] = ray.utils.binary_to_hex(
actor_data.actor_id)
self._addr_to_extra_info_dict[addr] = {
"jobId": ray.utils.binary_to_hex(
actor_data.job_id),
"state": actor_data.state,
"timestamp": actor_data.timestamp
}
elif channel == ray.gcs_utils.RAY_REPORTER_PUBSUB_PATTERN:
data = json.loads(ray.utils.decode(data))
self._ip_to_hostname[data["ip"]] = data["hostname"]
self._node_stats[data["hostname"]] = data
else:
try:
data = json.loads(ray.utils.decode(data))
except Exception as e:
data = f"Failed to load data because of {e}"
logger.warning("Unexpected channel data received, "
f"channel: {channel}, data: {data}")
except Exception:
logger.exception(traceback.format_exc())
continue
|
apache-2.0
| 5,768,132,720,916,335,000
| 40.496894
| 79
| 0.503068
| false
| 4.28132
| false
| false
| false
|
0/realtimepork
|
realtimepork/gpu.py
|
1
|
1605
|
"""
GPU utilities.
"""
from functools import wraps
from math import ceil
# Load everything we need in this module from PyCUDA (but don't autoinit until
# requested).
try:
from pycuda.tools import DeviceData
except ImportError:
_pycuda_available = False
else:
_pycuda_available = True
# Is this thing on?
_enabled = False
class PyCUDAMissingError(Exception):
pass
def _require_pycuda(f):
@wraps(f)
def wrapper(*args, **kwargs):
if not _pycuda_available:
raise PyCUDAMissingError('Unable to load PyCUDA.')
return f(*args, **kwargs)
return wrapper
@_require_pycuda
def enable():
"""
Initialize the GPU machinery.
"""
global _enabled
if _enabled:
return
import pycuda.autoinit
_enabled = True
def is_enabled():
"""
Check whether the GPU is available and initialized.
"""
return _enabled
@_require_pycuda
def carve_array(xn, yn):
"""
Determine the best grid and block sizes given the input size.
Parameters:
xn: Size in the x direction (shorter stride).
yn: Size in the y direction (longer stride).
Returns:
Grid size tuple, block size tuple.
"""
dev = DeviceData()
# Align with the warp size in the x direction and use what remains for the
# y direction.
x_threads = dev.warp_size
y_threads = dev.max_threads // x_threads
assert x_threads * y_threads <= dev.max_threads
x_blocks = int(ceil(xn / x_threads))
y_blocks = int(ceil(yn / y_threads))
return (x_blocks, y_blocks), (x_threads, y_threads, 1)
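# Illustrative usage (hypothetical sizes; requires a CUDA device and PyCUDA):
#   enable()
#   grid, block = carve_array(640, 480)
#   # On a device with warp_size == 32 and max_threads == 1024 this yields
#   # block == (32, 32, 1) and grid == (20, 15).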
|
mit
| 6,945,346,836,958,899,000
| 17.662791
| 78
| 0.640498
| false
| 3.672769
| false
| false
| false
|
hfercc/mese2014
|
lib/rest_framework/mixins.py
|
1
|
7228
|
"""
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.http import Http404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.request import clone_request
from rest_framework.settings import api_settings
import warnings
def _get_validation_exclusions(obj, pk=None, slug_field=None, lookup_field=None):
"""
Given a model instance, and an optional pk and slug field,
return the full list of all other field names on that model.
For use when performing full_clean on a model instance,
so we only clean the required fields.
"""
include = []
if pk:
# Pending deprecation
pk_field = obj._meta.pk
while pk_field.rel:
pk_field = pk_field.rel.to._meta.pk
include.append(pk_field.name)
if slug_field:
# Pending deprecation
include.append(slug_field)
if lookup_field and lookup_field != 'pk':
include.append(lookup_field)
return [field.name for field in obj._meta.fields if field.name not in include]
class CreateModelMixin(object):
"""
Create a model instance.
"""
def create(self, request, *args, **kwargs):
data = dict(request.DATA)
data.update(**kwargs)
serializer = self.get_serializer(data=data, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_success_headers(self, data):
try:
return {'Location': data[api_settings.URL_FIELD_NAME]}
except (TypeError, KeyError):
return {}
class ListModelMixin(object):
"""
List a queryset.
"""
empty_error = "Empty list and '%(class_name)s.allow_empty' is False."
def list(self, request, *args, **kwargs):
self.object_list = self.filter_queryset(self.get_queryset())
# Default is to allow empty querysets. This can be altered by setting
# `.allow_empty = False`, to raise 404 errors on empty querysets.
if not self.allow_empty and not self.object_list:
warnings.warn(
'The `allow_empty` parameter is due to be deprecated. '
'To use `allow_empty=False` style behavior, You should override '
'`get_queryset()` and explicitly raise a 404 on empty querysets.',
PendingDeprecationWarning
)
class_name = self.__class__.__name__
error_msg = self.empty_error % {'class_name': class_name}
raise Http404(error_msg)
# Switch between paginated or standard style responses
page = self.paginate_queryset(self.object_list)
if page is not None:
serializer = self.get_pagination_serializer(page)
else:
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data)
class RetrieveModelMixin(object):
"""
Retrieve a model instance.
"""
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = self.get_serializer(self.object)
return Response(serializer.data)
class UpdateModelMixin(object):
"""
Update a model instance.
"""
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
self.object = self.get_object_or_none()
serializer = self.get_serializer(self.object, data=request.DATA,
files=request.FILES, partial=partial)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
try:
self.pre_save(serializer.object)
except ValidationError as err:
# full_clean on model instance may be called in pre_save,
# so we have to handle eventual errors.
return Response(err.message_dict, status=status.HTTP_400_BAD_REQUEST)
if self.object is None:
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
return Response(serializer.data, status=status.HTTP_201_CREATED)
self.object = serializer.save(force_update=True)
self.post_save(self.object, created=False)
return Response(serializer.data, status=status.HTTP_200_OK)
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
def get_object_or_none(self):
try:
return self.get_object()
except Http404:
if self.request.method == 'PUT':
# For PUT-as-create operation, we need to ensure that we have
# relevant permissions, as if this was a POST request. This
# will either raise a PermissionDenied exception, or simply
# return None.
self.check_permissions(clone_request(self.request, 'POST'))
else:
# PATCH requests where the object does not exist should still
# return a 404 response.
raise
def pre_save(self, obj):
"""
Set any attributes on the object that are implicit in the request.
"""
# pk and/or slug attributes are implicit in the URL.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup = self.kwargs.get(lookup_url_kwarg, None)
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
slug_field = slug and self.slug_field or None
if lookup:
setattr(obj, self.lookup_field, lookup)
if pk:
setattr(obj, 'pk', pk)
if slug:
setattr(obj, slug_field, slug)
# Ensure we clean the attributes so that we don't eg return integer
# pk using a string representation, as provided by the url conf kwarg.
if hasattr(obj, 'full_clean'):
exclude = _get_validation_exclusions(obj, pk, slug_field, self.lookup_field)
obj.full_clean(exclude)
class DestroyModelMixin(object):
"""
Destroy a model instance.
"""
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
self.pre_delete(obj)
obj.delete()
self.post_delete(obj)
return Response(status=status.HTTP_204_NO_CONTENT)
|
apache-2.0
| 8,436,068,386,180,543,000
| 34.505051
| 88
| 0.606945
| false
| 4.259281
| false
| false
| false
|
aerostitch/nagios_checks
|
hdfs_datanode_balancing_status.py
|
1
|
4396
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Joseph Herlant <herlantj@gmail.com>
# File name: hdfs_datanode_balancing_status.py
# Creation date: 2014-10-08
#
# Distributed under terms of the GNU GPLv3 license.
"""
This nagios active check parses the Hadoop HDFS web interface url:
http://<namenode>:<port>/dfsnodelist.jsp?whatNodes=LIVE
to check that no datanode is beyond the balancing threshold (in both ways).
The goal of this check is to check if the balancer needs to be run manually and
do its job correctly (while running for example in cron jobs).
The output includes performance datas and is truncated if longer than 1024
chars. The values of the output are the variation between the average disk
usage of the nodes over the cluster and the disk usage of the current node on
the cluster.
A negative value of X means that the node is X percent under the average disk
usage of the datanodes over the cluster. A positive value means that it's over
the average.
Tested on: Hadoop CDH3U5
"""
__author__ = 'Joseph Herlant'
__copyright__ = 'Copyright 2014, Joseph Herlant'
__credits__ = ['Joseph Herlant']
__license__ = 'GNU GPLv3'
__version__ = '1.0.0'
__maintainer__ = 'Joseph Herlant'
__email__ = 'herlantj@gmail.com'
__status__ = 'Production'
__website__ = 'https://github.com/aerostitch/'
from mechanize import Browser
from BeautifulSoup import BeautifulSoup
import argparse, sys
if __name__ == '__main__':
# use -h argument to get help
parser = argparse.ArgumentParser(
description='A Nagios check to verify that all datanodes of an HDFS \
                    cluster are under the balancing threshold \
using the namenode web interface.')
parser.add_argument('-n', '--namenode', required=True,
help='hostname of the namenode of the cluster')
parser.add_argument('-p', '--port', type=int, default=50070,
help='port of the namenode http interface. \
Defaults to 50070.')
parser.add_argument(
'-w', '--warning', type=int, default=10,
help='warning threshold. If the datanode usage differs from average \
usage to more than this threshold, raise a warning. Defaults to 10.'
)
parser.add_argument(
'-c', '--critical', type=int, default=15,
help='critical threshold. If the datanode usage differs from average \
usage to more than this threshold, raise a critical. Defaults to 15.'
)
args = parser.parse_args()
# Get the web page from the namenode
url = "http://%s:%d/dfsnodelist.jsp?whatNodes=LIVE" % (args.namenode, args.port)
try:
page = Browser().open(url)
except IOError:
print 'CRITICAL: Cannot access namenode interface on %s:%d!' % (args.namenode, args.port)
sys.exit(2)
# parse the page and storing the {datanode: pct_usage} hash
html = page.read()
soup = BeautifulSoup(html)
datanodes = soup.findAll('td', {'class' : 'name'})
pcused = soup.findAll('td', {'class' : 'pcused', 'align' : 'right'})
nodes_pct = {}
for (idx, node) in enumerate(datanodes):
pct = float(pcused[idx].contents[0].strip())
node = datanodes[idx].findChildren('a')[0].contents[0].strip()
nodes_pct[node] = pct
# Each node variation against the average pct must be under the threshold
w_msg = ''
c_msg = ''
perfdata = ''
avg = 0
if len(nodes_pct) > 0:
avg = float(sum(nodes_pct.values()))/len(nodes_pct)
else:
print 'CRITICAL: Unable to find any node.'
sys.exit(2)
for (node, pct) in nodes_pct.items():
if abs(pct-avg) >= args.critical:
c_msg += ' %s=%.1f,' % (node, pct-avg)
perfdata += ' %s=%.1f,' % (node, pct-avg)
elif abs(avg-pct) >= args.warning:
w_msg += ' %s=%.1f,' % (node, pct-avg)
perfdata += ' %s=%.1f,' % (node, pct-avg)
else:
perfdata += ' %s=%.1f,' % (node, pct-avg)
# Prints the values and exits with the nagios exit code
if len(c_msg) > 0:
print ('CRITICAL:%s%s |%s' % (c_msg, w_msg, perfdata)).strip(',')[:1024]
sys.exit(2)
elif len(w_msg) > 0:
print ('WARNING:%s |%s' % (w_msg, perfdata)).strip(',')[:1024]
sys.exit(1)
else:
print ('OK |%s' % (perfdata)).strip(',')[:1024]
sys.exit(0)
|
gpl-2.0
| 1,909,556,536,262,776,600
| 37.226087
| 97
| 0.619882
| false
| 3.43706
| false
| false
| false
|
drongo-framework/drongo-wing-auth
|
wing_auth/views.py
|
1
|
1681
|
from drongo_utils.helpers import URLHelper
from wing_jinja2 import Jinja2
url = URLHelper.url
template = Jinja2.template
class AuthViews(object):
def __init__(self, app, module, base_url):
self.app = app
self.module = module
self.base_url = base_url
URLHelper.mount(app, self, base_url)
@url(pattern='/login')
@template('auth/login.html.j2')
def login_view(self, ctx):
q = ctx.request.query
if 'next' in q:
ctx.session.next = q['next'][0]
@url(pattern='/login', method='POST')
def login_do(self, ctx):
q = ctx.request.query
username = q['username'][0]
password = q['password'][0]
svc = self.module.services.UserLoginService(
username=username,
password=password
)
result = svc.check_credentials()
token = svc.create_token()
if result:
if self.module.config.token_in_session:
svc.authenticate_session(ctx, token)
_next = None
if 'next' in q:
_next = q['next'][0]
if _next is None and 'next' in ctx.session:
_next = ctx.session.next
if _next is None:
_next = '/'
ctx.response.set_redirect(_next)
else:
ctx.response.set_redirect('/auth/login')
@url(pattern='/logout')
def logout_do(self, ctx):
q = ctx.request.query
svc = self.module.services.UserLogoutService()
token = ctx.auth.get('token')
svc.expire_token(token)
_next = q.get('next', ['/'])[0]
ctx.response.set_redirect(_next)
|
mit
| -2,225,894,349,991,006,000
| 24.469697
| 55
| 0.545509
| false
| 3.760626
| false
| false
| false
|
malaonline/Server
|
server/app/migrations/0064_auto_20160201_1843.py
|
1
|
1150
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-01 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0063_auto_20160201_1830'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='time_slot',
),
migrations.AddField(
model_name='timeslot',
name='comment',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Comment'),
),
migrations.AlterField(
model_name='timeslot',
name='attendance',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.TimeSlotAttendance'),
),
migrations.AlterField(
model_name='timeslot',
name='complaint',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.TimeSlotComplaint'),
),
]
|
mit
| -8,703,896,778,187,276,000
| 31.857143
| 136
| 0.610435
| false
| 3.95189
| false
| false
| false
|
ankitrgadiya/cs50
|
project/miki/miki/edit.py
|
1
|
2137
|
import os
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for,
current_app, Markup
)
from markdown import markdown
from werkzeug.exceptions import abort
from miki.auth import login_required
from miki.db import connect
bp = Blueprint('edit', __name__)
@bp.app_template_filter('markdown')
def markdown_filter(content):
return Markup(markdown(content))
@bp.route('/edit', methods=('GET', 'POST'))
@login_required
def edit():
if request.method == 'POST':
source = request.form.get('source', None)
content = request.form.get('content', None)
if not source or not content:
abort(406)
# Extract filename
file_name = os.path.splitext(os.path.basename(source))[0]
# Write content to markdown
md = open(os.path.join(
current_app.config.get('SOURCE'),
file_name + '.md'),
'w'
)
md.write(content)
md.close()
# Write content to html
html = open(os.path.join(
current_app.config.get('OUTPUT'),
file_name + '.html'),
'w'
)
html.write(render_template(
'page.html',
content=content,
name=file_name)
)
html.close()
# Redirect to generated html
return redirect('/' + file_name + '.html')
else:
# Check for args in request
if not request.args.get("file"):
raise RuntimeError("No file parameter passed!")
# Markdown file
md = os.path.join(
current_app.config.get('SOURCE'),
request.args.get('file')
)
# Try opening markdown
try:
# Save contents
md_file = open(md, 'r')
content = md_file.read()
md_file.close()
        # If the file does not exist
        except FileNotFoundError:
            content = ''
            flash('Page does not exist yet.')
return render_template(
'edit.html',
content=content,
source=request.args.get("file")
)
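# Minimal wiring sketch (assumption: the real miki app factory supplies the
# auth blueprint, the Jinja templates and sensible SOURCE/OUTPUT paths; the
# values below are illustrative only).
if __name__ == '__main__':
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config.update(SECRET_KEY='dev', SOURCE='.', OUTPUT='.')
    demo_app.register_blueprint(bp)
    demo_app.run(debug=True)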
|
gpl-3.0
| -4,613,021,155,570,770,000
| 23.848837
| 69
| 0.542349
| false
| 4.133462
| false
| false
| false
|
erh3cq/hyperspy
|
hyperspy/tests/io/test_io.py
|
1
|
6593
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import os
import logging
import tempfile
from pathlib import Path
from unittest.mock import patch
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.signals import Signal1D
FULLFILENAME = Path(__file__).resolve().parent.joinpath("test_io_overwriting.hspy")
class TestIOOverwriting:
def setup_method(self, method):
self.s = Signal1D(np.arange(10))
self.new_s = Signal1D(np.ones(5))
# make sure we start from a clean state
self._clean_file()
self.s.save(FULLFILENAME)
self.s_file_hashed = self._hash_file(FULLFILENAME)
def _hash_file(self, filename):
with open(filename, "rb") as file:
md5_hash = hashlib.md5(file.read())
file_hashed = md5_hash.hexdigest()
return file_hashed
def _clean_file(self):
if os.path.exists(FULLFILENAME):
os.remove(FULLFILENAME)
def _check_file_is_written(self, filename):
        # Check that we have a different hash: if the file content differs
        # from the original, the hash will be different.
return not self.s_file_hashed == self._hash_file(filename)
def test_io_overwriting_True(self):
        # Overwrite is True: when the file exists we overwrite it
self.new_s.save(FULLFILENAME, overwrite=True)
assert self._check_file_is_written(FULLFILENAME)
def test_io_overwriting_False(self):
        # Overwrite is False: when the file exists we don't overwrite
self.new_s.save(FULLFILENAME, overwrite=False)
assert not self._check_file_is_written(FULLFILENAME)
@pytest.mark.parametrize("overwrite", [None, True, False])
def test_io_overwriting_no_existing_file(self, overwrite):
self._clean_file() # remove the file
self.new_s.save(FULLFILENAME, overwrite=overwrite)
assert self._check_file_is_written(FULLFILENAME)
def test_io_overwriting_None_existing_file_y(self):
# Overwrite is None, when file exists we ask, mock `y` here
with patch("builtins.input", return_value="y"):
self.new_s.save(FULLFILENAME)
assert self._check_file_is_written(FULLFILENAME)
def test_io_overwriting_None_existing_file_n(self):
# Overwrite is None, when file exists we ask, mock `n` here
with patch("builtins.input", return_value="n"):
self.new_s.save(FULLFILENAME)
assert not self._check_file_is_written(FULLFILENAME)
def teardown_method(self, method):
self._clean_file()
def test_glob_wildcards():
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
fnames = [os.path.join(dirpath, f"temp[1x{x}].hspy") for x in range(2)]
for f in fnames:
s.save(f)
with pytest.raises(ValueError, match="No filename matches this pattern"):
_ = hs.load(fnames[0])
t = hs.load([fnames[0]])
assert len(t) == 1
t = hs.load(fnames)
assert len(t) == 2
t = hs.load(os.path.join(dirpath, "temp*.hspy"))
assert len(t) == 2
t = hs.load(os.path.join(dirpath, "temp[*].hspy"), escape_square_brackets=True,)
assert len(t) == 2
with pytest.raises(ValueError, match="No filename matches this pattern"):
_ = hs.load(os.path.join(dirpath, "temp[*].hspy"))
# Test pathlib.Path
t = hs.load(Path(dirpath, "temp[1x0].hspy"))
assert len(t) == 1
t = hs.load([Path(dirpath, "temp[1x0].hspy"), Path(dirpath, "temp[1x1].hspy")])
assert len(t) == 2
t = hs.load(list(Path(dirpath).glob("temp*.hspy")))
assert len(t) == 2
t = hs.load(Path(dirpath).glob("temp*.hspy"))
assert len(t) == 2
def test_file_not_found_error():
with tempfile.TemporaryDirectory() as dirpath:
temp_fname = os.path.join(dirpath, "temp.hspy")
if os.path.exists(temp_fname):
os.remove(temp_fname)
with pytest.raises(ValueError, match="No filename matches this pattern"):
_ = hs.load(temp_fname)
with pytest.raises(FileNotFoundError):
_ = hs.load([temp_fname])
def test_file_reader_error():
# Only None, str or objects with attr "file_reader" are supported
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp.hspy")
s.save(f)
with pytest.raises(ValueError, match="reader"):
_ = hs.load(f, reader=123)
def test_file_reader_warning(caplog):
# Test fallback to Pillow imaging library
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp.hspy")
s.save(f)
with pytest.raises(ValueError, match="Could not load"):
with caplog.at_level(logging.WARNING):
_ = hs.load(f, reader="some_unknown_file_extension")
assert "Unable to infer file type from extension" in caplog.text
def test_file_reader_options():
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp.hspy")
s.save(f)
# Test string reader
t = hs.load(Path(dirpath, "temp.hspy"), reader="hspy")
assert len(t) == 1
np.testing.assert_allclose(t.data, np.arange(10))
# Test object reader
from hyperspy.io_plugins import hspy
t = hs.load(Path(dirpath, "temp.hspy"), reader=hspy)
assert len(t) == 1
np.testing.assert_allclose(t.data, np.arange(10))
def test_save_default_format():
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp")
s.save(f)
t = hs.load(Path(dirpath, "temp.hspy"))
assert len(t) == 1
|
gpl-3.0
| -7,554,021,842,762,585,000
| 31.638614
| 88
| 0.635219
| false
| 3.542719
| true
| false
| false
|
william-richard/moto
|
moto/dynamodb2/exceptions.py
|
1
|
5931
|
class InvalidIndexNameError(ValueError):
pass
class MockValidationException(ValueError):
def __init__(self, message):
self.exception_msg = message
class InvalidUpdateExpressionInvalidDocumentPath(MockValidationException):
invalid_update_expression_msg = (
"The document path provided in the update expression is invalid for update"
)
def __init__(self):
super(InvalidUpdateExpressionInvalidDocumentPath, self).__init__(
self.invalid_update_expression_msg
)
class InvalidUpdateExpression(MockValidationException):
invalid_update_expr_msg = "Invalid UpdateExpression: {update_expression_error}"
def __init__(self, update_expression_error):
self.update_expression_error = update_expression_error
super(InvalidUpdateExpression, self).__init__(
self.invalid_update_expr_msg.format(
update_expression_error=update_expression_error
)
)
class AttributeDoesNotExist(MockValidationException):
attr_does_not_exist_msg = (
"The provided expression refers to an attribute that does not exist in the item"
)
def __init__(self):
super(AttributeDoesNotExist, self).__init__(self.attr_does_not_exist_msg)
class ProvidedKeyDoesNotExist(MockValidationException):
provided_key_does_not_exist_msg = (
"The provided key element does not match the schema"
)
def __init__(self):
super(ProvidedKeyDoesNotExist, self).__init__(
self.provided_key_does_not_exist_msg
)
class ExpressionAttributeNameNotDefined(InvalidUpdateExpression):
name_not_defined_msg = "An expression attribute name used in the document path is not defined; attribute name: {n}"
def __init__(self, attribute_name):
self.not_defined_attribute_name = attribute_name
super(ExpressionAttributeNameNotDefined, self).__init__(
self.name_not_defined_msg.format(n=attribute_name)
)
class AttributeIsReservedKeyword(InvalidUpdateExpression):
attribute_is_keyword_msg = (
"Attribute name is a reserved keyword; reserved keyword: {keyword}"
)
def __init__(self, keyword):
self.keyword = keyword
super(AttributeIsReservedKeyword, self).__init__(
self.attribute_is_keyword_msg.format(keyword=keyword)
)
class ExpressionAttributeValueNotDefined(InvalidUpdateExpression):
attr_value_not_defined_msg = "An expression attribute value used in expression is not defined; attribute value: {attribute_value}"
def __init__(self, attribute_value):
self.attribute_value = attribute_value
super(ExpressionAttributeValueNotDefined, self).__init__(
self.attr_value_not_defined_msg.format(attribute_value=attribute_value)
)
class UpdateExprSyntaxError(InvalidUpdateExpression):
update_expr_syntax_error_msg = "Syntax error; {error_detail}"
def __init__(self, error_detail):
self.error_detail = error_detail
super(UpdateExprSyntaxError, self).__init__(
self.update_expr_syntax_error_msg.format(error_detail=error_detail)
)
class InvalidTokenException(UpdateExprSyntaxError):
token_detail_msg = 'token: "{token}", near: "{near}"'
def __init__(self, token, near):
self.token = token
self.near = near
super(InvalidTokenException, self).__init__(
self.token_detail_msg.format(token=token, near=near)
)
class InvalidExpressionAttributeNameKey(MockValidationException):
invalid_expr_attr_name_msg = (
'ExpressionAttributeNames contains invalid key: Syntax error; key: "{key}"'
)
def __init__(self, key):
self.key = key
super(InvalidExpressionAttributeNameKey, self).__init__(
self.invalid_expr_attr_name_msg.format(key=key)
)
class ItemSizeTooLarge(MockValidationException):
item_size_too_large_msg = "Item size has exceeded the maximum allowed size"
def __init__(self):
super(ItemSizeTooLarge, self).__init__(self.item_size_too_large_msg)
class ItemSizeToUpdateTooLarge(MockValidationException):
item_size_to_update_too_large_msg = (
"Item size to update has exceeded the maximum allowed size"
)
def __init__(self):
super(ItemSizeToUpdateTooLarge, self).__init__(
self.item_size_to_update_too_large_msg
)
class IncorrectOperandType(InvalidUpdateExpression):
inv_operand_msg = "Incorrect operand type for operator or function; operator or function: {f}, operand type: {t}"
def __init__(self, operator_or_function, operand_type):
self.operator_or_function = operator_or_function
self.operand_type = operand_type
super(IncorrectOperandType, self).__init__(
self.inv_operand_msg.format(f=operator_or_function, t=operand_type)
)
class IncorrectDataType(MockValidationException):
inc_data_type_msg = "An operand in the update expression has an incorrect data type"
def __init__(self):
super(IncorrectDataType, self).__init__(self.inc_data_type_msg)
class ConditionalCheckFailed(ValueError):
msg = "The conditional request failed"
def __init__(self):
super(ConditionalCheckFailed, self).__init__(self.msg)
class TransactionCanceledException(ValueError):
cancel_reason_msg = "Transaction cancelled, please refer cancellation reasons for specific reasons [{}]"
def __init__(self, errors):
msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors]))
super(TransactionCanceledException, self).__init__(msg)
class EmptyKeyAttributeException(MockValidationException):
empty_str_msg = "One or more parameter values were invalid: An AttributeValue may not contain an empty string"
def __init__(self):
super(EmptyKeyAttributeException, self).__init__(self.empty_str_msg)
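# Minimal sketch showing how the message templates render (illustrative only,
# not part of moto's public API):
if __name__ == "__main__":
    try:
        raise IncorrectOperandType("ADD", "MAP")
    except MockValidationException as err:
        print(err.exception_msg)
    try:
        raise InvalidTokenException(token="#", near="a # b")
    except MockValidationException as err:
        print(err.exception_msg)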
|
apache-2.0
| -7,290,487,194,947,429,000
| 33.283237
| 134
| 0.685888
| false
| 4.170886
| false
| false
| false
|
tisnik/fabric8-analytics-common
|
vscode-visual-tests/features/src/gui.py
|
1
|
4392
|
# vim: set fileencoding=utf-8
"""Common functions for GUI-related tests."""
from PIL import Image
from os import path
from src.litava import locate_on_screen_using_litava
TYPING_INTERVAL = 0.25
DIRECTORY_WITH_REGIONS = "regions"
OUTPUT_DIRECTORY = "."
def perform_move_mouse_cursor(context, x=0, y=0):
"""Move mouse cursor to specifief coordinates."""
assert context is not None, "Context must be provided by Behave"
context.pyautogui.moveTo(x, y)
def check_location_existence(location):
"""Check if location exist and can be found on the screen."""
assert location is not None, "Region can not be found"
def perform_click_on_the_region(context):
"""Click on region found by previous test step."""
assert context is not None, "Context must be provided by Behave"
# get the already found location
location = context.location
check_location_existence(location)
# click on the center of location
x, y = context.pyautogui.center(location)
context.pyautogui.click(x, y)
def perform_right_click_on_the_region(context):
"""Click on region found by previous test step by the right mouse button."""
assert context is not None, "Context must be provided by Behave"
# get the already found location
location = context.location
check_location_existence(location)
# click on the center of location
x, y = context.pyautogui.center(location)
context.pyautogui.click(x, y, button="right")
def perform_type(context, what_to_type):
"""Type anything onto the screen."""
context.pyautogui.typewrite(what_to_type, interval=TYPING_INTERVAL)
def region_filename_in_directory(directory, version, region):
"""Generate filename for region residing in specified directory."""
# construct proper filename
region = region.replace(" ", "_")
filename = path.join(directory + "/" + version, region + ".png")
return filename
def entry_region_check(context, region):
"""Check if context and region are set."""
assert context is not None, "Context is not set (FATAL)"
assert region is not None, "Name of region is required parameter"
def filename_for_region(context, region):
"""Proper filename for file containing pattern for region."""
assert context is not None, "Context is not set (FATAL)"
assert region is not None, "Name of region is required parameter"
version = context.vs_code_version
return region_filename_in_directory(DIRECTORY_WITH_REGIONS, version, region)
def save_screenshot(context, region):
"""Save screenshot with the filename the same as the region."""
assert context is not None, "Context is not set (FATAL)"
assert region is not None, "Name of region is required parameter"
version = context.vs_code_version
filename = region_filename_in_directory(OUTPUT_DIRECTORY, version, region)
context.pyautogui.screenshot(filename)
def find_the_pattern(context, filename):
"""Try to find the pattern in a screenshot."""
SCREENSHOT_FILENAME = "screenshot.bmp"
PATTERN_FILENAME = "pattern.bmp"
# fuzzy search
if context.use_litava:
context.pyautogui.screenshot(SCREENSHOT_FILENAME)
img = Image.open(filename)
img.save(PATTERN_FILENAME)
return locate_on_screen_using_litava(SCREENSHOT_FILENAME, PATTERN_FILENAME)
else:
return None
def perform_find_the_region(context, region, alternate_region=None):
"""Try to find region on screen based on specified pattern."""
entry_region_check(context, region)
location = context.location = None
try:
# first step - try to localize primary region
filename = filename_for_region(context, region)
location = context.pyautogui.locateOnScreen(filename)
check_location_existence(location)
except Exception:
# the primary region can't be found: try the alternate region, if any
if alternate_region is not None:
perform_find_the_region(context, alternate_region)
# first region can't be found and alternate region is not specified -> a problem
else:
location = find_the_pattern(context, filename)
            if location is None:
save_screenshot(context, region)
raise Exception("Region '{r}' can not be found on the screen".format(r=region))
context.location = location
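# Minimal usage sketch (assumption: called from a Behave step definition where
# ``context.pyautogui``, ``context.vs_code_version`` and ``context.use_litava``
# have already been prepared in environment.py):
#
#     from behave import when
#
#     @when('the region "{region}" is visible')
#     def step_impl(context, region):
#         perform_find_the_region(context, region)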
|
apache-2.0
| -6,515,741,662,497,905,000
| 32.784615
| 95
| 0.698315
| false
| 4.003646
| false
| false
| false
|
jeremiedecock/snippets
|
python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_spinbox_widget.py
|
1
|
4158
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref:
# - http://doc.qt.io/qt-5/modelview.html#3-4-delegates
# - http://doc.qt.io/qt-5/model-view-programming.html#delegate-classes
# - http://doc.qt.io/qt-5/qabstractitemdelegate.html#details
# - http://doc.qt.io/qt-5/qitemdelegate.html#details
# - http://doc.qt.io/qt-5/qstyleditemdelegate.html#details
# - http://doc.qt.io/qt-5/qtwidgets-itemviews-spinboxdelegate-example.html
import sys
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView, QStyledItemDelegate, QSpinBox
class MyData:
def __init__(self):
self._num_rows = 3
self._num_columns = 2
self._data = [[0 for j in range(self._num_columns)] for i in range(self._num_rows)]
def get_num_rows(self):
return self._num_rows
def get_num_columns(self):
return self._num_columns
def get_data(self, row_index, column_index):
value = self._data[row_index][column_index]
print("read ({},{}): {}".format(row_index, column_index, value))
return value
def set_data(self, row_index, column_index, value):
print("write ({},{}): {}".format(row_index, column_index, value))
self._data[row_index][column_index] = value
###############################################################################
class MyModel(QAbstractTableModel):
def __init__(self, data, parent=None):
super().__init__(parent)
        self._data = data  # DON'T CALL THIS ATTRIBUTE "data", A METHOD ALREADY HAS THIS NAME (model.data(index, role)) !!!
def rowCount(self, parent):
return self._data.get_num_rows()
def columnCount(self, parent):
return self._data.get_num_columns()
def data(self, index, role):
if role == Qt.DisplayRole or role == Qt.EditRole:
# See https://stackoverflow.com/a/8480223
return self._data.get_data(index.row(), index.column())
return QVariant()
def setData(self, index, value, role):
if role == Qt.EditRole:
try:
self._data.set_data(index.row(), index.column(), value)
# The following line are necessary e.g. to dynamically update the QSortFilterProxyModel
self.dataChanged.emit(index, index, [Qt.EditRole])
except Exception as e:
print(e)
return False
return True
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
###############################################################################
class MyDelegate(QStyledItemDelegate):
def createEditor(self, parent, option, index):
editor = QSpinBox(parent=parent)
        # setFrame(): control whether the editor widget draws itself with a frame.
        # If enabled (the default) it draws itself inside a frame, otherwise it
        # draws itself without any frame.
editor.setFrame(False)
editor.setRange(0, 3)
return editor
def setEditorData(self, editor, index):
value = int(index.data(Qt.EditRole)) # equivalent of value = index.model().data(index, Qt.EditRole)
editor.setValue(value)
def setModelData(self, editor, model, index):
editor.interpretText()
value = editor.value()
model.setData(index, value, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
if __name__ == '__main__':
app = QApplication(sys.argv)
data = MyData()
table_view = QTableView()
my_model = MyModel(data)
table_view.setModel(my_model)
delegate = MyDelegate()
table_view.setItemDelegate(delegate)
table_view.show()
# The mainloop of the application. The event handling starts from this point.
    # The exec_() method has a trailing underscore because "exec" is a reserved Python keyword, so exec_() is used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
|
mit
| -5,714,131,013,226,474,000
| 33.081967
| 133
| 0.616402
| false
| 3.732496
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/patch_route_filter.py
|
1
|
2686
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class PatchRouteFilter(SubResource):
"""Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list[~azure.mgmt.network.v2017_08_01.models.RouteFilterRule]
:param peerings: A collection of references to express route circuit
peerings.
:type peerings:
list[~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeering]
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'provisioning_state': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, id=None, rules=None, peerings=None, tags=None):
super(PatchRouteFilter, self).__init__(id=id)
self.rules = rules
self.peerings = peerings
self.provisioning_state = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
|
mit
| -656,485,033,284,707,300
| 36.830986
| 91
| 0.599777
| false
| 4.094512
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v6/services/services/carrier_constant_service/client.py
|
1
|
17815
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.resources.types import carrier_constant
from google.ads.googleads.v6.services.types import carrier_constant_service
from .transports.base import (
CarrierConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CarrierConstantServiceGrpcTransport
class CarrierConstantServiceClientMeta(type):
"""Metaclass for the CarrierConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CarrierConstantServiceTransport]]
_transport_registry["grpc"] = CarrierConstantServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[CarrierConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CarrierConstantServiceClient(metaclass=CarrierConstantServiceClientMeta):
"""Service to fetch carrier constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CarrierConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CarrierConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CarrierConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
CarrierConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def carrier_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified carrier_constant string."""
return "carrierConstants/{criterion_id}".format(
criterion_id=criterion_id,
)
@staticmethod
def parse_carrier_constant_path(path: str) -> Dict[str, str]:
"""Parse a carrier_constant path into its component segments."""
m = re.match(r"^carrierConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, CarrierConstantServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the carrier constant service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CarrierConstantServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CarrierConstantServiceTransport):
# transport is a CarrierConstantServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CarrierConstantServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_carrier_constant(
self,
request: carrier_constant_service.GetCarrierConstantRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> carrier_constant.CarrierConstant:
r"""Returns the requested carrier constant in full
detail.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetCarrierConstantRequest`):
The request object. Request message for
[CarrierConstantService.GetCarrierConstant][google.ads.googleads.v6.services.CarrierConstantService.GetCarrierConstant].
resource_name (:class:`str`):
Required. Resource name of the
carrier constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.CarrierConstant:
A carrier criterion that can be used
in campaign targeting.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a carrier_constant_service.GetCarrierConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, carrier_constant_service.GetCarrierConstantRequest
):
request = carrier_constant_service.GetCarrierConstantRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_carrier_constant
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("CarrierConstantServiceClient",)
|
apache-2.0
| 2,523,804,490,509,615,000
| 39.488636
| 136
| 0.623463
| false
| 4.597419
| false
| false
| false
|
Aalto-LeTech/a-plus
|
shibboleth_login/auth_backend.py
|
1
|
6416
|
import logging
import urllib.parse
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.shortcuts import redirect
from .apps import app_settings, env_settings
from .parser import Parser
logger = logging.getLogger('aplus.shibboleth')
class ShibbolethAuthBackend(ModelBackend):
"""
Authenticates the trusted user from the Shibboleth middleware headers.
Creates a new user or updates changed fields on an existing user.
"""
def authenticate(self, request, shibd_meta=None):
if not shibd_meta:
return None
user_save_flag = False
UserModel = get_user_model()
username_field = getattr(UserModel, 'USERNAME_FIELD', 'username')
email_field = getattr(UserModel, 'EMAIL_FIELD', 'email')
username_len = UserModel._meta.get_field(username_field).max_length
email_len = UserModel._meta.get_field(email_field).max_length
first_name_len = UserModel._meta.get_field('first_name').max_length
last_name_len = UserModel._meta.get_field('last_name').max_length
parser = Parser(env=shibd_meta,
urldecode=env_settings.URL_DECODE)
# resolve username
username = self._get_scoped_limited(parser, env_settings.USER_ID, username_len)
if not username:
return None
username = username.lower()
# resolve email
email = self._get_scoped_limited(parser, env_settings.EMAIL, email_len)
if email:
email = UserModel.objects.normalize_email(email)
# find user
try:
user = UserModel.objects.filter(**{username_field: username}).get()
except UserModel.DoesNotExist:
user = None
# fallback, find user with email
if not user and app_settings.ALLOW_SEARCH_WITH_EMAIL:
qs = UserModel.objects.filter(**{email_field: email})
if qs.count() == 1:
user = qs.first()
# create missing users
if not user and app_settings.ALLOW_CREATE_NEW_USERS:
logger.info('Creating a new Shibboleth authenticated user: %s <%s>',
username, email)
user = UserModel(**{
username_field: username,
email_field: email or '',
})
if not email:
user.save()
# TODO: use real domain with support for this and pseudonymized users
user.email = '{:d}@localhost'.format(user.id)
user.set_unusable_password()
user_save_flag = True
if not user:
return None
# update email
if email and email != user.email:
user.email = email
user_save_flag = True
# update first_name
first_name = ' '.join(parser.get_values(env_settings.FIRST_NAME, ''))[:first_name_len]
if first_name and first_name != user.first_name:
user.first_name = first_name
user_save_flag = True
# update last_name
last_name = ' '.join(parser.get_values(env_settings.LAST_NAME, ''))[:last_name_len]
if last_name and last_name != user.last_name:
user.last_name = last_name
user_save_flag = True
# if not first_name or last_name, fallback to cn and displayName
if not user.first_name or not user.last_name:
# best effort to find best possible name..
full_name = max((
' '.join(parser.get_values(env_settings.FULL_NAME, '')),
' '.join(parser.get_values(env_settings.COMMON_NAME, '')),
), key=len)
first_, __, last_ = full_name.partition(' ')
if not user.first_name:
user.first_name = first_[:first_name_len]
if not user.last_name:
user.last_name = last_[:last_name_len]
user_save_flag = True
if user_save_flag:
# TODO: write better error reporting, when there is a validator to raise something
user.full_clean()
user.save()
# TODO: support multiple domains
profile = user.userprofile
sid_filters = env_settings.STUDENT_FILTERS.copy()
# following filter drops everything else except configured domain
sid_filters[1] = env_settings.STUDENT_DOMAIN.lower()
try:
student_ids = parser.get_urn_values(
env_settings.STUDENT_URN,
env_settings.STUDENT_IDS,
filters=sid_filters)
except KeyError as error:
logger.warning("Did not find a student id for user '%s', missing field '%s'",
username, error)
student_ids = ()
except ValueError as error:
logger.warning("Did not find a student id for user '%s', invalid data: %s",
username, error)
student_ids = ()
# example: ('123456', 'aalto.fi', 'studentID', 'int', 'mace:terena.org')
# -> (value (student number), the domain, id type, int|local, schema namespace)
student_id = next(iter(student_ids), (None,))[0]
if student_id and student_id != profile.student_id:
profile.student_id = student_id
profile.save()
return user
def _get_scoped_limited(self, parser, name, max_len):
try:
value = parser.get_single_value(name)
except KeyError:
logger.warning("Shibboleth login attempt without %s%s.",
env_settings.PREFIX, name)
return None
except ValueError as error:
logger.warning("Shibboleth login attempt with multiple values for %s%s: %s",
env_settings.PREFIX, name, str(error)[:512])
return None
if not value:
logger.warning("Shibboleth login attempt with empty %s%s.",
env_settings.PREFIX, name)
return None
if len(value) > max_len:
logger.warning("Shibboleth login attempt with too long %s%s (%d > %d).",
env_settings.PREFIX, name, len(value), max_len)
return None
if '@' not in value:
logger.warning("Shibboleth login attempt without domain in %s%s (%s).",
env_settings.PREFIX, name, value)
return None
return value
|
gpl-3.0
| 8,581,214,611,712,775,000
| 37.884848
| 94
| 0.580268
| false
| 4.091837
| false
| false
| false
|
Komodo/macros
|
Reflow_tower.py
|
1
|
1901
|
# This macro reorders the selected lines so that the shortest is pushed to
# the top and the longest goes to the bottom (so they look like a tower).
#
# The macro is useful to reorder Python's "import" lines.
#
# Author: Nguyễn Hồng Quân (ng.hong.quan@gmail.com)
import eollib
from xpcom import components
viewSvc = components.classes["@activestate.com/koViewService;1"]\
.getService(components.interfaces.koIViewService)
view = viewSvc.currentView
view = view.queryInterface(components.interfaces.koIScintillaView)
sm = view.scimoz
# Make `start` the beginning position of the first selected line,
# and `end` the ending position of the last selected line.
start = sm.positionFromLine(sm.lineFromPosition(sm.selectionStart))
end = sm.getLineEndPosition(sm.lineFromPosition(sm.selectionEnd))
# Get list of selected lines. Also strip trailing spaces
lines = [l.rstrip() for l in sm.getTextRange(start, end).splitlines()]
# Sometimes one line of code is split into many with a trailing backslash.
# We group these sublines together and count the length of the longest one.
groupedlines = [] # Each element is a list of sublines
concat = False
for l in lines:
if l.endswith('\\'): # This line will be concatenated with following lines
if not concat:
groupedlines.append([l])
concat = True
else:
groupedlines[-1].append(l) # Append to the latest list of sublines
else:
if concat: # Last subline to concat
groupedlines[-1].append(l)
concat = False
else:
groupedlines.append([l])
# Reorder by length
groupedlines.sort(key=lambda group: max(len(l) for l in group))
# Flatten groupedlines
lines = []
for g in groupedlines:
lines.extend(g)
# Select part of document
sm.setSel(start, end)
# Replace selection content
eol = eollib.eol2eolStr[sm.eOLMode]
sm.replaceSel(eol.join(lines))
|
mpl-2.0
| 1,604,087,599,337,519,600
| 33.472727
| 79
| 0.712025
| false
| 3.530726
| false
| false
| false
|
alisheykhi/SocialPDA
|
graph_util.py
|
1
|
8934
|
import re,collections,operator
import networkx as nx
from privacy_level import privacy_level_generator
from numpy.random import zipf
from math import ceil
class ReadGraph():
extension = []
G = nx.Graph()
properties = {}
nodes = []
edges = []
privacy_level = []
sorted_degree_sequence = []
def __init__(self, file_name,level):
print "-----------------------------------------------------------"
print "___________________Developed for___________________________"
print "-----------------------------------------------------------"
print "title: SocialPDA: A Structure-Aware Approach for Personalized Degree Anonymity in Social Network Graphs"
print "Author: Ali Sheykhi and Mahdi Abadi"
print "Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
print "{ali.sheykhi, abadi}@modares.ac.ir"
print "-----------------------------------------------------------"
print "___________________Initial Setup___________________________"
print "-----------------------------------------------------------"
self.file_name = file_name
print "file name : ",self.file_name
ReadGraph.extension = ["csv", "txt", "gml", "net"]
self.converter(level)
def converter(self,level):
'''
        Choose the correct converter based on the file extension.
:return:
'''
file_type = self.file_type()
if file_type == "gml":
print "Convert gml file ... "
self.gml_to_graph(level)
if file_type == "txt":
print "Convert txt file ... "
self.txt_to_graph(level)
def file_type(self):
'''
        Return the dataset file type.
        :return: file extension
'''
if self.is_valid():
return self.file_name.split(".")[-1]
def is_valid(self):
'''
        Check whether the file has a supported graph extension.
:return:
'''
file_extension = self.file_name.split(".")[-1]
if (file_extension):
if (file_extension.lower() in ReadGraph.extension):
return True
else:
print "Unknown file extension \"",file_extension,"\", use:",ReadGraph.extension
return False
else:
print "file does not have an extension!"
return False
def gml_to_graph(self,level):
'''
        Convert the gml file into the networkx graph.
:return:
'''
# try:
# file_path = "DataSet/"+self.file_name
# except:
# print "can't open "+self.file_name
# else:
# print "reading gml file ... "
# M = nx.MultiGraph(nx.read_gml('DataSet/polblogs.gml'))
# for u,v,data in M.edges_iter(data=True):
# if ReadGraph.G.has_edge(u,v):
# pass
# else:
# ReadGraph.G.add_edge(u, v)
# ReadGraph.properties ['edge_count'] = len(ReadGraph.edges)
# print len(ReadGraph.G.node)
# self.degree_sequence()
try:
file_path = "DataSet/"+self.file_name
ifile = open(file_path,'r')
except:
print "can't open "+self.file_name
else:
text = ifile.read()
ifile.close()
if text:
print "reading gml file ... "
pattern_meas = re.compile(r"source\s(\d+)\s+target\s(\d+)", re.VERBOSE | re.MULTILINE)
pattern_id = re.compile(r"id\s(\d+)", re.VERBOSE | re.MULTILINE)
for match in pattern_meas.finditer(text):
ReadGraph.edges.append("%s,%s" % (match.group(1), match.group(2)))
for match in pattern_id.finditer(text):
ReadGraph.nodes.append("%s" % match.group(1))
node_count = 0
for node in ReadGraph.nodes:
ReadGraph.G.add_node(int(node))
node_count += 1
for edge in ReadGraph.edges:
ReadGraph.G.add_edge(int(edge.split(",")[0]) ,int( edge.split(",")[1]))
sum = 0
count = 0
for NI in ReadGraph.G.degree().values():
#print "node: %d, out-degree %d, in-degree %d" % ( NI.GetId(), NI.GetOutDeg(), NI.GetInDeg())
sum += NI
count+=1
ReadGraph.properties ['edge_count'] = sum/2
self.degree_sequence(level)
def txt_to_graph(self,level):
"""
        Convert the txt edge-list file into the networkx graph.
:return:
"""
try:
file_path = "DataSet/"+self.file_name
ifile = open(file_path ,'r')
except:
print "can't open "+self.file_name
else:
text = ifile.read()
ifile.close()
if text:
print "reading txt file ... "
nodes_list = []
if self.file_name.split(".")[0] == 'caida':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)\s+([-]?\d+)$", re.VERBOSE | re.MULTILINE)
if self.file_name.split(".")[0] == 'caida_test':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)\s+([-]?\d+)$", re.VERBOSE | re.MULTILINE)
if self.file_name.split(".")[0] == 'amazon':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)", re.VERBOSE | re.MULTILINE)
for match in pattern_meas.finditer(text):
# nodes_list.append("%s" % int(match.group(1)))
# nodes_list.append("%s" % int(match.group(2)))
ReadGraph.G.add_edge(int(match.group(1)),int( match.group(2)))
# ReadGraph.nodes = list(set(nodes_list))
# for node in ReadGraph.nodes:
# ReadGraph.G.add_node(int(node))
# for edge in ReadGraph.edges:
# ReadGraph.G.add_edge(int(edge.split(",")[0]) ,int( edge.split(",")[1]))
sum = 0
count = 0
for NI in ReadGraph.G.degree().values():
#print "node: %d, out-degree %d, in-degree %d" % ( NI.GetId(), NI.GetOutDeg(), NI.GetInDeg())
sum += NI
count+=1
ReadGraph.properties ['edge_count'] = sum/2
self.degree_sequence(level)
def degree_sequence(self,level):
print nx.info(ReadGraph.G)
result_in_degree = ReadGraph.G.degree().values()
privacy_file_name = self.file_name.split(".")[0]+"_privacy.txt"
privacy_level = privacy_level_generator(file_name=privacy_file_name,lvl =level)
# departed = []
for node in ReadGraph.G.nodes():
if ReadGraph.G.degree(node):
current_node = dict(degree = ReadGraph.G.degree(node), id=node)
ReadGraph.sorted_degree_sequence.append(current_node)
# if ReadGraph.G.degree(node) == 1:
# departed.append(list(ReadGraph.G.edges_iter(node))[0])
# for item in departed:
# for item2 in departed:
# if item[1] == item2[0]:
# print item, item2
ReadGraph.sorted_degree_sequence.sort(key=lambda x:(x['degree']), reverse=True)
# for i in range (0,5):
# print ReadGraph.sorted_degree_sequence[i]
for i in range(0, len(ReadGraph.sorted_degree_sequence)):
if ReadGraph.sorted_degree_sequence[i]:
ReadGraph.sorted_degree_sequence[i]['privacy_level'] = int(privacy_level[i])
#ReadGraph.sorted_degree_sequence.sort(key=lambda x:(x['privacy_level'],x['degree']), reverse=True)
ReadGraph.properties['node_count'] = len(ReadGraph.sorted_degree_sequence)
max_degree = None
max_degree_id = None
for node in ReadGraph.sorted_degree_sequence:
if node['degree'] > max_degree:
max_degree = node['degree']
max_degree_id = node['id']
ReadGraph.properties ['max_degree_id'] = max_degree_id
ReadGraph.properties ['max_privacy'] = ReadGraph.sorted_degree_sequence[0]['privacy_level']
ReadGraph.properties ['max_privacy_id'] = ReadGraph.sorted_degree_sequence[0]['id']
ReadGraph.properties ['max_degree_size'] = max_degree
ReadGraph.properties ['avg_degree'] = 2 * (float (ReadGraph.properties ['edge_count'])/float (ReadGraph.properties ['node_count']))
node_occur = collections.Counter (result_in_degree)
sorted_node_oc = sorted(node_occur.items(), key=operator.itemgetter(1))
ReadGraph.properties ['k'] = sorted_node_oc[0][1]
print ReadGraph.properties
print "for example, the first node in sorted degree sequence is :" + str(ReadGraph.sorted_degree_sequence[0])
|
apache-2.0
| -4,987,266,179,027,643,000
| 39.243243
| 143
| 0.504589
| false
| 3.875922
| false
| false
| false
|
aringh/odl
|
odl/solvers/nonsmooth/forward_backward.py
|
1
|
6886
|
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Optimization methods based on a forward-backward splitting scheme."""
from __future__ import print_function, division, absolute_import
from odl.operator import Operator
__all__ = ('forward_backward_pd',)
def forward_backward_pd(x, f, g, L, h, tau, sigma, niter,
callback=None, **kwargs):
"""The forward-backward primal-dual splitting algorithm.
The algorithm minimizes the sum of several convex functionals composed with
    linear operators::
min_x f(x) + sum_i g_i(L_i x) + h(x)
where ``f``, ``g_i`` are convex functionals, ``L_i`` are linear
    operators, and ``h`` is a convex and differentiable functional.
The method can also be used to solve the more general problem::
min_x f(x) + sum_i (g_i @ l_i)(L_i x) + h(x)
where ``l_i`` are strongly convex functionals and @ is the infimal
convolution::
(g @ l)(x) = inf_y { g(y) + l(x-y) }
Note that the strong convexity of ``l_i`` makes the convex conjugate
    ``l_i^*`` differentiable; see the Notes section for more information on
this.
Parameters
----------
x : `LinearSpaceElement`
Initial point, updated in-place.
f : `Functional`
The functional ``f``. Needs to have ``f.proximal``.
g : sequence of `Functional`'s
The functionals ``g_i``. Needs to have ``g_i.convex_conj.proximal``.
    L : sequence of `Operator`'s
Sequence of linear operators ``L_i``, with as many elements as
``g``.
h : `Functional`
The functional ``h``. Needs to have ``h.gradient``.
tau : float
Step size-like parameter for ``f``.
sigma : sequence of floats
Sequence of step size-like parameters for the sequence ``g``.
niter : int
Number of iterations.
callback : callable, optional
Function called with the current iterate after each iteration.
Other Parameters
----------------
l : sequence of `Functional`'s, optional
The functionals ``l_i``. Needs to have ``g_i.convex_conj.gradient``.
If omitted, the simpler problem without ``l_i`` will be considered.
Notes
-----
The mathematical problem to solve is
.. math::
\min_x f(x) + \sum_{i=0}^n (g_i \Box l_i)(L_i x) + h(x),
where :math:`f`, :math:`g_i`, :math:`l_i` and :math:`h` are functionals and
:math:`L_i` are linear operators. The infimal convolution :math:`g \Box l`
is defined by
.. math::
(g \Box l)(x) = \inf_y g(y) + l(x - y).
The exact conditions on the involved functionals are as follows: :math:`f`
and :math:`g_i` are proper, convex and lower semicontinuous, and :math:`h`
is convex and differentiable with :math:`\\eta^{-1}`-Lipschitz continuous
gradient, :math:`\\eta > 0`.
The optional operators :math:`\\nabla l_i^*` need to be
:math:`\\nu_i`-Lipschitz continuous. Note that in the paper, the condition
is formulated as :math:`l_i` being proper, lower
semicontinuous, and :math:`\\nu_i^{-1}`-strongly convex, which implies that
:math:`l_i^*` have :math:`\\nu_i`-Lipschitz continuous gradients.
If the optional operators :math:`\\nabla l_i^*` are omitted, the simpler
problem without :math:`l_i` will be considered. Mathematically, this is
done by taking :math:`l_i` to be the functionals that are zero only in the
zero element and :math:`\\infty` otherwise. This gives that :math:`l_i^*`
are the zero functionals, and hence the corresponding gradients are the
zero operators.
To guarantee convergence, the parameters :math:`\\tau`, :math:`\\sigma` and
:math:`L_i` need to satisfy
.. math::
2 \min \{ \\frac{1}{\\tau}, \\frac{1}{\sigma_1}, \\ldots,
\\frac{1}{\sigma_m} \} \cdot \min\{ \\eta, \\nu_1, \\ldots, \\nu_m \}
\cdot \\sqrt{1 - \\tau \\sum_{i=1}^n \\sigma_i ||L_i||^2} > 1,
where, if the simpler problem is considered, all :math:`\\nu_i` can be
considered to be :math:`\\infty`.
For reference on the forward-backward primal-dual algorithm, see [BC2015].
For more on proximal operators and algorithms see [PB2014].
See Also
--------
odl.solvers.nonsmooth.primal_dual_hybrid_gradient.pdhg :
Solver for similar problems without differentiability in any
of the terms.
odl.solvers.nonsmooth.douglas_rachford.douglas_rachford_pd :
Solver for similar problems without differentiability in any
of the terms.
References
----------
[BC2015] Bot, R I, and Csetnek, E R. *On the convergence rate of
a forward-backward type primal-dual splitting algorithm for convex
optimization problems*. Optimization, 64.1 (2015), pp 5--23.
[PB2014] Parikh, N, and Boyd, S. *Proximal Algorithms*.
Foundations and Trends in Optimization, 1 (2014), pp 127-239.
"""
# Problem size
m = len(L)
# Validate input
if not all(isinstance(op, Operator) for op in L):
raise ValueError('`L` not a sequence of operators')
if not all(op.is_linear for op in L):
raise ValueError('not all operators in `L` are linear')
if not all(x in op.domain for op in L):
raise ValueError('`x` not in the domain of all operators in `L`')
if len(sigma) != m:
raise ValueError('len(sigma) != len(L)')
if len(g) != m:
        raise ValueError('len(g) != len(L)')
# Extract operators
prox_cc_g = [gi.convex_conj.proximal for gi in g]
grad_h = h.gradient
prox_f = f.proximal
l = kwargs.pop('l', None)
if l is not None:
if len(l) != m:
            raise ValueError('`l` not same length as `L`')
grad_cc_l = [li.convex_conj.gradient for li in l]
if kwargs:
raise TypeError('unexpected keyword argument: {}'.format(kwargs))
# Pre-allocate values
v = [Li.range.zero() for Li in L]
y = x.space.zero()
for k in range(niter):
x_old = x
tmp_1 = grad_h(x) + sum(Li.adjoint(vi) for Li, vi in zip(L, v))
prox_f(tau)(x - tau * tmp_1, out=x)
y.lincomb(2.0, x, -1, x_old)
for i in range(m):
if l is not None:
# In this case gradients were given.
tmp_2 = sigma[i] * (L[i](y) - grad_cc_l[i](v[i]))
else:
# In this case gradients were not given. Therefore the gradient
# step is omitted. For more details, see the documentation.
tmp_2 = sigma[i] * L[i](y)
prox_cc_g[i](sigma[i])(v[i] + tmp_2, out=v[i])
if callback is not None:
callback(x)
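# --- Hedged usage sketch (editor's addition, not part of the original module):
# a minimal example of how the solver above could be called, assuming it is
# ODL's ``forward_backward_pd`` and that the standard ODL API is available.
# The functionals, operator and step sizes are illustrative only.
if __name__ == '__main__':
    import odl
    space = odl.rn(10)
    f = odl.solvers.ZeroFunctional(space)      # proper, convex, lsc
    g = [odl.solvers.L1Norm(space)]            # has g_i.convex_conj.proximal
    lin_ops = [odl.IdentityOperator(space)]    # one L_i per g_i, ||L_i|| = 1
    h = odl.solvers.L2NormSquared(space)       # differentiable, 2-Lipschitz grad
    x = space.zero()
    # tau = sigma = 0.5 with ||L|| = 1 and eta = 0.5 satisfies the convergence
    # condition stated in the Notes section of the docstring above.
    forward_backward_pd(x, f, g, lin_ops, h, tau=0.5, sigma=[0.5], niter=100)
    print(x)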
|
mpl-2.0
| -3,281,394,788,236,555,000
| 35.242105
| 79
| 0.609352
| false
| 3.403856
| false
| false
| false
|
happy5214/pywikibot-core
|
pywikibot/comms/threadedhttp.py
|
1
|
5773
|
# -*- coding: utf-8 -*-
"""Http backend layer, formerly providing a httplib2 wrapper."""
from __future__ import absolute_import, unicode_literals
# (C) Pywikibot team, 2007-2015
__version__ = '$Id$'
__docformat__ = 'epytext'
# standard python libraries
import codecs
import sys
if sys.version_info[0] > 2:
from urllib.parse import urlparse
else:
from urlparse import urlparse
import pywikibot
from pywikibot.tools import UnicodeMixin
_logger = "comm.threadedhttp"
class HttpRequest(UnicodeMixin):
"""Object wrapper for HTTP requests that need to block origin thread.
self.data will be either:
* a tuple of (dict, unicode) if the request was successful
* an exception
"""
def __init__(self, uri, method="GET", params=None, body=None, headers=None,
callbacks=None, charset=None, **kwargs):
"""
Constructor.
See C{Http.request} for parameters.
"""
self.uri = uri
self.method = method
self.params = params
self.body = body
self.headers = headers
if isinstance(charset, codecs.CodecInfo):
self.charset = charset.name
elif charset:
self.charset = charset
elif headers and 'accept-charset' in headers:
self.charset = headers['accept-charset']
else:
self.charset = None
self.callbacks = callbacks
self.args = [uri, method, body, headers]
self.kwargs = kwargs
self._parsed_uri = None
self._data = None
@property
def data(self):
"""Return the requests response tuple."""
        assert self._data is not None
return self._data
@data.setter
def data(self, value):
"""Set the requests response and invoke each callback."""
self._data = value
if self.callbacks:
for callback in self.callbacks:
callback(self)
@property
def exception(self):
"""Get the exception, if any."""
if isinstance(self.data, Exception):
return self.data
@property
def response_headers(self):
"""Return the response headers."""
if not self.exception:
return self.data.headers
@property
def raw(self):
"""Return the raw response body."""
if not self.exception:
return self.data.content
@property
def parsed_uri(self):
"""Return the parsed requested uri."""
if not self._parsed_uri:
self._parsed_uri = urlparse(self.uri)
return self._parsed_uri
@property
def hostname(self):
"""Return the host of the request."""
return self.parsed_uri.netloc
@property
def status(self):
"""Return the HTTP response status.
@rtype: int
"""
if not self.exception:
return self.data.status_code
@property
def header_encoding(self):
"""Return charset given by the response header."""
if not hasattr(self, '_header_encoding'):
content_type = self.response_headers.get('content-type', '')
pos = content_type.find('charset=')
if pos >= 0:
pos += len('charset=')
encoding = self.response_headers['content-type'][pos:]
self._header_encoding = encoding
elif 'json' in content_type:
# application/json | application/sparql-results+json
self._header_encoding = 'utf-8'
else:
self._header_encoding = None
return self._header_encoding
@property
def encoding(self):
"""Detect the response encoding."""
if not hasattr(self, '_encoding'):
if not self.charset and not self.header_encoding:
pywikibot.log(u"Http response doesn't contain a charset.")
charset = 'latin1'
else:
charset = self.charset
if (self.header_encoding and codecs.lookup(self.header_encoding) !=
(codecs.lookup(charset) if charset else None)):
if charset:
pywikibot.warning(
'Encoding "{0}" requested but "{1}" '
'received in the header.'.format(
charset, self.header_encoding))
try:
# TODO: Buffer decoded content, weakref does remove it too
# early (directly after this method)
self.raw.decode(self.header_encoding)
except UnicodeError as e:
self._encoding = e
else:
self._encoding = self.header_encoding
else:
self._encoding = None
if charset and (isinstance(self._encoding, Exception) or
not self._encoding):
try:
self.raw.decode(charset)
except UnicodeError as e:
self._encoding = e
else:
self._encoding = charset
if isinstance(self._encoding, Exception):
raise self._encoding
return self._encoding
def decode(self, encoding, errors='strict'):
"""Return the decoded response."""
return self.raw.decode(encoding, errors)
@property
def content(self):
"""Return the response decoded by the detected encoding."""
return self.decode(self.encoding)
def __unicode__(self):
"""Return the response decoded by the detected encoding."""
return self.content
def __bytes__(self):
"""Return the undecoded response."""
return self.raw
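# --- Hedged usage sketch (editor's addition, not part of the original module):
# the constructor above resolves the request charset in the order: explicit
# ``charset`` argument (a CodecInfo or a string) first, then an
# 'accept-charset' request header, otherwise None. Creating an HttpRequest
# performs no network I/O, so the behaviour can be checked directly.
if __name__ == '__main__':
    req = HttpRequest('https://example.org',
                      headers={'accept-charset': 'utf-8'})
    assert req.charset == 'utf-8'
    assert req.hostname == 'example.org'
    req = HttpRequest('https://example.org', charset=codecs.lookup('utf-8'))
    assert req.charset == 'utf-8'  # CodecInfo objects are reduced to .name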
|
mit
| -4,610,961,845,625,407,500
| 29.544974
| 79
| 0.556556
| false
| 4.786899
| false
| false
| false
|
shaunokeefe/hoponit
|
hoponit/harvest/management/commands/api.py
|
1
|
1352
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from harvest import untappd
class Command(BaseCommand):
    args = '<command> [args ...]'
    help = 'Query the Untappd API (venue, fs, checkins, fscheckins, limit, suburb)'
def handle(self, *args, **options):
cmd = args[0]
u = untappd.UntappdApi(settings.HARVEST_UNTAPPD_CLIENT_ENDPOINT)
if cmd == "venue":
venue_id = args[1]
venue = u.foursquare_id_to_untappd(venue_id)
if cmd == "fs":
venue_id = args[1]
venue = u.foursquare_id_to_untappd(venue_id)
print venue
if cmd == "checkins":
venue_id = args[1]
checkins = u.get_venue_checkins(venue_id=venue_id)
for checkin in checkins:
print "%s" % (checkin)
if cmd == "fscheckins":
venue_id = args[1]
checkins = u.get_venue_checkins(foursquare_venue_id=venue_id)
for checkin in checkins:
print "%s" % (checkin)
if cmd == "limit":
            print u.check_limit()
if cmd == "suburb":
suburb = args[1]
venues = u.get_venues_for_suburb(suburb)
for venue in venues:
print "%s: %s" % (venue['id'], venue['name'])
        self.stdout.write('Success')
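# --- Hedged usage note (editor's addition, not part of the original command):
# given this file's location (harvest/management/commands/api.py), Django
# exposes it as the ``api`` management command, so the branches handled above
# would be invoked roughly as follows (the IDs are placeholders):
#
#   python manage.py api venue <foursquare_venue_id>
#   python manage.py api fs <foursquare_venue_id>
#   python manage.py api checkins <untappd_venue_id>
#   python manage.py api fscheckins <foursquare_venue_id>
#   python manage.py api limit
#   python manage.py api suburb <suburb_name>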
|
mit
| -7,560,068,598,439,669,000
| 29.044444
| 73
| 0.542899
| false
| 3.346535
| false
| false
| false
|
andela-ooshodi/codango-debug
|
codango/resources/tests/test_routes.py
|
1
|
3625
|
from django.test import Client, TestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from resources.models import Resource
from django.test.utils import setup_test_environment
setup_test_environment()
class CommunityViewTest(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='Abiodun', password='shuaib')
self.user.set_password('shuaib')
self.user.save()
self.login = self.client.login(username='Abiodun', password='shuaib')
def create_resources(
self,
text='some more words',
resource_file='resource_file'):
return Resource.objects.create(
id=100,
text=text,
author=self.user,
resource_file=resource_file
)
def test_can_reach_ajax_community_page(self):
self.assertTrue(self.login)
response = self.client.get(
reverse('community', args=('all',)),
content_type='application/json',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTrue(response.status_code == 200)
self.assertContains(response, "There are currently no posts")
def test_can_post_new_ajax_content(self):
self.assertTrue(self.login)
response = self.client.post(
'/resource/create',
{'text': '1', },
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "success")
def test_add_an_empty_resource(self):
self.assertTrue(self.login)
response = self.client.post(
'/resource/newresource',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 404)
def test_user_can_upvote(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/like', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.upvotes()), 1)
def test_user_can_downvote(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.downvotes()), 1)
    def test_user_can_get_persistent_vote(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = self.client.post(
'/resource/100/like', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.upvotes()), 1)
def test_user_cannot_vote_more_than_once(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.upvotes()), 0)
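# --- Hedged illustration (editor's addition, not from this repository): the
# endpoints exercised above suggest a urlconf roughly like the hypothetical
# Django 1.x sketch below. The view classes are placeholders; only the paths
# and the 'community' route name are taken from the tests themselves.
#
# from django.conf.urls import url
# from resources import views
#
# urlpatterns = [
#     url(r'^community/(\w+)/$', views.CommunityView.as_view(), name='community'),
#     url(r'^resource/create$', views.ResourceCreateView.as_view()),
#     url(r'^resource/(?P<resource_id>\d+)/like$', views.LikeResourceView.as_view()),
#     url(r'^resource/(?P<resource_id>\d+)/unlike$', views.UnlikeResourceView.as_view()),
# ]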
|
mit
| 8,126,015,275,894,616,000
| 37.157895
| 78
| 0.622897
| false
| 3.961749
| true
| false
| false
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_xdr.py
|
1
|
29773
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup, editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
from unittest.mock import patch
import errno
import numpy as np
import os
import shutil
import subprocess
from numpy.testing import (assert_equal, assert_almost_equal)
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import (
PDB_sub_dry, PDB_sub_sol, TRR_sub_sol, TRR, XTC, GRO, PDB, CRD, PRMncdf,
NCDF, XTC_sub_sol, COORDINATES_XTC, COORDINATES_TOPOLOGY, COORDINATES_TRR)
from MDAnalysisTests.coordinates.base import (MultiframeReaderTest,
BaseReference, BaseWriterTest,
assert_timestep_almost_equal)
import MDAnalysis as mda
from MDAnalysis.coordinates.base import Timestep
from MDAnalysis.coordinates import XDR
class _XDRReader_Sub(object):
@pytest.fixture()
def atoms(self):
usol = mda.Universe(PDB_sub_sol, self.XDR_SUB_SOL)
return usol.select_atoms("not resname SOL")
def test_load_new_raises_ValueError(self):
# should fail if we load universe with a trajectory with different
# number of atoms when NOT using sub, same as before.
udry = mda.Universe(PDB_sub_dry)
with pytest.raises(ValueError):
udry.load_new(self.XDR_SUB_SOL)
def test_sub_coordinates(self, atoms):
"""
load solvated trajectory into universe with unsolvated protein.
"""
udry = mda.Universe(PDB_sub_dry)
udry.load_new(self.XDR_SUB_SOL, sub=atoms.indices)
ts = udry.atoms.ts
assert_timestep_almost_equal(ts, atoms.ts)
class TestTRRReader_Sub(_XDRReader_Sub):
XDR_SUB_SOL = TRR_sub_sol
class TestXTCReader_Sub(_XDRReader_Sub):
XDR_SUB_SOL = XTC_sub_sol
class _GromacsReader(object):
# This base class assumes same lengths and dt for XTC and TRR test cases!
filename = None
ref_unitcell = np.array(
[80.017, 80.017, 80.017, 60., 60., 90.], dtype=np.float32)
# computed with Gromacs: 362.26999999999998 nm**3 * 1000 A**3/nm**3
ref_volume = 362270.0
prec = 3
@pytest.fixture(scope='class')
def universe(self):
return mda.Universe(GRO, self.filename, convert_units=True)
def test_rewind_xdrtrj(self, universe):
universe.trajectory.rewind()
assert_equal(universe.coord.frame, 0, "rewinding to frame 1")
assert universe.trajectory._xdr._has_offsets == 1
def test_next_xdrtrj(self, universe):
universe.trajectory.rewind()
universe.trajectory.next()
assert_equal(universe.coord.frame, 1, "loading frame 1")
def test_jump_xdrtrj(self, universe):
universe.trajectory[4] # index is 0-based and frames are 0-based
assert_equal(universe.coord.frame, 4, "jumping to frame 4")
def test_jump_lastframe_xdrtrj(self, universe):
universe.trajectory[-1]
assert_equal(universe.coord.frame, 9,
"indexing last frame with trajectory[-1]")
def test_slice_xdrtrj(self, universe):
frames = [ts.frame for ts in universe.trajectory[2:9:3]]
assert_equal(frames, [2, 5, 8], "slicing xdrtrj [2:9:3]")
def test_reverse_xdrtrj(self, universe):
frames = [ts.frame for ts in universe.trajectory[::-1]]
assert_equal(frames, list(range(9, -1, -1)), "slicing xdrtrj [::-1]")
def test_coordinates(self, universe):
ca_nm = np.array(
[[6.043369675, 7.385184479, 1.381425762]], dtype=np.float32)
# coordinates in the base unit (needed for True)
ca_Angstrom = ca_nm * 10.0
universe.trajectory.rewind()
universe.trajectory.next()
universe.trajectory.next()
assert_equal(universe.coord.frame, 2, "failed to step to frame 3")
ca = universe.select_atoms('name CA and resid 122')
# low precision match (2 decimals in A, 3 in nm) because the above are
# the trr coords
assert_almost_equal(
ca.positions,
ca_Angstrom,
2,
err_msg="coords of Ca of resid 122 do not "
"match for frame 3")
def test_unitcell(self, universe):
"""Test that xtc/trr unitcell is read correctly (Issue 34)"""
universe.trajectory.rewind()
uc = universe.coord.dimensions
assert_almost_equal(
uc,
self.ref_unitcell,
self.prec,
err_msg="unit cell dimensions (rhombic dodecahedron)")
def test_volume(self, universe):
# need to reduce precision for test (nm**3 <--> A**3)
universe.trajectory.rewind()
vol = universe.coord.volume
assert_almost_equal(
vol,
self.ref_volume,
0,
err_msg="unit cell volume (rhombic dodecahedron)")
def test_dt(self, universe):
assert_almost_equal(
universe.trajectory.dt, 100.0, 4, err_msg="wrong timestep dt")
def test_totaltime(self, universe):
# test_totaltime(): need to reduce precision because dt is only precise
# to ~4 decimals and accumulating the inaccuracy leads to even lower
# precision in the totaltime (consequence of fixing Issue 64)
assert_almost_equal(
universe.trajectory.totaltime,
900.0,
3,
err_msg="wrong total length of trajectory")
def test_frame(self, universe):
universe.trajectory[4] # index is 0-based and frames are 0-based
assert_equal(universe.trajectory.frame, 4, "wrong frame number")
def test_time(self, universe):
universe.trajectory[4]
assert_almost_equal(
universe.trajectory.time, 400.0, 3, err_msg="wrong time of frame")
def test_get_Writer(self, universe, tmpdir):
ext = os.path.splitext(self.filename)[1]
outfile = str(tmpdir.join('xdr-reader-test' + ext))
with universe.trajectory.Writer(outfile) as W:
assert_equal(universe.trajectory.format, W.format)
assert_equal(universe.atoms.n_atoms, W.n_atoms)
def test_Writer(self, tmpdir):
universe = mda.Universe(GRO, self.filename, convert_units=True)
ext = os.path.splitext(self.filename)[1]
outfile = str(tmpdir.join('/xdr-reader-test' + ext))
with universe.trajectory.Writer(outfile) as W:
W.write(universe.atoms)
universe.trajectory.next()
W.write(universe.atoms)
universe.trajectory.rewind()
u = mda.Universe(GRO, outfile)
assert_equal(u.trajectory.n_frames, 2)
# prec = 6: TRR test fails; here I am generous and take self.prec =
# 3...
assert_almost_equal(u.atoms.positions, universe.atoms.positions,
self.prec)
def test_EOFraisesStopIteration(self, universe):
def go_beyond_EOF():
universe.trajectory[-1]
universe.trajectory.next()
with pytest.raises(StopIteration):
go_beyond_EOF()
class TestXTCReader(_GromacsReader):
filename = XTC
class TestXTCReaderClass(object):
def test_with_statement(self):
from MDAnalysis.coordinates.XTC import XTCReader
try:
with XTCReader(XTC) as trj:
N = trj.n_frames
frames = [ts.frame for ts in trj]
except:
raise AssertionError("with_statement not working for XTCReader")
assert_equal(
N,
10,
err_msg="with_statement: XTCReader reads wrong number of frames")
assert_equal(
frames,
np.arange(0, N),
err_msg="with_statement: XTCReader does not read all frames")
class TestTRRReader(_GromacsReader):
filename = TRR
def test_velocities(self, universe):
# frame 0, v in nm/ps
# from gmxdump -f MDAnalysisTests/data/adk_oplsaa.trr
# v[47675]={-7.86469e-01, 1.57479e+00, 2.79722e-01}
# v[47676]={ 2.70593e-08, 1.08052e-06, 6.97028e-07}
v_native = np.array(
[[-7.86469e-01, 1.57479e+00, 2.79722e-01],
[2.70593e-08, 1.08052e-06, 6.97028e-07]],
dtype=np.float32)
# velocities in the MDA base unit A/ps (needed for True)
v_base = v_native * 10.0
universe.trajectory.rewind()
assert_equal(universe.coord.frame, 0, "failed to read frame 1")
assert_almost_equal(
universe.trajectory.ts._velocities[[47675, 47676]],
v_base,
self.prec,
err_msg="ts._velocities for indices 47675,47676 do not "
"match known values")
assert_almost_equal(
universe.atoms.velocities[[47675, 47676]],
v_base,
self.prec,
err_msg="velocities for indices 47675,47676 do not "
"match known values")
for index, v_known in zip([47675, 47676], v_base):
assert_almost_equal(
universe.atoms[index].velocity,
v_known,
self.prec,
err_msg="atom[{0:d}].velocity does not match known values".
format(index))
class _XDRNoConversion(object):
filename = None
@pytest.fixture()
def universe(self):
return mda.Universe(PDB, self.filename, convert_units=False)
def test_coordinates(self, universe):
# note: these are the native coordinates in nm
ca_nm = np.array(
[[6.043369675, 7.385184479, 1.381425762]], dtype=np.float32)
universe.trajectory.rewind()
universe.trajectory.next()
universe.trajectory.next()
assert_equal(universe.trajectory.ts.frame, 2,
"failed to step to frame 3")
ca = universe.select_atoms('name CA and resid 122')
# low precision match because we also look at the trr: only 3 decimals
# in nm in xtc!
assert_almost_equal(
ca.positions,
ca_nm,
3,
err_msg="native coords of Ca of resid 122 "
"do not match for frame 3 with "
"convert_units=False")
class TestXTCNoConversion(_XDRNoConversion):
filename = XTC
class TestTRRNoConversion(_XDRNoConversion):
filename = TRR
class _GromacsWriter(object):
infilename = None # XTC or TRR
Writers = {
'.trr': mda.coordinates.TRR.TRRWriter,
'.xtc': mda.coordinates.XTC.XTCWriter,
}
@pytest.fixture(scope='class')
def universe(self):
return mda.Universe(GRO, self.infilename)
@pytest.fixture()
def Writer(self):
ext = os.path.splitext(self.infilename)[1]
return self.Writers[ext]
@pytest.fixture()
def outfile(self, tmpdir):
ext = os.path.splitext(self.infilename)[1]
return str(tmpdir.join('xdr-writer-test' + ext))
def test_write_trajectory(self, universe, Writer, outfile):
"""Test writing Gromacs trajectories (Issue 38)"""
with Writer(outfile, universe.atoms.n_atoms, dt=universe.trajectory.dt) as W:
for ts in universe.trajectory:
W.write(universe)
uw = mda.Universe(GRO, outfile)
# check that the coordinates are identical for each time step
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
assert_almost_equal(
written_ts._pos,
orig_ts._pos,
3,
err_msg="coordinate mismatch between "
"original and written trajectory at "
"frame %d (orig) vs %d (written)" % (orig_ts.frame,
written_ts.frame))
def test_timestep_not_modified_by_writer(self, universe, Writer, outfile):
trj = universe.trajectory
ts = trj.ts
trj[-1] # last timestep (so that time != 0)
x = ts._pos.copy()
time = ts.time
with Writer(outfile, trj.n_atoms, dt=trj.dt) as W:
# last timestep (so that time != 0) (say it again, just in case...)
trj[-1]
W.write(universe)
assert_equal(
ts._pos,
x,
err_msg="Positions in Timestep were modified by writer.")
assert_equal(
ts.time, time, err_msg="Time in Timestep was modified by writer.")
class TestXTCWriter(_GromacsWriter):
__test__ = True
infilename = XTC
class TestTRRWriter(_GromacsWriter):
__test__ = True
infilename = TRR
def test_velocities(self, universe, Writer, outfile):
with Writer(outfile, universe.atoms.n_atoms, dt=universe.trajectory.dt) as W:
for ts in universe.trajectory:
W.write(universe)
uw = mda.Universe(GRO, outfile)
# check that the velocities are identical for each time step
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
assert_almost_equal(
written_ts._velocities,
orig_ts._velocities,
3,
err_msg="velocities mismatch between "
"original and written trajectory at "
"frame %d (orig) vs %d (written)" % (orig_ts.frame,
written_ts.frame))
def test_gaps(self, universe, Writer, outfile):
"""Tests the writing and reading back of TRRs with gaps in any of
the coordinates/velocities properties."""
with Writer(outfile, universe.atoms.n_atoms, dt=universe.trajectory.dt) as W:
for ts in universe.trajectory:
                # Insert some gaps in the properties: coords every 4 steps, vels
# every 2.
if ts.frame % 4 == 0:
ts.has_positions = False
if ts.frame % 2 == 0:
ts.has_velocities = False
W.write(universe)
uw = mda.Universe(GRO, outfile)
# check that the velocities are identical for each time step, except
# for the gaps (that we must make sure to raise exceptions on).
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
if ts.frame % 4 != 0:
assert_almost_equal(
written_ts.positions,
orig_ts.positions,
3,
err_msg="coordinates mismatch "
"between original and written "
"trajectory at frame {} (orig) "
"vs {} (written)".format(orig_ts.frame, written_ts.frame))
else:
with pytest.raises(mda.NoDataError):
getattr(written_ts, 'positions')
if ts.frame % 2 != 0:
assert_almost_equal(
written_ts.velocities,
orig_ts.velocities,
3,
err_msg="velocities mismatch "
"between original and written "
"trajectory at frame {} (orig) "
"vs {} (written)".format(orig_ts.frame, written_ts.frame))
else:
with pytest.raises(mda.NoDataError):
getattr(written_ts, 'velocities')
class _GromacsWriterIssue101(object):
Writers = {
'.trr': mda.coordinates.TRR.TRRWriter,
'.xtc': mda.coordinates.XTC.XTCWriter,
}
ext = None # set to '.xtc' or '.trr'
prec = 3
@pytest.fixture()
def Writer(self):
return self.Writers[self.ext]
@pytest.fixture()
def outfile(self, tmpdir):
return str(tmpdir.join('/xdr-writer-issue101' + self.ext))
def test_single_frame_GRO(self, Writer, outfile):
self._single_frame(GRO, Writer, outfile)
def test_single_frame_PDB(self, Writer, outfile):
self._single_frame(PDB, Writer, outfile)
def test_single_frame_CRD(self, Writer, outfile):
self._single_frame(CRD, Writer, outfile)
def _single_frame(self, filename, Writer, outfile):
u = mda.Universe(filename)
with Writer(outfile, u.atoms.n_atoms) as W:
W.write(u.atoms)
w = mda.Universe(filename, outfile)
assert_equal(w.trajectory.n_frames, 1,
"single frame trajectory has wrong number of frames")
assert_almost_equal(
w.atoms.positions,
u.atoms.positions,
self.prec,
err_msg="coordinates do not match for {0!r}".format(filename))
class TestXTCWriterSingleFrame(_GromacsWriterIssue101):
ext = ".xtc"
prec = 2
class TestTRRWriterSingleFrame(_GromacsWriterIssue101):
ext = ".trr"
class _GromacsWriterIssue117(object):
"""Issue 117: Cannot write XTC or TRR from AMBER NCDF"""
ext = None
prec = 5
@pytest.fixture()
def universe(self):
return mda.Universe(PRMncdf, NCDF)
def test_write_trajectory(self, universe, tmpdir):
"""Test writing Gromacs trajectories from AMBER NCDF (Issue 117)"""
outfile = str(tmpdir.join('xdr-writer-issue117' + self.ext))
with mda.Writer(outfile, n_atoms=universe.atoms.n_atoms) as W:
for ts in universe.trajectory:
W.write(universe)
uw = mda.Universe(PRMncdf, outfile)
# check that the coordinates are identical for each time step
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
assert_almost_equal(
written_ts._pos,
orig_ts._pos,
self.prec,
err_msg="coordinate mismatch "
"between original and written "
"trajectory at frame %d (orig) vs %d "
"(written)" % (orig_ts.frame, written_ts.frame))
class TestXTCWriterIssue117(_GromacsWriterIssue117):
__test__ = True
ext = ".xtc"
prec = 2
class TestTRRWriterIssue117(_GromacsWriterIssue117):
__test__ = True
ext = ".trr"
def test_triclinic_box():
"""Test coordinates.core.triclinic_box() (Issue 61)"""
unitcell = np.array([80.017, 55, 100.11, 60.00, 30.50, 90.00])
box = mda.coordinates.core.triclinic_vectors(unitcell)
new_unitcell = mda.coordinates.core.triclinic_box(box[0], box[1], box[2])
assert_almost_equal(
new_unitcell,
unitcell,
3,
err_msg="unitcell round-trip connversion failed (Issue 61)")
class XTCReference(BaseReference):
def __init__(self):
super(XTCReference, self).__init__()
self.trajectory = COORDINATES_XTC
self.topology = COORDINATES_TOPOLOGY
self.reader = mda.coordinates.XTC.XTCReader
self.writer = mda.coordinates.XTC.XTCWriter
self.ext = 'xtc'
self.prec = 3
self.changing_dimensions = True
class TestXTCReader_2(MultiframeReaderTest):
@staticmethod
@pytest.fixture()
def ref():
return XTCReference()
class TestXTCWriter_2(BaseWriterTest):
@staticmethod
@pytest.fixture()
def ref():
return XTCReference()
def test_different_precision(self, ref, tmpdir):
out = 'precision-test' + ref.ext
        # store more than 9 atoms to enable compression
n_atoms = 40
with tmpdir.as_cwd():
with ref.writer(out, n_atoms, precision=5) as w:
u = make_Universe(size=(n_atoms, 1, 1), trajectory=True)
u.trajectory.ts.positions = np.random.random(size=(n_atoms, 3))
w.write(u)
xtc = mda.lib.formats.libmdaxdr.XTCFile(out)
frame = xtc.read()
assert_equal(len(xtc), 1)
assert_equal(xtc.n_atoms, n_atoms)
assert_equal(frame.prec, 10.0**5)
class TRRReference(BaseReference):
def __init__(self):
super(TRRReference, self).__init__()
self.trajectory = COORDINATES_TRR
self.topology = COORDINATES_TOPOLOGY
self.changing_dimensions = True
self.reader = mda.coordinates.TRR.TRRReader
self.writer = mda.coordinates.TRR.TRRWriter
self.ext = 'trr'
self.prec = 3
self.first_frame.velocities = self.first_frame.positions / 10
self.first_frame.forces = self.first_frame.positions / 100
self.second_frame.velocities = self.second_frame.positions / 10
self.second_frame.forces = self.second_frame.positions / 100
self.last_frame.velocities = self.last_frame.positions / 10
self.last_frame.forces = self.last_frame.positions / 100
self.jump_to_frame.velocities = self.jump_to_frame.positions / 10
self.jump_to_frame.forces = self.jump_to_frame.positions / 100
def iter_ts(self, i):
ts = self.first_frame.copy()
ts.positions = 2**i * self.first_frame.positions
ts.velocities = ts.positions / 10
ts.forces = ts.positions / 100
ts.time = i
ts.frame = i
return ts
class TestTRRReader_2(MultiframeReaderTest):
@staticmethod
@pytest.fixture()
def ref():
return TRRReference()
class TestTRRWriter_2(BaseWriterTest):
@staticmethod
@pytest.fixture()
def ref():
return TRRReference()
# tests writing and reading in one!
def test_lambda(self, ref, universe, tmpdir):
outfile = 'write-lambda-test' + ref.ext
with tmpdir.as_cwd():
with ref.writer(outfile, universe.trajectory.n_atoms) as W:
for i, ts in enumerate(universe.trajectory):
ts.data['lambda'] = i / float(universe.trajectory.n_frames)
W.write(universe)
reader = ref.reader(outfile)
for i, ts in enumerate(reader):
assert_almost_equal(ts.data['lambda'], i / float(reader.n_frames))
class _GromacsReader_offsets(object):
# This base class assumes same lengths and dt for XTC and TRR test cases!
filename = None
ref_unitcell = np.array(
[80.017, 80.017, 80.017, 60., 60., 90.], dtype=np.float32)
# computed with Gromacs: 362.26999999999998 nm**3 * 1000 A**3/nm**3
ref_volume = 362270.0
ref_offsets = None
_reader = None
prec = 3
@pytest.fixture(scope='class')
def traj(self, tmpdir_factory):
# copy of original test trajectory in a temporary folder. This is
# needed since offsets are automatically generated in the same
# directory. Here we also clean up nicely all files we generate
tmpdir = tmpdir_factory.mktemp('xtc')
shutil.copy(self.filename, str(tmpdir))
traj = str(tmpdir.join(os.path.basename(self.filename)))
# ensure initialization of offsets
self._reader(traj)
return traj
@pytest.fixture()
def trajectory(self, traj):
return self._reader(traj)
def test_offsets(self, trajectory, traj):
trajectory._read_offsets(store=True)
assert_almost_equal(
trajectory._xdr.offsets,
self.ref_offsets,
err_msg="wrong frame offsets")
outfile_offsets = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(outfile_offsets)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
assert_almost_equal(
trajectory._xdr.offsets,
saved_offsets['offsets'],
err_msg="error saving frame offsets")
assert_almost_equal(
self.ref_offsets,
saved_offsets['offsets'],
err_msg="saved frame offsets don't match "
"the known ones")
trajectory._load_offsets()
assert_almost_equal(
trajectory._xdr.offsets,
self.ref_offsets,
err_msg="error loading frame offsets")
assert_equal(saved_offsets['ctime'], os.path.getctime(traj))
assert_equal(saved_offsets['size'], os.path.getsize(traj))
def test_reload_offsets(self, traj):
self._reader(traj, refresh_offsets=True)
    def test_nonexistent_offsets_file(self, traj):
        # assert that a nonexistent file returns False during read-in
outfile_offsets = XDR.offsets_filename(traj)
with patch.object(np, "load") as np_load_mock:
np_load_mock.side_effect = IOError
saved_offsets = XDR.read_numpy_offsets(outfile_offsets)
assert_equal(saved_offsets, False)
def test_reload_offsets_if_offsets_readin_fails(self, trajectory):
# force the np.load call that is called in read_numpy_offsets
# during _load_offsets to give an IOError
# ensure that offsets are then read-in from the trajectory
with patch.object(np, "load") as np_load_mock:
np_load_mock.side_effect = IOError
trajectory._load_offsets()
assert_almost_equal(
trajectory._xdr.offsets,
self.ref_offsets,
err_msg="error loading frame offsets")
def test_persistent_offsets_size_mismatch(self, traj):
# check that stored offsets are not loaded when trajectory
# size differs from stored size
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
saved_offsets['size'] += 1
with open(fname, 'wb') as f:
np.savez(f, **saved_offsets)
with pytest.warns(UserWarning, match="Reload offsets"):
self._reader(traj)
def test_persistent_offsets_ctime_mismatch(self, traj):
# check that stored offsets are not loaded when trajectory
# ctime differs from stored ctime
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
saved_offsets['ctime'] += 1
with open(fname, 'wb') as f:
np.savez(f, **saved_offsets)
with pytest.warns(UserWarning, match="Reload offsets"):
self._reader(traj)
def test_persistent_offsets_natoms_mismatch(self, traj):
        # check that stored offsets are not loaded when the trajectory's
        # number of atoms differs from the stored n_atoms
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
saved_offsets['n_atoms'] += 1
np.savez(fname, **saved_offsets)
with pytest.warns(UserWarning, match="Reload offsets"):
self._reader(traj)
def test_persistent_offsets_last_frame_wrong(self, traj):
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
idx_frame = 3
saved_offsets['offsets'][idx_frame] += 42
np.savez(fname, **saved_offsets)
with pytest.warns(UserWarning, match="seek failed"):
reader = self._reader(traj)
reader[idx_frame]
def test_unsupported_format(self, traj):
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
idx_frame = 3
saved_offsets.pop('n_atoms')
np.savez(fname, **saved_offsets)
# ok as long as this doesn't throw
reader = self._reader(traj)
reader[idx_frame]
def test_persistent_offsets_readonly(self, tmpdir):
shutil.copy(self.filename, str(tmpdir))
if os.name == 'nt':
# Windows platform has a unique way to deny write access
subprocess.call("icacls {fname} /deny Users:W".format(fname=tmpdir),
shell=True)
else:
os.chmod(str(tmpdir), 0o555)
filename = str(tmpdir.join(os.path.basename(self.filename)))
# try to write a offsets file
self._reader(filename)
assert_equal(os.path.exists(XDR.offsets_filename(filename)), False)
class TestXTCReader_offsets(_GromacsReader_offsets):
__test__ = True
filename = XTC
ref_offsets = np.array([
0, 165188, 330364, 495520, 660708, 825872, 991044, 1156212, 1321384,
1486544
])
_reader = mda.coordinates.XTC.XTCReader
class TestTRRReader_offsets(_GromacsReader_offsets):
__test__ = True
filename = TRR
ref_offsets = np.array([
0, 1144464, 2288928, 3433392, 4577856, 5722320, 6866784, 8011248,
9155712, 10300176
])
_reader = mda.coordinates.TRR.TRRReader
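# --- Hedged usage sketch (editor's addition, not part of the MDAnalysis test
# suite): the offset tests above rely on two helpers from
# MDAnalysis.coordinates.XDR. ``offsets_filename(traj)`` maps a trajectory
# path to the hidden numpy offsets file stored next to it, and
# ``read_numpy_offsets(fname)`` returns either a dict (with at least the keys
# 'offsets', 'ctime', 'size' and 'n_atoms' used above) or False when the file
# cannot be read.
if __name__ == '__main__':
    offsets_file = XDR.offsets_filename(XTC)
    saved = XDR.read_numpy_offsets(offsets_file)
    if saved is False:
        print("no offsets cached yet for", XTC)
    else:
        print("cached frame offsets:", saved['offsets'])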
|
gpl-2.0
| 1,926,006,717,862,227,500
| 34.444048
| 85
| 0.604004
| false
| 3.663918
| true
| false
| false
|