Dataset columns (one record per source file):

- repo_name: string (length 5 to 92)
- path: string (length 4 to 221)
- copies: string (19 distinct values)
- size: string (length 4 to 6)
- content: string (length 766 to 896k)
- license: string (15 distinct values)
- hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B)
- line_mean: float64 (6.51 to 99.9)
- line_max: int64 (32 to 997)
- alpha_frac: float64 (0.25 to 0.96)
- autogenerated: bool (1 class)
- ratio: float64 (1.5 to 13.6)
- config_test: bool (2 classes)
- has_no_keywords: bool (2 classes)
- few_assignments: bool (1 class)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
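The numeric and boolean columns above are typically used to filter records before further processing. The sketch below is not part of the dataset; it shows one way such a filter might look for rows loaded as plain dictionaries, where the `rows` variable and the chosen thresholds are assumptions made for the example.

```python
def keep_row(row):
    """Keep rows that look like hand-written, non-test Python source."""
    return (
        not row["autogenerated"]
        and not row["config_test"]
        and 0.25 <= row["alpha_frac"] <= 0.96
        and row["line_max"] < 1000
    )

# rows = [...]   # hypothetical iterable of records with the columns listed above
# kept = [r for r in rows if keep_row(r)]
```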
davebridges/mousedb
|
mousedb/veterinary/views.py
|
1
|
7047
|
'''This module generates the views for the veterinary app.
There is one generic home view for the entire app as well as detail, create, update and delete views for these models:
* :class:`~mousedb.veterinary.models.MedicalIssue`
* :class:`~mousedb.veterinary.models.MedicalCondition`
* :class:`~mousedb.veterinary.models.MedicalTreatment`
'''
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from braces.views import LoginRequiredMixin, PermissionRequiredMixin
from mousedb.veterinary.models import MedicalIssue, MedicalCondition, MedicalTreatment
class VeterinaryHome(LoginRequiredMixin, TemplateView):
'''This view is the main page for the veterinary app.
This view contains links to all medical issues, conditions and treatments.
If this becomes too unwieldy over time, it might be necessary to limit medical_issues to the most recent few.'''
template_name = "veterinary_home.html"
def get_context_data(self, **kwargs):
'''Adds to the context all issues, conditions and treatments.'''
context = super(VeterinaryHome, self).get_context_data(**kwargs)
context['medical_issues'] = MedicalIssue.objects.all()
context['medical_conditions'] = MedicalCondition.objects.all()
context['medical_treatments'] = MedicalTreatment.objects.all()
return context
class MedicalIssueDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalIssue`.
It passes an object **medical_issue** when the url **/veterinary/medical-issue/<pk>** is requested.'''
model = MedicalIssue
context_object_name = 'medical_issue'
template_name = 'medical_issue_detail.html'
class MedicalIssueCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to create a new medical issue and is found at the url **/veterinary/medical-issue/new**.'''
permission_required = 'veterinary.create_medicalissue'
model = MedicalIssue
fields = '__all__'
template_name = 'medical_issue_form.html'
class MedicalIssueUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to update a medical issue and is found at the url **/veterinary/medical-issue/<pk>/edit**.'''
permission_required = 'veterinary.update_medicalissue'
model = MedicalIssue
fields = '__all__'
context_object_name = 'medical_issue'
template_name = 'medical_issue_form.html'
class MedicalIssueDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to delete a medical issue and is found at the url **/veterinary/medical-issue/<pk>/delete**.'''
permission_required = 'veterinary.delete_medicalissue'
model = MedicalIssue
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalConditionDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalCondition`.
It passes an object **medical_condition** when the url **/veterinary/medical-condition/<slug>** is requested.'''
model = MedicalCondition
context_object_name = 'medical_condition'
template_name = 'medical_condition_detail.html'
class MedicalConditionCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalCondition`.
It requires the permissions to create a new medical condition and is found at the url **/veterinary/medical-condition/new**.'''
permission_required = 'veterinary.create_medicalcondition'
model = MedicalCondition
fields = '__all__'
template_name = 'medical_condition_form.html'
class MedicalConditionUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalCondition`.
It requires the permissions to update a medical condition and is found at the url **/veterinary/medical-condition/<slug>/edit**.'''
permission_required = 'veterinary.update_medicalcondition'
model = MedicalCondition
fields = '__all__'
context_object_name = 'medical_condition'
template_name = 'medical_condition_form.html'
class MedicalConditionDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalCondition`.
It requires the permissions to delete a medical condition and is found at the url **/veterinary/medical-condition/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicalcondition'
model = MedicalCondition
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalTreatmentDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalTreatment`.
It passes an object **medical_treatment** when the url **/veterinary/medical-treatment/<slug>** is requested.'''
model = MedicalTreatment
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_detail.html'
class MedicalTreatmentCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalTreatment`.
It requires the permissions to create a new medical treatment and is found at the url **/veterinary/medical-treatment/new**.'''
permission_required = 'veterinary.create_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalTreatment`.
It requires the permissions to update a medical treatment and is found at the url **/veterinary/medical-treatment/<slug>/edit**.'''
permission_required = 'veterinary.update_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalTreatment`.
It requires the permissions to delete a medical treatment and is found at the url **/veterinary/medical-treatment/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicaltreatment'
model = MedicalTreatment
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
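# ---------------------------------------------------------------------------
# Editor's note (not part of the original module): the routes named in the
# docstrings above, together with the ``veterinary-home`` name used by
# ``reverse_lazy``, imply a companion URLconf roughly like the sketch below.
# The pattern names other than ``veterinary-home`` and the Django 1.x
# ``url()`` API are assumptions, not taken from the repository.
#
#     from django.conf.urls import url
#     from mousedb.veterinary import views
#
#     urlpatterns = [
#         url(r'^$', views.VeterinaryHome.as_view(), name='veterinary-home'),
#         url(r'^medical-issue/new/$', views.MedicalIssueCreate.as_view(),
#             name='medical-issue-new'),
#         url(r'^medical-issue/(?P<pk>\d+)/$', views.MedicalIssueDetail.as_view(),
#             name='medical-issue-detail'),
#     ]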
|
bsd-3-clause
| 775,855,840,487,166,100
| 44.75974
| 133
| 0.724847
| false
| 3.782609
| false
| false
| false
|
ANR-DIADEMS/timeside-diadems
|
timeside/plugins/diadems/irit_singings.py
|
1
|
7234
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Maxime Le Coz <lecoz@irit.fr>
# This file is part of TimeSide.
# TimeSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# TimeSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
# Author: Maxime Le Coz <lecoz@irit.fr>
from timeside.core import implements, interfacedoc
from timeside.core.analyzer import Analyzer, IAnalyzer
from timeside.plugins.diadems.irit_monopoly import IRITMonopoly
from timeside.plugins.diadems.irit_harmo_tracking import IRITHarmoTracker
from timeside.core.preprocessors import frames_adapter
from numpy import median, mean, linspace, argmin, argmax, array
from numpy.fft import rfft
from collections import Counter
class IRITSinging(Analyzer):
implements(IAnalyzer)
def __init__(self):
super(IRITSinging, self).__init__()
self.parents['irit_monopoly'] = IRITMonopoly()
self.parents['irit_harmo_tracking'] = IRITHarmoTracker()
self.thPoly = 0.15
self.thMono = 0.1
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None,
totalframes=None):
super(IRITSinging, self).setup(
channels, samplerate, blocksize, totalframes)
@staticmethod
@interfacedoc
def id():
return "irit_singing"
@staticmethod
@interfacedoc
def name():
return "IRIT Singings detection"
@staticmethod
@interfacedoc
def unit():
return ""
def __str__(self):
return "Singings segments"
@frames_adapter
def process(self, frames, eod=False):
return frames, eod
def post_process(self):
"""
:return:
"""
trackings = self.parents['irit_harmo_tracking'].results['irit_harmo_tracking']['data_object']["value"]
tr = sorted(trackings[0].nodes, key=lambda x: x.time)
tr_frame_rate = 1.0 / float(tr[1].time - tr[0].time)
pitch = self.parents['irit_monopoly'].results['irit_monopoly.pitch']['data_object']["value"]
segments_monopoly = self.parents['irit_monopoly'].results['irit_monopoly.segments']['data_object']
segments_monopoly = [(start, start + dur, label == 1) for start, dur, label in
zip(segments_monopoly["time"], segments_monopoly["duration"], segments_monopoly["label"])]
segments_chant = []
f0_frame_rate = 1.0 / float(pitch[1][0] - pitch[0][0])
for start, stop, label in segments_monopoly:
cumulChant = 0
# Beware of label changes ...
if label:
segs = split_notes(extract_pitch(pitch, start, stop), f0_frame_rate)
for seg in segs:
if has_vibrato(seg[2], f0_frame_rate):
cumulChant += seg[1] - seg[0]
segments_chant += [(start, stop, cumulChant / (stop - start) >= self.thMono)]
else:
for start, stop, value in extended_vibrato(trackings, tr_frame_rate):
segments_chant += [(start, stop, value >= self.thPoly)]
label = {1: "Singing", 0: "Non Singing"}
segs = self.new_result(data_mode='label', time_mode='segment')
segs.id_metadata.id += '.' + 'segments'
segs.id_metadata.name += ' ' + 'Segments'
segs.data_object.label_metadata.label = label
segs.data_object.time = array([s[0] for s in segments_chant])
segs.data_object.duration = array([s[1] - s[0] for s in segments_chant])
segs.data_object.label = array([int(s[2]) for s in segments_chant])
self.add_result(segs)
def extended_vibrato(trackings, spectrogram_sampling_rate, number_of_extrema_for_rupture=3):
"""
Vibrato detection in a polyphonic context.
"""
extremums = [s.start for s in trackings] + [s.stop for s in trackings]
last = max(extremums)
counter = Counter(extremums)
ruptures = [0] + sorted([time for time in counter if counter[time] >= number_of_extrema_for_rupture]) + [last]
scores = []
for i, rupture in enumerate(ruptures[:-1]):
sum_present = 0.0
sum_vibrato = 0.0
for s in trackings:
frequencies = s.get_portion(rupture, ruptures[i + 1])
if len(frequencies) > 0.05 * spectrogram_sampling_rate:
sum_present += len(frequencies)
if has_vibrato(frequencies, spectrogram_sampling_rate):
sum_vibrato += len(frequencies)
if sum_present > 0:
scores += [(rupture, ruptures[i + 1], sum_vibrato / sum_present)]
return scores
def extract_pitch(pitch, start, stop):
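    """Return the pitch values whose timestamps fall within [start, stop]."""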
return [p for t, p in pitch if start <= t <= stop]
def smoothing(data, number_of_points=3, smoothing_function=mean):
"""
"""
w = number_of_points / 2
return [0.0] * w + [smoothing_function(data[i - w:i + w]) for i in range(w, len(data) - w)] + [0.0] * w
def split_notes(f0, f0_sample_rate, minimum_segment_length=0.0):
"""
Split the pitch track into pseudo-notes based on the fundamental frequency.
Returns the list of segments in seconds.
"""
f0 = smoothing(f0, number_of_points=5, smoothing_function=median)
half_tone_ratio = 2**(1.0 / 12.0)
minimum_segment_length = minimum_segment_length / f0_sample_rate
ratios = [max([y1, y2]) / min([y1, y2]) if min([y1, y2]) > 0 else 0 for y1, y2 in zip(f0[:-2], f0[1:])]
boundaries = [0] + [i + 1 for i, ratio in enumerate(ratios) if ratio > half_tone_ratio]
return [(start * f0_sample_rate, stop * f0_sample_rate, f0[start:stop])
for start, stop in zip(boundaries[:-2], boundaries[1:]) if stop - start > minimum_segment_length]
def has_vibrato(serie, sampling_rate, minimum_frequency=4, maximum_frequency=8, Nfft=100):
"""
Vibrato detection on a series using the Fourier transform of its derivative.
"""
vibrato = False
frequency_scale = linspace(0, sampling_rate / 2, Nfft / 2)
index_min_vibrato = argmin(abs(frequency_scale - minimum_frequency))
index_max_vibrato = argmin(abs(frequency_scale - maximum_frequency))
derivative = [v1 - v2 for v1, v2 in zip(serie[:-2], serie[1:])]
fft_derivative = abs(rfft(derivative, Nfft))[:Nfft / 2]
i_max = argmax(fft_derivative)
if index_max_vibrato >= i_max >= index_min_vibrato:
vibrato = True
return vibrato
# Generate Grapher for IRITSinging analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayIritSinging = DisplayAnalyzer.create(
analyzer=IRITSinging,
result_id='irit_singing.segments',
grapher_id='grapher_irit_singing_segments',
grapher_name='Singings detection',
background='waveform',
staging=True)
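# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# ``has_vibrato`` looks for a dominant 4-8 Hz component in the spectrum of
# the derivative of a pitch series; the synthetic pitch track below is an
# assumed example (100 frames/s, 6 Hz modulation), not TimeSide data.
if __name__ == "__main__":
    from numpy import arange, sin, pi
    frame_rate = 100.0
    t = arange(0, 1.0, 1.0 / frame_rate)
    # ~440 Hz pitch with a +/- 5 Hz, 6 Hz frequency modulation
    synthetic_pitch = 440.0 + 5.0 * sin(2 * pi * 6.0 * t)
    print(has_vibrato(synthetic_pitch, frame_rate))  # expected: True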
|
gpl-2.0
| -1,683,434,666,844,066,000
| 35.14
| 119
| 0.639734
| false
| 3.301964
| false
| false
| false
|
SnapSearch/SnapSearch-Client-Python
|
src/SnapSearch/detector.py
|
1
|
11166
|
# -*- coding: utf-8 -*-
"""
SnapSearch.detector
~~~~~~~~~~~~~~~~~~~
:copyright: 2014 by `SnapSearch <https://snapsearch.io/>`_
:license: MIT, see LICENSE for more details.
:author: `LIU Yu <liuyu@opencps.net>`_
:date: 2014/03/08
"""
# future import should come first
from __future__ import with_statement
__all__ = ['Detector', ]
import json
import os
import re
import sys
import SnapSearch.api as api
import SnapSearch.error as error
from ._compat import u
class Detector(object):
"""
Detects if the incoming HTTP request a) came from a search engine robot
and b) is eligible for interception. The ``Detector`` inspects the
following aspects of the incoming HTTP request:
1. if the request uses HTTP or HTTPS protocol
2. if the request uses HTTP ``GET`` method
3. if the request is *not* from any ignored user agents
(ignored robots take precedence over matched robots)
4. if the request is accessing any route *not* matching the whitelist
5. if the request is *not* accessing any route matching the blacklist
6. if the request is *not* accessing any resource with an invalid
file extension
7. if the request has ``_escaped_fragment_`` query parameter
8. if the request is from any matched user agents
"""
@property
def robots(self):
"""
``dict`` of ``list``'s of user agents from search engine robots:
.. code-block:: json
{
"ignore": [
# user agents to be ignored
]
"match": [
# user agents to be matched
]
}
Can be changed to customize ignored and matched search engine robots.
The ``ignore`` list takes precedence over the ``match`` list.
"""
return self.__robots
@property
def extensions(self):
"""
``dict`` of ``list``'s of valid file extensions:
.. code-block:: json
{
"generic": [
# valid generic extensions
],
"python": [
# valid python extensions
]
}
Can be changed to customize valid file extensions.
"""
return self.__extensions
# private properties
__slots__ = ['__check_file_extensions', '__extensions', '__ignored_routes',
'__matched_routes', '__robots', ]
def __init__(self,
ignored_routes=[],
matched_routes=[],
check_file_extensions=False,
robots_json=None,
extensions_json=None):
"""
Optional arguments:
:param ignored_routes: blacklisted route regular expressions.
:type ignored_routes: ``list`` or ``tuple``
:param matched_routes: whitelisted route regular expressions.
:type matched_routes: ``list`` or ``tuple``
:param check_file_extensions: to check if the URL is going to a static
file resource that should not be intercepted.
:type check_file_extensions: ``bool``
:param robots_json: absolute path to an external ``robots.json`` file.
:param extensions_json: absolute path to an external
``extensions.json`` file.
:raises AssertionError: if ``extensions.json`` is specified, yet
``check_file_extensions`` is ``False``.
"""
self.__ignored_routes = set(ignored_routes)
self.__matched_routes = set(matched_routes)
# ``extensions.json`` is specified, yet checking file extensions is not
# required; this probably indicates a mistake.
assert(not (not check_file_extensions and extensions_json)), \
"specified ``extensions_json`` " \
"yet ``check_file_extensions`` is false"
self.__check_file_extensions = check_file_extensions
# json.load() may raise IOError, TypeError, or ValueError
with open(robots_json or api.DEFAULT_ROBOTS_JSON) as f:
self.__robots = json.load(f)
f.close()
# same as above
with open(extensions_json or api.DEFAULT_EXTENSIONS_JSON) as f:
self.__extensions = json.load(f)
f.close()
pass # void return
def __call__(self, request):
"""
:param request: incoming HTTP request.
:type request: ``dict``
:returns: :RFC:`3986` percent-encoded full URL if the incoming HTTP
request is eligible for interception, or ``None`` otherwise.
:raises error.SnapSearchError: if the structure of either
``robots.json`` or ``extensions.json`` is invalid.
"""
# wrap the incoming HTTP request (CGI-style environ)
environ = api.AnyEnv(request)
# do not intercept protocols other than HTTP and HTTPS
if environ.scheme not in ("http", "https", ):
return None
# do not intercept HTTP methods other than GET
if environ.method not in ("GET", ):
return None
# user agent may not exist in the HTTP request
user_agent = environ.user_agent
# request uri with query string
real_path = environ.path_qs
# validate ``robots`` since it can be altered from outside
if not self._validate_robots():
raise error.SnapSearchError(
"structure of ``robots`` is invalid")
# do not intercept requests from ignored robots
ignore_regex = u("|").join(
[re.escape(tok) for tok in self.robots.get('ignore', [])])
if re.search(ignore_regex, user_agent, re.I | re.U):
return None
# do not intercept if there exist whitelisted route(s) (matched_routes)
# and that the requested route **does not** match any one of them.
if self.__matched_routes:
found = False
for route in self.__matched_routes:
route_regex = u(route)
if re.search(route_regex, real_path, re.I | re.U):
found = True
break
if not found:
return None
# do not intercept if there exist blacklisted route(s) (ignored_routes)
# and that the requested route **does** match one of them.
if self.__ignored_routes:
for route in self.__ignored_routes:
route_regex = u(route)
if re.search(route_regex, real_path, re.I | re.U):
return None
# detect extensions in order to prevent direct requests to static files
if self.__check_file_extensions:
# validate ``extensions`` since it can be altered from outside
if not self._validate_extensions():
raise error.SnapSearchError(
"structure of ``extensions`` is invalid")
# create a set of file extensions common for HTML resources
valid_extensions = set(
[s.lower() for s in self.extensions.get('generic', [])])
valid_extensions.update(
[s.lower() for s in self.extensions.get('python', [])])
# file extension regex. it looks for "/{file}.{ext}" in an URL that
# is not preceded by '?' (query parameters) or '#' (hash fragment).
# it will acquire the last extension that is present in the URL so
# with "/{file1}.{ext1}/{file2}.{ext2}" the ext2 will be the
# matched extension. furthermore if a file has multiple extensions
# "/{file}.{ext1}.{ext2}", it will only match extension2 because
# unix systems don't consider extensions to be metadata, and
# windows only considers the last extension to be valid metadata.
# Basically the {file}.{ext1} could actually just be the filename.
extension_regex = u(r"""
^ # start of the string
(?: # begin non-capturing group
(?! # begin negative lookahead
[?#] # question mark '?' or hash '#'
.* # zero or more wildcard characters
/ # literal slash '/'
[^/?#]+ # {file} - has one or more of any character
# except '/', '?' or '#'
\. # literal dot '.'
[^/?#]+ # {extension} - has one or more of any character
# except '/', '?' or '#'
) # end negative lookahead (prevents any '?' or
# '#' that precedes {file}.{extension} by
# any characters)
. # one wildcard character
)* # end non-capturing group (captures any number
# of wildcard characters that passes the
# negative lookahead)
/ # literal slash '/'
[^/?#]+ # {file} - has one or more of any character
# except forward slash, question mark or hash
\. # literal dot '.'
([^/?#]+) # {extension} - subgroup has one or more of any
# character except '/', '?' or '#'
""")
# match extension regex against decoded path
matches = re.match(extension_regex, real_path, re.U | re.X)
if matches:
url_extension = matches.group(1).lower()
if url_extension not in valid_extensions:
return None
# detect escaped fragment (since the ignored user agents have already
# been detected, SnapSearch won't continue the interception loop)
if "_escaped_fragment_" in environ.GET:
return environ.url
# intercept requests from matched robots
matched_regex = u("|").join(
[re.escape(tok) for tok in self.robots.get('match', [])])
if re.search(matched_regex, user_agent, re.I | re.U):
return environ.url
# do not intercept if no match at all
return None
def _validate_robots(self):
# ``robots`` should be a ``dict`` object, if keys ``ignore`` and
# ``match`` exist, the respective values must be ``list`` objects.
return isinstance(self.robots, dict) and \
isinstance(self.robots.get('ignore', []), list) and \
isinstance(self.robots.get('match', []), list)
def _validate_extensions(self):
# ``extensions`` should be a ``dict`` object, if keys ``generic`` and
# ``python`` exist, the respective values must be ``list`` objects.
return isinstance(self.extensions, dict) and \
isinstance(self.extensions.get('generic', []), list) and \
isinstance(self.extensions.get('python', []), list)
pass
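# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# The detector is called with a CGI-style environ; the keys below follow the
# WSGI/CGI convention and are an assumption; they may differ from what
# SnapSearch.api.AnyEnv actually expects.
if __name__ == "__main__":
    detector = Detector()
    environ = {
        "REQUEST_METHOD": "GET",
        "HTTP_USER_AGENT": "Googlebot/2.1 (+http://www.google.com/bot.html)",
        "HTTP_HOST": "www.example.com",
        "PATH_INFO": "/profile",
        "QUERY_STRING": "_escaped_fragment_=",
        "wsgi.url_scheme": "http",
    }
    # prints the percent-encoded URL if eligible for interception, else None
    print(detector(environ))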
|
mit
| 7,516,432,823,431,704,000
| 38.178947
| 79
| 0.543346
| false
| 4.695542
| false
| false
| false
|
phoebe-project/phoebe2-docs
|
2.2/tutorials/irrad_method_horvat.py
|
1
|
3005
|
#!/usr/bin/env python
# coding: utf-8
# Lambert Scattering (irrad_method='horvat')
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.2,<2.3"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.ipynb) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
# Relevant Parameters
# ---------------------------------
# For parameters that affect reflection and heating (irrad_frac_\*) see the tutorial on [reflection and heating](./reflection_heating.ipynb).
#
# The 'irrad_method' compute option dictates whether irradiation is handled according to the new Horvat scheme which includes Lambert Scattering, Wilson's original reflection scheme, or ignored entirely.
# In[3]:
print(b['irrad_method'])
# Influence on Light Curves (fluxes)
# ---------------------------------
#
# Let's (roughly) reproduce Figure 8 from [Prsa et al. 2016](http://phoebe-project.org/publications/2016Prsa+) which shows the difference between Wilson and Horvat schemes for various inclinations.
#
# <img src="prsa+2016_fig8.png" alt="Figure 8" width="600px"/>
#
# First we'll roughly create an A0-K0 binary and set reasonable albedos.
# In[4]:
b['teff@primary'] = 11000
b['requiv@primary'] = 2.5
b['gravb_bol@primary'] = 1.0
b['teff@secondary'] = 5000
b['requiv@secondary'] = 0.85
b['q@binary'] = 0.8/3.0
b.flip_constraint('mass@primary', solve_for='sma@binary')
b['mass@primary'] = 3.0
# In[5]:
print(b.filter(qualifier=['mass', 'requiv', 'teff'], context='component'))
# In[6]:
b['irrad_frac_refl_bol@primary'] = 1.0
b['irrad_frac_refl_bol@secondary'] = 0.6
# We'll also disable any eclipsing effects.
# In[7]:
b['eclipse_method'] = 'only_horizon'
# Now we'll compute the light curves with wilson and horvat irradiation, and plot the relative differences between the two as a function of phase, for several different values of the inclination.
# In[8]:
phases = phoebe.linspace(0,1,101)
b.add_dataset('lc', times=b.to_time(phases))
# In[9]:
for incl in [0,30,60,90]:
b.set_value('incl@binary', incl)
b.run_compute(irrad_method='wilson')
fluxes_wilson = b.get_value('fluxes', context='model')
b.run_compute(irrad_method='horvat')
fluxes_horvat = b.get_value('fluxes', context='model')
plt.plot(phases, (fluxes_wilson-fluxes_horvat)/fluxes_wilson, label='i={}'.format(incl))
plt.xlabel('phase')
plt.ylabel('[F(wilson) - F(horvat)] / F(wilson)')
plt.legend(loc='upper center')
plt.show()
# In[ ]:
|
gpl-3.0
| -420,098,855,563,994,800
| 22.476563
| 203
| 0.66589
| false
| 2.940313
| false
| false
| false
|
afodor/pythonExamples
|
src/viterbi/viterbiExample.py
|
1
|
2894
|
import random
class MarkovState:
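    """A Markov state defined by the symbols it can emit, their emission
    probabilities, and the transition probabilities to each state."""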
def __init__(self,charsToEmit, emissionProbs,transitionProbs):
self.charsToEmit = charsToEmit
self.emissionProbs = emissionProbs
self.transitionProbs = transitionProbs
def getEmissionIndex(self):
aRand = random.random()
cumulative = 0
index =0
for val in self.emissionProbs:
cumulative += val
if aRand <= cumulative:
return index
index = index + 1
return len(self.emissionProbs) - 1
def getIndexOfEmission(self, char):
for i in range(0, len(self.charsToEmit) ):
if str(self.charsToEmit[i]) == str(char):
return i
raise Exception("Cound not find " + str(char) )
def getTransitionIndex(self):
aRand = random.random()
cumulative = 0
index =0
for val in self.transitionProbs:
cumulative += val
if aRand <= cumulative:
return index
index = index + 1
return len(self.transitionProbs) - 1
def getMaxIndex( iterable ):
val = iterable[0]
index =0
returnVal =0
for i in iterable:
if i > val:
    val = i  # track the running maximum so the true argmax is returned
    returnVal = index
index = index+1
return returnVal
def getViterbiPath( markovStates, output):
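    """Run the Viterbi algorithm over the emitted `output` sequence, returning,
    for the start state and then for each emission, a tuple of (per-state
    Viterbi probabilities, index of the most likely state)."""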
returnPath= []
oldViterbiProbs = []
oldViterbiProbs.append(1) # we are 100% sure we start in the first state
for i in range( 1, len(markovStates) ):
oldViterbiProbs.append( 0)
aTuple = ( oldViterbiProbs, 0)
returnPath.append( aTuple )
for i in range( 0,len(output)):
newViterbiProbs = []
for j in range( 0, len(markovStates)):
state = markovStates[j]
emissionProb = state.emissionProbs[state.getIndexOfEmission(output[i])]
vTimesA=[]
for k in range(0, len(markovStates)):
vTimesA.append (oldViterbiProbs[k] * markovStates[k].transitionProbs[j])
#print( "vTimesA" + str( vTimesA))
maxVal = vTimesA[ getMaxIndex(vTimesA) ]
newViterbiProbs.append( emissionProb * maxVal)
aTuple = (newViterbiProbs,getMaxIndex(newViterbiProbs))
returnPath.append( aTuple)
oldViterbiProbs = newViterbiProbs
return returnPath
dice = ( 1,2,3,4,5,6 )
fairState = MarkovState( dice, (1/6,1/6,1/6,1/6,1/6,1/6), ( 0.95, 0.05) )
loadedState = MarkovState( dice, (1/10,1/10,1/10,1/10,1/10,5/10), ( 0.10, 0.90) )
states = ( fairState, loadedState )
################################################
rolls = "266666"
getViterbiPath( states, rolls)
################################################
rolls = ""
trueStates = ""
state = states[0]
for i in range( 1, 100):
nextState = state.getTransitionIndex()
state = states[ nextState]
trueStates = trueStates + str(nextState)
rolls = rolls + str( dice[ state.getEmissionIndex()] )
rolls
trueStates
viterbiPath = getViterbiPath( states, rolls)
for i in range(0, len(rolls)):
print( str(rolls[i]) + " " + str(trueStates[i])+ " " + str(viterbiPath[i][1]))
################################################
|
gpl-2.0
| 38,707,374,708,130,250
| 25.561905
| 81
| 0.630615
| false
| 2.817916
| false
| false
| false
|
salazardetroya/libmesh
|
doc/statistics/libmesh_citations.py
|
1
|
2340
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Number of "papers using libmesh" by year.
#
# Note 1: this does not count citations "only," the authors must have actually
# used libmesh in part of their work. Therefore, these counts do not include
# things like Wolfgang citing us in his papers to show how Deal.II is
# superior...
#
# Note 2: I typically update this data after regenerating the web page,
# since bibtex2html renumbers the references starting from "1" each year.
#
# Note 3: These citations include anything that is not a dissertation/thesis.
# So, some are conference papers, some are journal articles, etc.
#
# Note 4: The libmesh paper came out in 2006, but there are some citations
# prior to that date, obviously. These counts include citations of the
# website libmesh.sf.net as well...
#
# Note 5: Preprints are listed as the "current year + 1" and are constantly
# being moved to their respective years after being published.
data = [
'2004', 5,
'\'05', 2,
'\'06', 13,
'\'07', 8,
'\'08', 23,
'\'09', 30,
'\'10', 24,
'\'11', 37,
'\'12', 50,
'\'13', 78,
'\'14', 60,
'\'15', 11,
'P', 8, # Preprints
'T', 36 # Theses
]
# Extract the x-axis labels from the data array
xlabels = data[0::2]
# Extract the publication counts from the data array
n_papers = data[1::2]
# The number of data points
N = len(xlabels)
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Create an x-axis for plotting
x = np.linspace(1, N, N)
# Width of the bars
width = 0.8
# Make the bar chart. Plot years in blue, preprints and theses in green.
ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b')
ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g')
# Label the x-axis
plt.xlabel('P=Preprints, T=Theses')
# Set up the xtick locations and labels. Note that you have to offset
# the position of the ticks by width/2, where width is the width of
# the bars.
ax.set_xticks(np.linspace(1,N,N) + width/2)
ax.set_xticklabels(xlabels)
# Create a title string
title_string = 'LibMesh Citations, (' + str(sum(n_papers)) + ' Total)'
fig.suptitle(title_string)
# Save as PDF
plt.savefig('libmesh_citations.pdf')
# Local Variables:
# python-indent: 2
# End:
|
lgpl-2.1
| 302,535,557,434,097,100
| 26.529412
| 78
| 0.674359
| false
| 2.962025
| false
| false
| false
|
Strangemother/python-state-machine
|
scratch/machine_4/integration.py
|
1
|
4183
|
from tools import color_print as cl
class ConditionIntegrate(object):
def read_node(self, node):
'''
Read the conditions of a node.
'''
if hasattr(node, 'conditions') is False:
return
cnds = node.conditions()
# cl('yellow', 'get conditions for node', node)
self.integrate_conditions(cnds, node)
def integrate_conditions(self, conditions, node):
'''
Implement a list of conditions against one node.
'''
for c in conditions:
self.integrate_condition(c, node)
def integrate_condition(self, cond, node):
'''
Integrate the conditions into the condition runner
'''
if hasattr(self, 'condition_keys') is False:
setattr(self, 'condition_keys', {})
if hasattr(self, 'condition_nodes') is False:
setattr(self, 'condition_nodes', {})
names = self.get_integration_names(node, cond)
# cl('yellow', 'integrate conditions', node, cond, names)
self.append_with_names(names, cond)
# node, condition associations
ck = self.condition_keys
sc = str(cond)
if (sc in ck) is False:
ck[sc] = []
ck[sc].append(node.get_name())
def get_integration_names(self, node, condition):
node_name = node.get_name()
names = (node_name, str(condition), )
return names
def run_conditions(self, conditions, node, value, field):
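        '''
        Run every condition associated with the changed node/field/value
        against its parent nodes; returns a dict of validation results
        keyed by "<parent name>-<condition>".
        '''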
# pprint(self.conditions._names)
# cl('yellow', 'run conditions', conditions, node, field)
pairs = []
# fetch associated conditions.
# make the condition perform the compare
for cond in conditions:
# get associated nodes for the condition
node_names = self.condition_keys.get(str(cond)) or []
# loop and get associated condition
for nn in node_names:
s = '{0}-{1}'.format(nn, str(cond))
r = self.get(s) or []
f = [(self.nodes.get(nn), set(r),)]
# cl('yellow', 'found', f)
pairs.extend( f )
res = {}
for parent_nodes, _conditions in pairs:
for cnd in _conditions:
for pn in parent_nodes:
v = cnd.validate(pn, node, value, field)
n = '{0}-{1}'.format(pn.get_name(), str(cnd))
res[n]= v
# cl('blue', 'conditions', res)
return res
def find_conditions(self, node, field, value):
n = '{0}_{1}_{2}'.format(node.get_name(), field, value)
# print '+ find conditions on', n
cnds = self.get_conditions(node, field, value)
# cl('yellow', '-- Matches condition', cnds)
return cnds
def get_conditions(self, node, name, value=None):
'''
Get conditions based upon node and name
'''
node_name = node
cl('red', 'get condition', node, name, value)
cnds = self.conditions
if hasattr(node_name, 'get_name'):
node_name = node.get_name()
name1 = '{0}_{1}'.format(node_name, name)
match_names = (name1, )
# exact match string
if value is not None:
vcn = '{0}_{1}_{2}'.format(node_name, name, value)
match_names += (vcn,)
res = []
for _n in match_names:
res += self.get_conditions_by_name(_n) or []
# print 'found conditions', res
return set(res)
def get_conditions_by_name(self, name):
'''
return the conditions matching a name provided.
'''
cnds = self.conditions.get(name)
# print 'get_condition_by_name:', name, cnds
return cnds
def condition_name(self, node, name, *args, **kw):
'''
Create a name for a condition string match from the values passed.
The node is the original object receiving the change; `name` denotes the
key being changed. Returns a string name for the condition.
'''
n = node.get_name()
a = [n, args[0]]
s = '_'.join(a)
return s
|
mit
| -4,974,785,414,804,036,000
| 31.176923
| 65
| 0.537413
| false
| 4.061165
| false
| false
| false
|
caio1982/capomastro
|
jenkins/utils.py
|
1
|
4525
|
from urlparse import urljoin
import xml.etree.ElementTree as ET
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.utils import timezone
from django.utils.text import slugify
PARAMETERS = ".//properties/hudson.model.ParametersDefinitionProperty/parameterDefinitions/"
def get_notifications_url(base, server):
"""
Returns the full URL for notifications given a base.
"""
url = urljoin(base, reverse("jenkins_notifications"))
return url + "?server=%d" % server.pk
def get_context_for_template(job, server):
"""
Returns a Context for the Job XML templating.
"""
defaults = DefaultSettings({"NOTIFICATION_HOST": "http://localhost"})
url = get_notifications_url(defaults.NOTIFICATION_HOST, server)
context_vars = {
"notifications_url": url,
"job": job,
"jobtype": job.jobtype,
}
return Context(context_vars)
def get_job_xml_for_upload(job, server):
"""
Return config_xml run through the template mechanism.
"""
template = Template(job.jobtype.config_xml)
context = get_context_for_template(job, server)
# We need to strip leading/trailing whitespace so that the <?xml> PI ends up
# on the first line of the document.
job_xml = template.render(context).strip()
requestor = JenkinsParameter(
"REQUESTOR", "The username requesting the build", "")
job_xml = add_parameter_to_job(requestor, job_xml)
return job_xml
def generate_job_name(jobtype):
"""
Generates a "unique" id.
"""
return "%s_%s" % (slugify(jobtype.name), timezone.now().strftime("%s"))
class DefaultSettings(object):
"""
Allows easy configuration of default values for a Django settings.
e.g. values = DefaultSettings({"NOTIFICATION_HOST": "http://example.com"})
values.NOTIFICATION_HOST # returns the value from the default django
settings, or the default if not provided in the settings.
"""
class _defaults(object):
pass
def __init__(self, defaults):
self.defaults = self._defaults()
for key, value in defaults.iteritems():
setattr(self.defaults, key, value)
def __getattr__(self, key):
return getattr(settings, key, getattr(self.defaults, key))
def get_value_or_none(self, key):
"""
Doesn't raise an AttributeError in the event that the key doesn't
exist.
"""
return getattr(settings, key, getattr(self.defaults, key, None))
def parse_parameters_from_job(body):
"""
Parses the supplied XML document and extracts all parameters, returns a
list of dictionaries with the details of the parameters extracted.
"""
result = []
root = ET.fromstring(body)
for param in root.findall(PARAMETERS):
item = {}
for param_element in param.findall("./"):
item[param_element.tag] = param_element.text
result.append(item)
return result
class JenkinsParameter(object):
"""Represents a parameter for a Jenkins job."""
definition = "TextParameterDefinition"
def __init__(self, name, description, default):
self.name = name
self.description = description
self.default = default
@property
def type(self):
return "hudson.model.%s" % self.definition
def parameter_to_xml(param):
"""
Converts a JenkinsParameter to the XML element representation for a Jenkins
job parameter.
"""
element = ET.Element(param.type)
ET.SubElement(element, "name").text = param.name
ET.SubElement(element, "description").text = param.description
ET.SubElement(element, "defaultValue").text = param.default
return element
def add_parameter_to_job(param, job):
"""
Adds a JenkinsParameter to an existing job xml document, returns the job XML
as a string.
# NOTE: This does nothing to check whether or not the parameter already
# exists.
"""
root = ET.fromstring(job)
parameters_container = root.find(PARAMETERS[:-1])
if parameters_container is None:
parameters = root.find(".//hudson.model.ParametersDefinitionProperty")
if parameters is None:
parameters = ET.SubElement(root, "hudson.model.ParametersDefinitionProperty")
parameters_container = ET.SubElement(parameters, "parameterDefinitions")
parameters_container.append(parameter_to_xml(param))
return ET.tostring(root)
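# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# ``job_xml`` below stands in for an existing Jenkins job config.xml string.
if __name__ == "__main__":
    requestor = JenkinsParameter(
        "REQUESTOR", "The username requesting the build", "")
    # render the parameter as the XML element Jenkins expects
    print(ET.tostring(parameter_to_xml(requestor)))
    # splice it into a (minimal, hypothetical) job document
    job_xml = "<project><properties/></project>"
    print(add_parameter_to_job(requestor, job_xml))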
|
mit
| 3,326,860,643,878,144,500
| 29.993151
| 92
| 0.671381
| false
| 4.170507
| false
| false
| false
|
koomik/CouchPotatoServer
|
couchpotato/core/plugins/log/main.py
|
1
|
4216
|
import os
import traceback
from couchpotato.api import addApiView
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
log = CPLog(__name__)
class Logging(Plugin):
def __init__(self):
addApiView('logging.get', self.get, docs = {
'desc': 'Get the full log file by number',
'params': {
'nr': {'desc': 'Number of the log to get.'}
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
'total': int, //Total log files available
}"""}
})
addApiView('logging.partial', self.partial, docs = {
'desc': 'Get a partial log',
'params': {
'type': {'desc': 'Type of log', 'type': 'string: all(default), error, info, debug'},
'lines': {'desc': 'Number of lines. Last to first. Default 30'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
}"""}
})
addApiView('logging.clear', self.clear, docs = {
'desc': 'Remove all the log files'
})
addApiView('logging.log', self.log, docs = {
'desc': 'Log errors',
'params': {
'type': {'desc': 'Type of logging, default "error"'},
'**kwargs': {'type': 'object', 'desc': 'All other params will be printed in the log string.'},
}
})
def get(self, nr = 0, **kwargs):
nr = tryInt(nr)
current_path = None
total = 1
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
# Check to see if the log exists
if not os.path.isfile(path):
total = x - 1
break
# Set current path
if x == nr:
current_path = path
log_content = ''
if current_path:
f = open(current_path, 'r')
log_content = f.read()
return {
'success': True,
'log': toUnicode(log_content),
'total': total,
}
def partial(self, type = 'all', lines = 30, **kwargs):
total_lines = tryInt(lines)
log_lines = []
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
# Check to see if the log exists
if not os.path.isfile(path):
break
f = open(path, 'r')
reversed_lines = toUnicode(f.read()).split('[0m\n')
reversed_lines.reverse()
brk = False
for line in reversed_lines:
if type == 'all' or '%s ' % type.upper() in line:
log_lines.append(line)
if len(log_lines) >= total_lines:
brk = True
break
if brk:
break
log_lines.reverse()
return {
'success': True,
'log': '[0m\n'.join(log_lines),
}
def clear(self, **kwargs):
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
if not os.path.isfile(path):
continue
try:
# Create empty file for current logging
if x == 0:
self.createFile(path, '')
else:
os.remove(path)
except:
log.error('Couldn\'t delete file "%s": %s', (path, traceback.format_exc()))
return {
'success': True
}
def log(self, type = 'error', **kwargs):
try:
log_message = 'API log: %s' % kwargs
try:
getattr(log, type)(log_message)
except:
log.error(log_message)
except:
log.error('Couldn\'t log via API: %s', kwargs)
return {
'success': True
}
|
gpl-3.0
| -6,851,123,021,384,806,000
| 26.376623
| 110
| 0.463947
| false
| 4.026743
| false
| false
| false
|
WilsonWangTHU/neural_graph_evolution
|
graph_util/structure_mapper.py
|
1
|
7819
|
#!/usr/bin/env python2
# -----------------------------------------------------------------------------
# @author:
# Tingwu Wang, Jun 23rd, 2017
# -----------------------------------------------------------------------------
import init_path
from util import logger
from . import mujoco_parser
import numpy as np
_BASE_DIR = init_path.get_base_dir()
def map_output(transfer_env, i_value, added_constant, gnn_option_list):
'''
@brief:
i_value could be the logstd (1, num_action), policy_output/w
(64, num_action), policy_output/b (1, num_action)
'''
assert len(gnn_option_list) == 4
i_value = np.transpose(i_value)  # move num_action to the front
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
if len(i_value.shape) > 1:
o_value = np.zeros([len(oenv_info['output_list']), i_value.shape[1]])
else:
# the b matrix
o_value = np.zeros([len(oenv_info['output_list'])])
assert len(i_value) == len(ienv_info['output_list'])
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in enumerate(oenv_info['output_list']):
# get the name of the joint
node_name = oenv_info['tree'][output_node_id]['name']
# if the node is already in the input environment?
if node_name in ienv_node_name_list:
if ienv_node_name_list.index(node_name) not in \
ienv_info['output_list']:
logger.warning('Missing joint: {}'.format(node_name))
continue
o_value[output_id] = i_value[
ienv_info['output_list'].index(
ienv_node_name_list.index(node_name)
)
]
else:
# the name format: "@type_@name_@number", e.g.: joint_leg_1
assert len(node_name.split('_')) == 3
# find all the repetitive nodes and calculate the average
repetitive_struct_node_list = [
ienv_node_name_list.index(name)
for name in ienv_node_name_list
if node_name.split('_')[1] == name.split('_')[1]
]
num_reptitive_nodes = float(len(repetitive_struct_node_list))
assert len(repetitive_struct_node_list) >= 1
for i_node_id in repetitive_struct_node_list:
o_value[output_id] += i_value[
ienv_info['output_list'].index(i_node_id)
] / num_reptitive_nodes
return np.transpose(o_value) + added_constant
def map_input(transfer_env, i_value, added_constant, gnn_option_list):
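    '''
    @brief:
        Map observation values from the input environment onto the output
        environment by matching node names; unmatched nodes keep zeros.
    '''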
assert len(gnn_option_list) == 4
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
o_value = np.zeros([oenv_info['debug_info']['ob_size'], i_value.shape[1]])
assert len(i_value) == ienv_info['debug_info']['ob_size']
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in oenv_info['input_dict'].items():
# get the name of the joint
node_name = oenv_info['tree'][output_id]['name']
# if the node is already in the input environment?
if node_name in ienv_node_name_list:
o_value[output_node_id] = i_value[
ienv_info['input_dict'][
ienv_node_name_list.index(node_name)
]
]
else:
continue
return o_value
def map_transfer_env_running_mean(ienv, oenv, running_mean_info,
observation_size,
gnn_node_option, root_connection_option,
gnn_output_option, gnn_embedding_option):
# parse the mujoco information
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
i_running_mean_info = running_mean_info
# we start the running mean by cutting the mean to 0.1
start_coeff = 1
o_running_mean_info = {
'step': i_running_mean_info['step'] * start_coeff,
'mean': np.zeros([observation_size]),
'variance': np.zeros([observation_size]),
'square_sum': np.zeros([observation_size]),
'sum': np.zeros([observation_size])
}
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for node, oenv_digit in oenv_info['input_dict'].items():
node_name = oenv_info['tree'][node]['name']
# if the node is already in the input environment?
if node_name in ienv_node_name_list:
ienv_digit = ienv_info['input_dict'][
ienv_node_name_list.index(node_name)
]
assert len(ienv_digit) == len(oenv_digit)
# assign the value!
for key in ['square_sum', 'sum']:
o_running_mean_info[key][oenv_digit] = \
i_running_mean_info[key][ienv_digit] * start_coeff
for key in ['mean', 'variance']:
o_running_mean_info[key][oenv_digit] = \
i_running_mean_info[key][ienv_digit]
else:
# the name format: "@type_@name_@number", e.g.: joint_leg_1
assert len(node_name.split('_')) == 3
# find all the repetitive nodes and calculate the average
repetitive_struct_node_list = [
ienv_node_name_list.index(name)
for name in ienv_node_name_list
if node_name.split('_')[1] == name.split('_')[1]
]
assert len(repetitive_struct_node_list) >= 1
num_reptitive_nodes = float(len(repetitive_struct_node_list))
for i_node_id in repetitive_struct_node_list:
ienv_digit = ienv_info['input_dict'][i_node_id]
assert len(ienv_digit) == len(oenv_digit)
# assign the value!
for key in ['square_sum', 'sum']:
o_running_mean_info[key][oenv_digit] += \
i_running_mean_info[key][ienv_digit] * \
start_coeff / num_reptitive_nodes
for key in ['mean', 'variance']:
o_running_mean_info[key][oenv_digit] += \
i_running_mean_info[key][ienv_digit] / \
num_reptitive_nodes
return o_running_mean_info
|
mit
| 7,428,175,684,621,202,000
| 40.152632
| 79
| 0.553012
| false
| 3.399565
| false
| false
| false
|
vitordeatorreao/amproj
|
amproj/datasets/dataset.py
|
1
|
3520
|
"""Base class for a memory representation of any dataset"""
class Dataset:
"""Represents a dataset read to memory"""
def __init__(self, feature_names=[]):
"""Initializes a new instance of Dataset
Parameters
----------
feature_names : list<str>, optional
List of names of the features present in this dataset.
"""
if type(feature_names) != list:
raise TypeError(
"The `feature_names` argument must be of type list")
self.features = [str(name) for name in feature_names]
self.data = []
def add_datapoint(self, datapoint):
"""Adds a datapoint to the dataset
Parameters
----------
datapoint : list
A list containing the feature values.
"""
point = {} # datapoint to be built and inserted in the dataset
if len(self.features) == 0: # in case there are no feature names
if len(self.data) > 0 and len(self.data[0]) != len(datapoint):
raise TypeError("The new datapoint must be of the same size " +
"as the other datapoints. The new datapoint " +
"has size " + str(len(datapoint)) + ", but " +
"the other datapoints have size " +
str(len(self.data[0])) + ".")
i = 0
for value in datapoint:
point["feature" + str(i)] = self.__tryparse__(value)
i += 1
self.data.append(point)
return
if len(datapoint) != len(self.features):
raise TypeError("The datapoint must be of the same size as " +
"the features list. The features list has size " +
str(len(self.features)) + " and the datapoint " +
"has size " + str(len(datapoint)) + ". The " +
"datapoint is " + str(datapoint))
i = 0
for feature_name in self.features:
point[feature_name] = self.__tryparse__(datapoint[i])
i += 1
self.data.append(point) # actually adds the datapoint to the set
def __len__(self):
"""Returns the length of this dataset"""
return len(self.data)
def __iter__(self):
"""Iterates through the objects in this dataset"""
return iter(self.data)
def __getitem__(self, key):
"""Gets the dataset at the specified index"""
if type(key) != int:
raise TypeError("The index must be an integer, instead got " + key)
return self.data[key]
def __tryparse__(self, value):
"""Parses the value into int, float or string
Parameters
----------
value : str
A value to be parsed.
Returns
-------
val : int, float or str
The value after being parsed to its correct type.
Notes
-----
The value is parsed by trial and error: first we try to cast it to int;
if that fails, we try to cast it to float; and if that fails as well, we
simply return it as a string.
"""
value = value.strip()
if type(value) != str:
return value
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
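# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module);
# the feature names and values below are made up for the example.
if __name__ == "__main__":
    d = Dataset(feature_names=["sepal_length", "species"])
    d.add_datapoint(["5.1", "setosa"])  # values are coerced via __tryparse__
    print(len(d))                       # 1
    print(d[0])                         # {'sepal_length': 5.1, 'species': 'setosa'}
    for point in d:
        print(point["species"])         # setosa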
|
gpl-2.0
| 126,932,584,548,431,800
| 34.555556
| 79
| 0.511932
| false
| 4.637681
| false
| false
| false
|
lmorchard/badger
|
apps/socialconnect/views.py
|
1
|
12314
|
import urllib, urllib2
import cgi
import os
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.utils.http import urlquote
from django.utils import simplejson as json
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib import messages
from oauthtwitter import OAuthApi
from oauth import oauth
import oauthtwitter
from pinax.apps.account.utils import get_default_redirect, user_display
from pinax.apps.account.views import login as account_login
from socialconnect.utils import Router, BaseView
from socialconnect.forms import OauthSignupForm
from socialconnect.models import UserOauthAssociation
TWITTER_CONSUMER_KEY = getattr(settings, 'TWITTER_CONSUMER_KEY', 'YOUR_KEY')
TWITTER_CONSUMER_SECRET = getattr(settings, 'TWITTER_CONSUMER_SECRET', 'YOUR_SECRET')
FACEBOOK_CONSUMER_KEY = getattr(settings, 'FACEBOOK_CONSUMER_KEY', 'YOUR_KEY')
FACEBOOK_CONSUMER_SECRET = getattr(settings, 'FACEBOOK_CONSUMER_SECRET', 'YOUR_SECRET')
class ManagementView(BaseView):
"""Connection management view, mainly for removing associations"""
urlname_pattern = 'socialconnect_manage_%s'
def do_associations(self, request):
v = self.require_login(request)
if v is not True: return v
if request.method == "POST":
a_id = request.POST.get('id', None)
try:
assoc = UserOauthAssociation.objects.get(
user = request.user, id = a_id)
messages.add_message(request, messages.SUCCESS,
ugettext("""
Successfully deleted connection to %(auth_type)s
screen name %(username)s.
""") % {
"auth_type": assoc.auth_type,
"username": assoc.username
}
)
assoc.delete()
except UserOauthAssociation.DoesNotExist:
pass
return HttpResponseRedirect(reverse(
self.urlname_pattern % 'associations'))
associations = UserOauthAssociation.objects.filter(user=request.user)
return self.render(request, 'associations.html', {
'associations': associations
})
class BaseAuthView(BaseView):
def do_signin(self, request):
"""Perform sign in via OAuth"""
request.session['socialconnect_mode'] = request.GET.get('mode', 'signin')
next = request.GET.get(REDIRECT_FIELD_NAME, '/')
if next:
request.session['redirect_to'] = next
return HttpResponseRedirect(self.get_signin_url(request))
def do_callback(self, request):
"""Handle response from OAuth permit/deny"""
# TODO: Handle OAuth denial!
mode = request.session.get('socialconnect_mode', None)
profile = self.get_profile_from_callback(request)
if not profile: return HttpResponse(status=400)
request.session[self.session_profile] = profile
success_url = get_default_redirect(request, REDIRECT_FIELD_NAME)
if not success_url or 'None' == success_url:
success_url = '/'
try:
# Try looking for an association to perform a login.
assoc = UserOauthAssociation.objects.filter(
auth_type=self.auth_type,
profile_id=profile['id'],
username=profile['username']
).get()
if 'connect' == mode:
messages.add_message(request, messages.ERROR,
ugettext("""This service is already connected to another
account!""")
)
return HttpResponseRedirect(reverse(
ManagementView().urlname_pattern % 'associations'))
else:
self.log_in_user(request, assoc.user)
return HttpResponseRedirect(success_url)
except UserOauthAssociation.DoesNotExist:
# No association found, so...
if not request.user.is_authenticated():
# If no login session, bounce to registration
return HttpResponseRedirect(reverse(
self.urlname_pattern % 'register'))
else:
# If there's a login session, create an association to the
# currently logged in user.
assoc = self.create_association(request, request.user, profile)
del request.session[self.session_profile]
if 'connect' == mode:
return HttpResponseRedirect(reverse(
ManagementView().urlname_pattern % 'associations'))
else:
return HttpResponseRedirect(success_url)
def get_registration_form_class(self, request):
return OauthSignupForm
def do_register(self, request):
"""Handle registration with association"""
# Ensure that OAuth signin details are present in the session
profile = request.session.get(self.session_profile, None)
if not profile: return HttpResponse(status=400)
RegistrationForm = self.get_registration_form_class(request)
success_url = get_default_redirect(request, REDIRECT_FIELD_NAME)
if request.method != "POST":
# Pre-fill form with suggested info based on the OAuth signin
form = RegistrationForm(initial = self.initial_from_profile(profile))
else:
form = RegistrationForm(request.POST)
if form.is_valid():
user = form.save(request=request)
assoc = self.create_association(request, user, profile)
self.log_in_user(request, user)
return HttpResponseRedirect(success_url)
return self.render(request, 'register.html', {
'form': form,
'auth_label': self.auth_label,
'signin_url': reverse(self.urlname_pattern % 'signin'),
"action": request.path,
})
def create_association(self, request, user, profile):
"""Create an association between this user and the given profile"""
assoc = UserOauthAssociation(
user=user,
auth_type=self.auth_type,
profile_id=profile['id'],
username=profile['username'],
access_token=profile['access_token']
)
assoc.save()
messages.add_message(request, messages.SUCCESS,
ugettext("""
Successfully associated %(user)s with %(auth_label)s
screen name %(username)s.
""") % {
"user": user_display(request.user),
"auth_label": self.auth_label,
"username": profile['username']
}
)
def suggest_nickname(self, nickname):
"Return a suggested nickname that has not yet been taken"
from django.contrib.auth.models import User
if not nickname:
return ''
original_nickname = nickname
suffix = None
while User.objects.filter(username = nickname).count():
if suffix is None:
suffix = 1
else:
suffix += 1
nickname = original_nickname + str(suffix)
return nickname
def log_in_user(self, request, user):
# Remember, openid might be None (after registration with none set)
from django.contrib.auth import login
# Nasty but necessary - annotate user and pretend it was the regular
# auth backend. This is needed so django.contrib.auth.get_user works:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
class TwitterAuthView(BaseAuthView):
auth_type = "twitter"
auth_label = _("Twitter")
urlname_pattern = 'socialconnect_twitter_%s'
consumer_key = TWITTER_CONSUMER_KEY
consumer_secret = TWITTER_CONSUMER_SECRET
session_access_token = 'twitter_access_token'
session_profile = 'twitter_profile'
def get_signin_url(self, request):
twitter = OAuthApi(self.consumer_key, self.consumer_secret)
request_token = twitter.getRequestToken()
request.session['twitter_request_token'] = request_token.to_string()
return twitter.getSigninURL(request_token)
def get_profile_from_callback(self, request):
"""Extract the access token and profile details from OAuth callback"""
request_token = request.session.get('twitter_request_token', None)
if not request_token: return None
token = oauth.OAuthToken.from_string(request_token)
if token.key != request.GET.get('oauth_token', 'no-token'):
return HttpResponse(status=400)
twitter = OAuthApi(self.consumer_key, self.consumer_secret, token)
access_token = twitter.getAccessToken()
twitter = oauthtwitter.OAuthApi(self.consumer_key,
self.consumer_secret, access_token)
try:
profile = twitter.GetUserInfo()
except:
return None
return {
'access_token': access_token.to_string(),
'id': profile.id,
'username': profile.screen_name,
'fullname': profile.name,
'email': '',
}
def initial_from_profile(self, profile):
fullname = profile['fullname']
first_name, last_name = '', ''
if fullname:
bits = fullname.split()
first_name = bits[0]
if len(bits) > 1:
last_name = ' '.join(bits[1:])
return {
'username': self.suggest_nickname(profile.get('username','')),
'first_name': first_name,
'last_name': last_name,
'email': ''
}
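    # Example of the name split above (illustrative, not from the original source):
    # fullname "Ada King Lovelace" yields first_name "Ada", last_name "King Lovelace";
    # a single-word fullname leaves last_name empty.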
class FacebookAuthView(BaseAuthView):
auth_type = "facebook"
auth_label = _("Facebook")
urlname_pattern = 'socialconnect_facebook_%s'
consumer_key = FACEBOOK_CONSUMER_KEY
consumer_secret = FACEBOOK_CONSUMER_SECRET
session_access_token = 'facebook_access_token'
session_profile = 'facebook_profile'
def get_signin_url(self, request):
args = {
'client_id': self.consumer_key,
'redirect_uri': request.build_absolute_uri(
reverse('socialconnect_facebook_callback')),
'scope': 'publish_stream,offline_access'
}
return ("https://graph.facebook.com/oauth/authorize?" +
urllib.urlencode(args))
def get_profile_from_callback(self, request):
code = request.GET.get('code', None)
args = {
'client_id': self.consumer_key,
'client_secret': self.consumer_secret,
'redirect_uri': request.build_absolute_uri(
reverse('socialconnect_facebook_callback')),
'code': code,
}
response = cgi.parse_qs(urllib2.urlopen(
"https://graph.facebook.com/oauth/access_token?" +
urllib.urlencode(args)
).read())
access_token = response["access_token"][-1]
profile = json.load(urllib2.urlopen("https://graph.facebook.com/me?" +
urllib.urlencode(dict(access_token=access_token))))
return {
'access_token': access_token,
'id': profile['id'],
'username': os.path.basename(profile.get('link','')),
'fullname': profile.get('name', ''),
'first_name': profile.get('first_name', ''),
'last_name': profile.get('last_name', ''),
'email': '',
}
def initial_from_profile(self, profile):
return {
'username': self.suggest_nickname(profile.get('username','')),
'first_name': profile.get('first_name', ''),
'last_name': profile.get('last_name', ''),
'email': ''
}
|
bsd-3-clause
| 6,301,588,463,236,984,000
| 36.889231
| 87
| 0.598993
| false
| 4.369766
| false
| false
| false
|
napalm-automation/napalm
|
test/ios/TestIOSDriver.py
|
1
|
6582
|
# Copyright 2015 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for IOSDriver."""
import unittest
from napalm.ios import ios
from napalm.base.test.base import TestConfigNetworkDriver, TestGettersNetworkDriver
import re
class TestConfigIOSDriver(unittest.TestCase, TestConfigNetworkDriver):
"""Configuration Tests for IOSDriver.
Core file operations:
load_replace_candidate Tested
load_merge_candidate Tested
compare_config Tested
commit_config Tested
discard_config Tested
rollback Tested
Internal methods:
_enable_confirm Tested
_disable_confirm Tested
_gen_rollback_cfg Tested as part of rollback
_check_file_exists Tested
Misc methods:
open Tested
close Skipped
normalize_compare_config Tested (indirectly)
scp_file Tested
gen_full_path Tested
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
ip_addr = "127.0.0.1"
username = "vagrant"
password = "vagrant"
cls.vendor = "ios"
optional_args = {"port": 12204, "dest_file_system": "bootflash:"}
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
cls.device.open()
# Setup initial state
cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor)
cls.device.commit_config()
def test_ios_only_confirm(self):
"""Test _disable_confirm() and _enable_confirm().
_disable_confirm() changes router config so it doesn't prompt for confirmation
_enable_confirm() reenables this
"""
# Set initial device configuration
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
self.device.commit_config()
# Verify initial state
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
# Disable confirmation
self.device._disable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "file prompt quiet")
# Reenable confirmation
self.device._enable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
def test_ios_only_gen_full_path(self):
"""Test gen_full_path() method."""
output = self.device._gen_full_path(self.device.candidate_cfg)
self.assertEqual(output, self.device.dest_file_system + "/candidate_config.txt")
output = self.device._gen_full_path(self.device.rollback_cfg)
self.assertEqual(output, self.device.dest_file_system + "/rollback_config.txt")
output = self.device._gen_full_path(self.device.merge_cfg)
self.assertEqual(output, self.device.dest_file_system + "/merge_config.txt")
output = self.device._gen_full_path(
filename="running-config", file_system="system:"
)
self.assertEqual(output, "system:/running-config")
def test_ios_only_check_file_exists(self):
"""Test _check_file_exists() method."""
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
valid_file = self.device._check_file_exists(
self.device.dest_file_system + "/candidate_config.txt"
)
self.assertTrue(valid_file)
invalid_file = self.device._check_file_exists(
self.device.dest_file_system + "/bogus_999.txt"
)
self.assertFalse(invalid_file)
class TestGetterIOSDriver(unittest.TestCase, TestGettersNetworkDriver):
"""Getters Tests for IOSDriver.
Get operations:
get_lldp_neighbors
get_facts
get_interfaces
get_bgp_neighbors
get_interfaces_counters
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
cls.mock = True
username = "vagrant"
ip_addr = "192.168.0.234"
password = "vagrant"
cls.vendor = "ios"
optional_args = {}
optional_args["dest_file_system"] = "flash:"
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
if cls.mock:
cls.device.device = FakeIOSDevice()
else:
cls.device.open()
def test_ios_only_bgp_time_conversion(self):
"""Verify time conversion static method."""
test_cases = {
"1w0d": 604800,
"00:14:23": 863,
"00:13:40": 820,
"00:00:21": 21,
"00:00:13": 13,
"00:00:49": 49,
"1d11h": 126000,
"1d17h": 147600,
"8w5d": 5270400,
"1y28w": 48470400,
"never": -1,
}
for bgp_time, result in test_cases.items():
self.assertEqual(self.device.bgp_time_conversion(bgp_time), result)
class FakeIOSDevice:
"""Class to fake a IOS Device."""
@staticmethod
def read_txt_file(filename):
"""Read a txt file and return its content."""
with open(filename) as data_file:
return data_file.read()
def send_command_expect(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
cmd = re.sub(r"[\[\]\*\^\+\s\|]", "_", command)
output = self.read_txt_file("ios/mock_data/{}.txt".format(cmd))
return str(output)
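    # Illustrative mapping (not part of the original file): the regex above turns
    # "show run | inc file prompt" into "ios/mock_data/show_run___inc_file_prompt.txt"
    # (each space and the pipe character become underscores).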
def send_command(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
return self.send_command_expect(command)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| -1,167,232,957,800,592,000
| 32.753846
| 91
| 0.614707
| false
| 3.94841
| true
| false
| false
|
emulbreh/vacuous
|
vacuous/backends/dulwich/tasks.py
|
1
|
1530
|
from StringIO import StringIO
from celery.task import Task
from celery.task.sets import TaskSet, subtask
from dulwich.protocol import ReceivableProtocol
from dulwich.server import ReceivePackHandler
from vacuous.backends import load_backend
from vacuous.backends.dulwich.utils import WebBackend
from vacuous.tasks import SyncTask
class _ReceivePackHandler(ReceivePackHandler):
def _apply_pack(self, refs):
result = super(_ReceivePackHandler, self)._apply_pack(refs)
status = dict(result)
self._good_refs = []
for oldsha, newsha, ref in refs:
if status[ref] == 'ok':
self._good_refs.append((oldsha, newsha, ref))
return result
class ReceivePackTask(Task):
def run(self, flavor, repo_path, data):
backend = load_backend(flavor, repo_path, cache=False)
out = StringIO()
proto = ReceivableProtocol(StringIO(data).read, out.write)
handler = _ReceivePackHandler(WebBackend(), [backend], proto, stateless_rpc=True)
handler.handle()
sync_tasks = []
for oldrev, newrev, name in handler._good_refs:
if name.startswith('refs/heads/'):
branch = name[11:]
sync_tasks.append(subtask(SyncTask, args=[backend.flavor, backend.path, oldrev, newrev, branch]))
if sync_tasks:
taskset = TaskSet(tasks=sync_tasks)
taskset.apply_async().join()
return out.getvalue(), handler._good_refs
|
mit
| -4,407,668,046,472,185,000
| 33.772727
| 113
| 0.640523
| false
| 3.963731
| false
| false
| false
|
hashimmm/iiifoo
|
testutils/manifest_validator.py
|
1
|
3963
|
from testutils.presentation_api.implementations.manifest_factory.loader import \
ManifestReader
from iiifoo_utils import image_id_from_canvas_id
def validate(manifestjson, logger=None):
"""Validate a given manifest json object."""
mr = ManifestReader(manifestjson)
try:
r = mr.read()
js = r.toJSON()
except Exception as e:
if logger:
logger.exception(e)
print e
valid = False
else:
valid = True
print mr.get_warnings()
if logger:
logger.warn(mr.get_warnings())
return valid
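# Minimal usage sketch (assumed, not part of the original module): load a manifest
# from disk and validate it; `json` would need to be imported for this to run.
#
#   import json
#   with open("manifest.json") as fp:
#       assert validate(json.load(fp))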
def assert_equal(first, second):
assert first == second, \
"%s != %s" % (first, second)
def ensure_manifest_details_integrity(detailsobj, manifest_json, start=0):
sequences = manifest_json['sequences']
canvases = sequences[0]['canvases']
no_of_images = len(detailsobj['images'])
assert_equal(len(sequences), 1)
assert_equal(len(canvases), no_of_images + start)
for i in xrange(start, start+no_of_images):
assert_equal(canvases[i]['label'],
detailsobj['images'][i-start]['name'])
assert_equal(canvases[i]['width'],
detailsobj['images'][i-start]['width'])
assert_equal(canvases[i]['height'],
detailsobj['images'][i-start]['height'])
image_resource = canvases[i]['images'][0]['resource']
assert_equal(image_resource['service']['@id'],
detailsobj['images'][i-start]['path'])
assert_equal(image_resource['width'],
detailsobj['images'][i-start]['width'])
assert_equal(image_resource['height'],
detailsobj['images'][i-start]['height'])
def ensure_manifest_schema_conformance(manifest_json):
assert validate(manifest_json), \
"Manifest json: \n%s\n is invalid" % manifest_json
def check_updated_details(manifest_json, details):
sequences = manifest_json['sequences']
canvases = sequences[0]['canvases']
new_image_ids = [image['image_id'] for image in details['images']]
updated_canvases = [canvas for canvas in canvases
if image_id_from_canvas_id(canvas["@id"])
in new_image_ids]
updated_canvases = {image_id_from_canvas_id(canvas["@id"]): canvas
for canvas in updated_canvases}
assert_equal(manifest_json['label'], details['manifest_label'])
for image_id in new_image_ids:
canvas = updated_canvases[image_id]
image = [image for image in details['images']
if image['image_id'] == image_id][0]
assert_equal(canvas['label'], image['name'])
assert_equal(canvas['width'], image['width'])
assert_equal(canvas['height'], image['height'])
image_resource = canvas['images'][0]['resource']
assert_equal(image_resource['service']['@id'], image['path'])
assert_equal(image_resource['width'], image['width'])
assert_equal(image_resource['height'], image['height'])
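# Shape of the per-image dicts consumed by the checkers in this module (illustrative,
# inferred from the keys they access; not part of the original module):
#   {"image_id": "img1", "name": "Page 1", "width": 100, "height": 80, "path": "...",
#    "comments": [{"text": "..."}], "transcriptions": [{"text": "..."}]}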
def check_annotations_in_list(annotation_list, imageobj):
resources = annotation_list['resources']
relevant_resources = []
for resource in resources:
if image_id_from_canvas_id(resource['on']) == imageobj['image_id']:
relevant_resources.append(resource)
list_comments = [item['resource']['chars'] for item in resources
if item['motivation'] == "oa:commenting"]
list_transcriptions = [item['resource']['chars'] for item in resources
if item['resource']['@type'] == "cnt:ContentAsText"]
for comment in imageobj.get('comments', []):
assert comment['text'] in list_comments, \
"Comment %s not found" % comment['text']
for transcription in imageobj.get('transcriptions', []):
        assert transcription['text'] in list_transcriptions, \
            "Transcription %s not found" % transcription['text']
|
mit
| 5,603,357,598,368,924,000
| 40.715789
| 80
| 0.607368
| false
| 3.966967
| false
| false
| false
|
radicalbit/ambari
|
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
|
1
|
23158
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
from urlparse import urlparse
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions.get_config import get_config
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.resources.system import File, Execute, Directory
from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
from resource_management.core.shell import as_user
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
from resource_management.core.exceptions import Fail
from resource_management.core.shell import as_sudo
from resource_management.core.shell import quote_bash_args
from resource_management.core.logger import Logger
from resource_management.core import utils
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
from resource_management.libraries.functions.security_commons import update_credential_provider_path
from ambari_commons.constants import SERVICE
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hive(name=None):
import params
hive_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
# Permissions 644 for conf dir (client) files, and 600 for conf.server
mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600
Directory(params.hive_etc_dir_prefix,
mode=0755
)
# We should change configurations for client as well as for server.
# The reason is that stale-configs are service-level, not component.
Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
for conf_dir in params.hive_conf_dirs_list:
fill_conf_dir(conf_dir)
params.hive_site_config = update_credential_provider_path(params.hive_site_config,
'hive-site',
os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
params.hive_user,
params.user_group
)
XmlConfig("hive-site.xml",
conf_dir=params.hive_config_dir,
configurations=params.hive_site_config,
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.hive_user,
group=params.user_group,
mode=mode_identified)
# Generate atlas-application.properties.xml file
if params.enable_atlas_hook:
atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
File(format("{hive_config_dir}/hive-env.sh"),
owner=params.hive_user,
group=params.user_group,
content=InlineTemplate(params.hive_env_sh_template),
mode=mode_identified
)
  # On some OSes this folder may not exist, so create it before pushing files there
Directory(params.limits_conf_dir,
create_parents = True,
owner='root',
group='root'
)
File(os.path.join(params.limits_conf_dir, 'hive.conf'),
owner='root',
group='root',
mode=0644,
content=Template("hive.conf.j2")
)
if params.security_enabled:
File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
owner=params.hive_user,
group=params.user_group,
content=Template("zkmigrator_jaas.conf.j2")
)
File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
mode = 0644,
)
if name != "client":
setup_non_client()
if name == 'hiveserver2':
setup_hiveserver2()
if name == 'metastore':
setup_metastore()
def setup_hiveserver2():
import params
File(params.start_hiveserver2_path,
mode=0755,
content=Template(format('{start_hiveserver2_script}'))
)
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
mode=0600
)
XmlConfig("hiveserver2-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hiveserver2-site'],
configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
owner=params.hive_user,
group=params.user_group,
mode=0600)
# copy tarball to HDFS feature not supported
if not (params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major)):
params.HdfsResource(params.webhcat_apps_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=0755
)
# Create webhcat dirs.
if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
params.HdfsResource(params.hcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=params.hcat_hdfs_user_mode
)
params.HdfsResource(params.webhcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=params.webhcat_hdfs_user_mode
)
# ****** Begin Copy Tarballs ******
# *********************************
# if copy tarball to HDFS feature supported copy mapreduce.tar.gz and tez.tar.gz to HDFS
if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
# Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
# This can use a different source and dest location to account
copy_to_hdfs("pig",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.pig_tar_source,
custom_dest_file=params.pig_tar_dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("hive",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.hive_tar_source,
custom_dest_file=params.hive_tar_dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
wildcard_tarballs = ["sqoop", "hadoop_streaming"]
for tarball_name in wildcard_tarballs:
    source_file_pattern = getattr(params, tarball_name + "_tar_source")
    dest_dir = getattr(params, tarball_name + "_tar_dest_dir")
if source_file_pattern is None or dest_dir is None:
continue
source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
for source_file in source_files:
src_filename = os.path.basename(source_file)
dest_file = os.path.join(dest_dir, src_filename)
copy_to_hdfs(tarball_name,
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=source_file,
custom_dest_file=dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
# ******* End Copy Tarballs *******
# *********************************
# if warehouse directory is in DFS
if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(params.default_fs).scheme:
# Create Hive Metastore Warehouse Dir
params.HdfsResource(params.hive_apps_whs_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
group=params.user_group,
mode=params.hive_apps_whs_mode
)
else:
Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
# Create Hive User Dir
params.HdfsResource(params.hive_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
mode=params.hive_hdfs_user_mode
)
if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
params.HdfsResource(params.hive_exec_scratchdir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
group=params.hdfs_user,
mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
params.HdfsResource(None, action="execute")
def setup_non_client():
import params
Directory(params.hive_pid_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_log_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_var_lib,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
if params.hive_jdbc_target is not None and not os.path.exists(params.hive_jdbc_target):
jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
if params.hive2_jdbc_target is not None and not os.path.exists(params.hive2_jdbc_target):
jdbc_connector(params.hive2_jdbc_target, params.hive2_previous_jdbc_jar)
def setup_metastore():
import params
if params.hive_metastore_site_supported:
hivemetastore_site_config = get_config("hivemetastore-site")
if hivemetastore_site_config:
XmlConfig("hivemetastore-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hivemetastore-site'],
configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
owner=params.hive_user,
group=params.user_group,
mode=0600)
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
mode=0600
)
File(params.start_metastore_path,
mode=0755,
content=StaticFile('startMetastore.sh')
)
if not is_empty(params.hive_exec_scratchdir):
dirPathStr = urlparse(params.hive_exec_scratchdir).path
pathComponents = dirPathStr.split("/")
if dirPathStr.startswith("/tmp") and len(pathComponents) > 2:
Directory (params.hive_exec_scratchdir,
owner = params.hive_user,
create_parents = True,
mode=0777)
def create_metastore_schema():
import params
create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -initSchema "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose")
check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -info "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose"), params.hive_user)
# HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
# Fixing it with the hack below:
quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))
Execute(create_schema_cmd,
not_if = check_schema_created_cmd,
user = params.hive_user
)
"""
Writes configuration files required by Hive.
"""
def fill_conf_dir(component_conf_dir):
import params
hive_client_conf_path = os.path.realpath(format("{stack_root}/current/{component_directory}/conf"))
component_conf_dir = os.path.realpath(component_conf_dir)
mode_identified_for_file = 0644 if component_conf_dir == hive_client_conf_path else 0600
mode_identified_for_dir = 0755 if component_conf_dir == hive_client_conf_path else 0700
Directory(component_conf_dir,
owner=params.hive_user,
group=params.user_group,
create_parents = True,
mode=mode_identified_for_dir
)
if 'mapred-site' in params.config['configurations']:
XmlConfig("mapred-site.xml",
conf_dir=component_conf_dir,
configurations=params.config['configurations']['mapred-site'],
configuration_attributes=params.config['configuration_attributes']['mapred-site'],
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file)
File(format("{component_conf_dir}/hive-default.xml.template"),
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file
)
File(format("{component_conf_dir}/hive-env.sh.template"),
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file
)
# Create hive-log4j.properties and hive-exec-log4j.properties
# in /etc/hive/conf and not in /etc/hive2/conf
if params.log4j_version == '1':
log4j_exec_filename = 'hive-exec-log4j.properties'
if (params.log4j_exec_props != None):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=InlineTemplate(params.log4j_exec_props)
)
    elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
)
log4j_filename = 'hive-log4j.properties'
if (params.log4j_props != None):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=InlineTemplate(params.log4j_props)
)
  elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
)
if params.parquet_logging_properties is not None:
File(format("{component_conf_dir}/parquet-logging.properties"),
mode = mode_identified_for_file,
group = params.user_group,
owner = params.hive_user,
content = params.parquet_logging_properties)
def jdbc_connector(target, hive_previous_jdbc_jar):
"""
Shared by Hive Batch, Hive Metastore, and Hive Interactive
:param target: Target of jdbc jar name, which could be for any of the components above.
"""
import params
if not params.jdbc_jar_name:
return
if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
File(hive_previous_jdbc_jar, action='delete')
    # TODO: remove this once ranger_hive_plugin no longer provides the jdbc driver
if params.prepackaged_jdbc_name != params.jdbc_jar_name:
Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
path=["/bin", "/usr/bin/"],
sudo = True)
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source))
    # maybe it would be more correct to use the db type
if params.sqla_db_used:
untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
Execute(untar_sqla_type2_driver, sudo = True)
Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))
Directory(params.jdbc_libs_dir,
create_parents = True)
Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
else:
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
#creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo = True)
else:
#for default hive db (Mysql)
Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), target),
#creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo=True
)
pass
File(target,
mode = 0644,
)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hive(name=None):
import params
XmlConfig("hive-site.xml",
conf_dir = params.hive_conf_dir,
configurations = params.config['configurations']['hive-site'],
owner=params.hive_user,
configuration_attributes=params.config['configuration_attributes']['hive-site']
)
if name in ["hiveserver2","metastore"]:
# Manually overriding service logon user & password set by the installation package
service_name = params.service_map[name]
ServiceConfig(service_name,
action="change_user",
username = params.hive_user,
password = Script.get_password(params.hive_user))
Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
if name == 'metastore':
if params.init_metastore_schema:
check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}'
'&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
try:
Execute(check_schema_created_cmd)
except Fail:
create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}',
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
Execute(create_schema_cmd,
user = params.hive_user,
logoutput=True
)
if name == "hiveserver2":
if params.hive_execution_engine == "tez":
# Init the tez app dir in hadoop
script_file = __file__.replace('/', os.sep)
cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
|
apache-2.0
| -4,470,074,220,140,164,000
| 42.205224
| 147
| 0.630883
| false
| 3.822083
| true
| false
| false
|
seanchen/taiga-back
|
taiga/users/serializers.py
|
1
|
5786
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from taiga.base.api import serializers
from taiga.base.fields import PgArrayField
from taiga.projects.models import Project
from .models import User, Role
from .services import get_photo_or_gravatar_url, get_big_photo_or_gravatar_url
import re
######################################################
## User
######################################################
class ContactProjectDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ("id", "slug", "name")
class UserSerializer(serializers.ModelSerializer):
full_name_display = serializers.SerializerMethodField("get_full_name_display")
photo = serializers.SerializerMethodField("get_photo")
big_photo = serializers.SerializerMethodField("get_big_photo")
roles = serializers.SerializerMethodField("get_roles")
projects_with_me = serializers.SerializerMethodField("get_projects_with_me")
class Meta:
model = User
# IMPORTANT: Maintain the UserAdminSerializer Meta up to date
# with this info (including there the email)
fields = ("id", "username", "full_name", "full_name_display",
"color", "bio", "lang", "theme", "timezone", "is_active",
"photo", "big_photo", "roles", "projects_with_me")
read_only_fields = ("id",)
def validate_username(self, attrs, source):
value = attrs[source]
validator = validators.RegexValidator(re.compile('^[\w.-]+$'), _("invalid username"),
_("invalid"))
try:
validator(value)
except ValidationError:
            raise serializers.ValidationError(_("Required. 255 characters or fewer. Letters, "
                                                "numbers and ./-/_ characters"))
if (self.object and
self.object.username != value and
User.objects.filter(username=value).exists()):
raise serializers.ValidationError(_("Invalid username. Try with a different one."))
return attrs
def get_full_name_display(self, obj):
return obj.get_full_name() if obj else ""
def get_photo(self, user):
return get_photo_or_gravatar_url(user)
def get_big_photo(self, user):
return get_big_photo_or_gravatar_url(user)
def get_roles(self, user):
        return user.memberships.order_by("role__name").values_list("role__name", flat=True).distinct()
def get_projects_with_me(self, user):
request = self.context.get("request", None)
requesting_user = request and request.user or None
if not requesting_user or not requesting_user.is_authenticated():
return []
else:
project_ids = requesting_user.memberships.values_list("project__id", flat=True)
memberships = user.memberships.filter(project__id__in=project_ids)
project_ids = memberships.values_list("project__id", flat=True)
projects = Project.objects.filter(id__in=project_ids)
return ContactProjectDetailSerializer(projects, many=True).data
class UserAdminSerializer(UserSerializer):
class Meta:
model = User
# IMPORTANT: Maintain the UserSerializer Meta up to date
# with this info (including here the email)
fields = ("id", "username", "full_name", "full_name_display", "email",
"color", "bio", "lang", "theme", "timezone", "is_active", "photo",
"big_photo")
read_only_fields = ("id", "email")
class BasicInfoSerializer(UserSerializer):
class Meta:
model = User
fields = ("username", "full_name_display","photo", "big_photo")
class RecoverySerializer(serializers.Serializer):
token = serializers.CharField(max_length=200)
password = serializers.CharField(min_length=6)
class ChangeEmailSerializer(serializers.Serializer):
email_token = serializers.CharField(max_length=200)
class CancelAccountSerializer(serializers.Serializer):
cancel_token = serializers.CharField(max_length=200)
######################################################
## Role
######################################################
class RoleSerializer(serializers.ModelSerializer):
members_count = serializers.SerializerMethodField("get_members_count")
permissions = PgArrayField(required=False)
class Meta:
model = Role
fields = ('id', 'name', 'permissions', 'computable', 'project', 'order', 'members_count')
i18n_fields = ("name",)
def get_members_count(self, obj):
return obj.memberships.count()
class ProjectRoleSerializer(serializers.ModelSerializer):
class Meta:
model = Role
fields = ('id', 'name', 'slug', 'order', 'computable')
i18n_fields = ("name",)
|
agpl-3.0
| 3,606,832,449,048,560,600
| 37.56
| 103
| 0.639523
| false
| 4.182213
| false
| false
| false
|
FireBladeNooT/Medusa_1_6
|
medusa/notifiers/plex.py
|
1
|
10632
|
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
import re
from six import iteritems
from .. import app, common, logger
from ..helper.exceptions import ex
from ..helpers import getURL, make_session
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class Notifier(object):
def __init__(self):
self.headers = {
'X-Plex-Device-Name': 'Medusa',
'X-Plex-Product': 'Medusa Notifier',
'X-Plex-Client-Identifier': common.USER_AGENT,
'X-Plex-Version': '2016.02.10'
}
self.session = make_session()
@staticmethod
def _notify_pht(message, title='Medusa', host=None, username=None, password=None, force=False): # pylint: disable=too-many-arguments
"""Internal wrapper for the notify_snatch and notify_download functions
Args:
message: Message body of the notice to send
title: Title of the notice to send
host: Plex Home Theater(s) host:port
username: Plex username
password: Plex password
force: Used for the Test method to override config safety checks
Returns:
Returns a list results in the format of host:ip:result
The result will either be 'OK' or False, this is used to be parsed by the calling function.
"""
from . import kodi_notifier
# suppress notifications if the notifier is disabled but the notify options are checked
if not app.USE_PLEX_CLIENT and not force:
return False
host = host or app.PLEX_CLIENT_HOST
username = username or app.PLEX_CLIENT_USERNAME
password = password or app.PLEX_CLIENT_PASSWORD
return kodi_notifier._notify_kodi(message, title=title, host=host, username=username, password=password, force=force, dest_app="PLEX") # pylint: disable=protected-access
##############################################################################
# Public functions
##############################################################################
def notify_snatch(self, ep_name, is_proper):
if app.PLEX_NOTIFY_ONSNATCH:
self._notify_pht(ep_name, common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]])
def notify_download(self, ep_name):
if app.PLEX_NOTIFY_ONDOWNLOAD:
self._notify_pht(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD])
def notify_subtitle_download(self, ep_name, lang):
if app.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_pht(ep_name + ': ' + lang, common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD])
def notify_git_update(self, new_version='??'):
if app.NOTIFY_ON_UPDATE:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
if update_text and title and new_version:
self._notify_pht(update_text + new_version, title)
def notify_login(self, ipaddress=""):
if app.NOTIFY_ON_LOGIN:
update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
title = common.notifyStrings[common.NOTIFY_LOGIN]
if update_text and title and ipaddress:
self._notify_pht(update_text.format(ipaddress), title)
def test_notify_pht(self, host, username, password):
return self._notify_pht('This is a test notification from Medusa',
'Test Notification', host, username, password, force=True)
def test_notify_pms(self, host, username, password, plex_server_token):
return self.update_library(host=host, username=username, password=password,
plex_server_token=plex_server_token, force=True)
def update_library(self, ep_obj=None, host=None, # pylint: disable=too-many-arguments, too-many-locals, too-many-statements, too-many-branches
username=None, password=None,
plex_server_token=None, force=False):
"""Handles updating the Plex Media Server host via HTTP API
Plex Media Server currently only supports updating the whole video library and not a specific path.
Returns:
Returns None for no issue, else a string of host with connection issues
"""
if not (app.USE_PLEX_SERVER and app.PLEX_UPDATE_LIBRARY) and not force:
return None
host = host or app.PLEX_SERVER_HOST
if not host:
logger.log(u'PLEX: No Plex Media Server host specified, check your settings', logger.DEBUG)
return False
if not self.get_token(username, password, plex_server_token):
logger.log(u'PLEX: Error getting auth token for Plex Media Server, check your settings', logger.WARNING)
return False
file_location = '' if not ep_obj else ep_obj.location
host_list = {x.strip() for x in host.split(',') if x.strip()}
        hosts_all, hosts_match = {}, {}
hosts_failed = set()
for cur_host in host_list:
url = 'http{0}://{1}/library/sections'.format(('', 's')[bool(app.PLEX_SERVER_HTTPS)], cur_host)
try:
xml_response = getURL(url, headers=self.headers, session=self.session, returns='text')
if not xml_response:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(cur_host), logger.WARNING)
hosts_failed.add(cur_host)
continue
media_container = etree.fromstring(xml_response)
except IOError as error:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
continue
except Exception as error:
if 'invalid token' in str(error):
logger.log(u'PLEX: Please set TOKEN in Plex settings: ', logger.WARNING)
else:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
continue
sections = media_container.findall('.//Directory')
if not sections:
logger.log(u'PLEX: Plex Media Server not running on: {0}'.format
(cur_host), logger.DEBUG)
hosts_failed.add(cur_host)
continue
for section in sections:
if 'show' == section.attrib['type']:
keyed_host = [(str(section.attrib['key']), cur_host)]
hosts_all.update(keyed_host)
if not file_location:
continue
for section_location in section.findall('.//Location'):
section_path = re.sub(r'[/\\]+', '/', section_location.attrib['path'].lower())
section_path = re.sub(r'^(.{,2})[/\\]', '', section_path)
location_path = re.sub(r'[/\\]+', '/', file_location.lower())
location_path = re.sub(r'^(.{,2})[/\\]', '', location_path)
if section_path in location_path:
hosts_match.update(keyed_host)
if force:
return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
if hosts_match:
logger.log(u'PLEX: Updating hosts where TV section paths match the downloaded show: ' + ', '.join(set(hosts_match)), logger.DEBUG)
else:
logger.log(u'PLEX: Updating all hosts with TV sections: ' + ', '.join(set(hosts_all)), logger.DEBUG)
hosts_try = (hosts_match.copy(), hosts_all.copy())[not len(hosts_match)]
for section_key, cur_host in iteritems(hosts_try):
url = 'http{0}://{1}/library/sections/{2}/refresh'.format(('', 's')[bool(app.PLEX_SERVER_HTTPS)], cur_host, section_key)
try:
getURL(url, headers=self.headers, session=self.session, returns='text')
except Exception as error:
logger.log(u'PLEX: Error updating library section for Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
def get_token(self, username=None, password=None, plex_server_token=None):
username = username or app.PLEX_SERVER_USERNAME
password = password or app.PLEX_SERVER_PASSWORD
plex_server_token = plex_server_token or app.PLEX_SERVER_TOKEN
if plex_server_token:
self.headers['X-Plex-Token'] = plex_server_token
if 'X-Plex-Token' in self.headers:
return True
if not (username and password):
return True
logger.log(u'PLEX: fetching plex.tv credentials for user: ' + username, logger.DEBUG)
params = {
'user[login]': username,
'user[password]': password
}
try:
response = getURL('https://plex.tv/users/sign_in.json',
post_data=params,
headers=self.headers,
session=self.session,
returns='json')
self.headers['X-Plex-Token'] = response['user']['authentication_token']
except Exception as error:
self.headers.pop('X-Plex-Token', '')
            logger.log(u'PLEX: Error fetching credentials from plex.tv for user {0}: {1}'.format
(username, error), logger.DEBUG)
return 'X-Plex-Token' in self.headers
|
gpl-3.0
| 4,151,996,194,805,680,000
| 42.219512
| 178
| 0.586155
| false
| 4.125728
| false
| false
| false
|
QualiSystems/shellfoundry
|
shellfoundry/commands/extend_command.py
|
1
|
6915
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import click
from shellfoundry.exceptions import VersionRequestException
from shellfoundry.utilities.config_reader import CloudShellConfigReader, Configuration
from shellfoundry.utilities.constants import (
METADATA_AUTHOR_FIELD,
TEMPLATE_AUTHOR_FIELD,
TEMPLATE_BASED_ON,
)
from shellfoundry.utilities.modifiers.definition.definition_modification import (
DefinitionModification,
)
from shellfoundry.utilities.repository_downloader import RepositoryDownloader
from shellfoundry.utilities.temp_dir_context import TempDirContext
from shellfoundry.utilities.validations import (
ShellGenerationValidations,
ShellNameValidations,
)
class ExtendCommandExecutor(object):
LOCAL_TEMPLATE_URL_PREFIX = "local:"
SIGN_FILENAME = "signed"
ARTIFACTS = {"driver": "src", "deployment": "deployments"}
def __init__(
self,
repository_downloader=None,
shell_name_validations=None,
shell_gen_validations=None,
):
"""Creates a new shell based on an already existing shell.
:param RepositoryDownloader repository_downloader:
:param ShellNameValidations shell_name_validations:
"""
self.repository_downloader = repository_downloader or RepositoryDownloader()
self.shell_name_validations = shell_name_validations or ShellNameValidations()
self.shell_gen_validations = (
shell_gen_validations or ShellGenerationValidations()
)
self.cloudshell_config_reader = Configuration(CloudShellConfigReader())
def extend(self, source, attribute_names):
"""Create a new shell based on an already existing shell.
:param str source: The path to the existing shell. Can be a url or local path
:param tuple attribute_names: Sequence of attribute names that should be added
"""
with TempDirContext("Extended_Shell_Temp_Dir") as temp_dir:
try:
if self._is_local(source):
temp_shell_path = self._copy_local_shell(
self._remove_prefix(
source, ExtendCommandExecutor.LOCAL_TEMPLATE_URL_PREFIX
),
temp_dir,
)
else:
temp_shell_path = self._copy_online_shell(source, temp_dir)
except VersionRequestException as err:
raise click.ClickException(str(err))
except Exception:
raise click.BadParameter("Check correctness of entered attributes")
# Remove shell version from folder name
shell_path = re.sub(r"-\d+(\.\d+)*/?$", "", temp_shell_path)
os.rename(temp_shell_path, shell_path)
if not self.shell_gen_validations.validate_2nd_gen(shell_path):
raise click.ClickException("Invalid second generation Shell.")
modificator = DefinitionModification(shell_path)
self._unpack_driver_archive(shell_path, modificator)
self._remove_quali_signature(shell_path)
self._change_author(shell_path, modificator)
self._add_based_on(shell_path, modificator)
self._add_attributes(shell_path, attribute_names)
try:
shutil.move(shell_path, os.path.curdir)
except shutil.Error as err:
raise click.BadParameter(str(err))
click.echo("Created shell based on source {}".format(source))
    def _copy_local_shell(self, source, destination):
        """Copy a local shell directory into the destination folder."""
if os.path.isdir(source):
source = source.rstrip(os.sep)
name = os.path.basename(source)
ext_shell_path = os.path.join(destination, name)
shutil.copytree(source, ext_shell_path)
        else:
            raise Exception("Local source must be an existing directory: {}".format(source))
return ext_shell_path
def _copy_online_shell(self, source, destination):
"""Download shell and extract it."""
archive_path = None
try:
archive_path = self.repository_downloader.download_file(source, destination)
ext_shell_path = (
self.repository_downloader.repo_extractor.extract_to_folder(
archive_path, destination
)
)
ext_shell_path = ext_shell_path[0]
finally:
if archive_path and os.path.exists(archive_path):
os.remove(archive_path)
return os.path.join(destination, ext_shell_path)
@staticmethod
def _is_local(source):
return source.startswith(ExtendCommandExecutor.LOCAL_TEMPLATE_URL_PREFIX)
@staticmethod
def _remove_prefix(string, prefix):
return string.rpartition(prefix)[-1]
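    # e.g. _remove_prefix("local:/home/user/MyShell", "local:") -> "/home/user/MyShell"
    # (rpartition keeps everything after the last occurrence of the prefix).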
def _unpack_driver_archive(self, shell_path, modificator=None):
"""Unpack driver files from ZIP-archive."""
if not modificator:
modificator = DefinitionModification(shell_path)
artifacts = modificator.get_artifacts_files(
artifact_name_list=list(self.ARTIFACTS.keys())
)
for artifact_name, artifact_path in artifacts.items():
artifact_path = os.path.join(shell_path, artifact_path)
if os.path.exists(artifact_path):
self.repository_downloader.repo_extractor.extract_to_folder(
artifact_path,
os.path.join(shell_path, self.ARTIFACTS[artifact_name]),
)
os.remove(artifact_path)
@staticmethod
def _remove_quali_signature(shell_path):
"""Remove Quali signature from shell."""
signature_file_path = os.path.join(
shell_path, ExtendCommandExecutor.SIGN_FILENAME
)
if os.path.exists(signature_file_path):
os.remove(signature_file_path)
def _change_author(self, shell_path, modificator=None):
"""Change shell authoring."""
author = self.cloudshell_config_reader.read().author
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.edit_definition(field=TEMPLATE_AUTHOR_FIELD, value=author)
modificator.edit_tosca_meta(field=METADATA_AUTHOR_FIELD, value=author)
def _add_based_on(self, shell_path, modificator=None):
"""Add Based_ON field to shell-definition.yaml file."""
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.add_field_to_definition(field=TEMPLATE_BASED_ON)
    def _add_attributes(self, shell_path, attribute_names, modificator=None):
        """Add commented-out attributes to the shell definition."""
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.add_properties(attribute_names=attribute_names)
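# Illustrative usage sketch (not part of the original module; the path is hypothetical).
# A "local:" prefix routes the source through _copy_local_shell, anything else is
# treated as a downloadable repository:
#
#   ExtendCommandExecutor().extend("local:/path/to/MyShell", ("Vendor", "Model"))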
|
apache-2.0
| 2,709,334,728,992,063,500
| 36.994505
| 88
| 0.635141
| false
| 4.231946
| true
| false
| false
|
rougier/dana
|
examples/oja.py
|
1
|
3086
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
'''
Implementation of the Oja learning rule for extracting the principal component
of an elliptical gaussian distribution. Given that the distribution is
elliptical, its principal component should be oriented along the main axis of
the distribution, therefore, final weights should be +/-cos(theta), sin(theta)
References:
-----------
E. Oja, "A Simplified Neuron Model as a Principal Component Analyzer"
Journal of Mathematical Biology 15: 267-273, 1982.
'''
from numpy import *
from dana import *
def sample(theta, mu1, std1, mu2, std2):
''' Random sample according to an elliptical Gaussian distribution'''
u1 = random.random()
u2 = random.random()
T1 = sqrt(-2.0*log(u1))*cos(2.0*pi*u2)
T2 = sqrt(-2.0*log(u1))*sin(2.0*pi*u2)
x = mu1 + (std1*T1*cos(theta) - std2*T2*sin(theta))
y = mu2 + (std1*T1*sin(theta) + std2*T2*cos(theta))
return np.array([x,y])
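# Quick sanity check for sample() (illustrative comment, not part of the original
# example): draw many points and inspect their covariance; the dominant eigenvector
# should point along (cos(theta), sin(theta)) because std1 > std2, e.g.:
#   pts = np.array([sample(-135*pi/180, 0.0, 1.0, 0.0, 0.5) for _ in range(10000)])
#   print np.cov(pts.T)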
theta = -135.0 * pi / 180.0
src = Group((2,), 'V = sample(theta,0.0,1.0,0.0,0.5)')
tgt = Group((1,), 'V')
C = DenseConnection(src('V'), tgt('V'), np.ones((1,2)),
'dW/dt = post.V*(pre.V-post.V*W)')
run(time=10.0,dt=0.001)
print "Learned weights : ", C.weights[0]
print "(should be +/- [%f, %f])" % (cos(theta), sin(theta))
|
bsd-3-clause
| -3,846,490,947,212,068,000
| 44.382353
| 79
| 0.685677
| false
| 3.510808
| false
| false
| false
|
ideascube/pibox-installer
|
kiwix-hotspot/backend/util.py
|
1
|
15466
|
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import os
import re
import sys
import time
import shlex
import signal
import ctypes
import tempfile
import threading
import subprocess
import data
from util import CLILogger
# windows-only flags to prevent sleep on executing thread
WINDOWS_SLEEP_FLAGS = {
# Enables away mode. This value must be specified with ES_CONTINUOUS.
# Away mode should be used only by media-recording and media-distribution
# applications that must perform critical background processing
# on desktop computers while the computer appears to be sleeping.
"ES_AWAYMODE_REQUIRED": 0x00000040,
# Informs the system that the state being set should remain in effect until
# the next call that uses ES_CONTINUOUS and one of the other state flags is cleared.
"ES_CONTINUOUS": 0x80000000,
# Forces the display to be on by resetting the display idle timer.
"ES_DISPLAY_REQUIRED": 0x00000002,
# Forces the system to be in the working state by resetting the system idle timer.
"ES_SYSTEM_REQUIRED": 0x00000001,
}
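# Illustrative sketch (not part of the original module): these flags are meant to be
# OR-ed together and passed to the Win32 SetThreadExecutionState call to keep the
# machine awake during a long-running write. The helper name is hypothetical.
def _example_keep_system_awake():
    if sys.platform != "win32":
        return
    ctypes.windll.kernel32.SetThreadExecutionState(
        WINDOWS_SLEEP_FLAGS["ES_CONTINUOUS"] | WINDOWS_SLEEP_FLAGS["ES_SYSTEM_REQUIRED"]
    )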
class CheckCallException(Exception):
    def __init__(self, msg):
        # initialise the base Exception properly so the message is preserved
        super(CheckCallException, self).__init__(msg)
def startup_info_args():
if hasattr(subprocess, "STARTUPINFO"):
# On Windows, subprocess calls will pop up a command window by default
# when run from Pyinstaller with the ``--noconsole`` option. Avoid this
# distraction.
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
cf = subprocess.CREATE_NEW_PROCESS_GROUP
else:
si = None
cf = 0
return {"startupinfo": si, "creationflags": cf}
def subprocess_pretty_call(
cmd, logger, stdin=None, check=False, decode=False, as_admin=False
):
""" flexible subprocess helper running separately and using the logger
cmd: the command to be run
logger: the logger to send debug output to
stdin: pipe input into the command
check: whether it should raise on non-zero return code
decode: whether it should decode output (bytes) into UTF-8 str
as_admin: whether the command should be run as root/admin """
if as_admin:
if sys.platform == "win32":
if logger is not None:
logger.std("Call (as admin): " + str(cmd))
return run_as_win_admin(cmd, logger)
from_cli = logger is None or type(logger) == CLILogger
cmd = get_admin_command(cmd, from_gui=not from_cli, logger=logger)
# We should use subprocess.run but it is not available in python3.4
process = subprocess.Popen(
cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**startup_info_args()
)
if logger is not None:
logger.std("Call: " + str(process.args))
process.wait()
lines = (
[l.decode("utf-8", "ignore") for l in process.stdout.readlines()]
if decode
else process.stdout.readlines()
)
if logger is not None:
for line in lines:
logger.raw_std(line if decode else line.decode("utf-8", "ignore"))
if check:
if process.returncode != 0:
raise CheckCallException("Process %s failed" % process.args)
return lines
return process.returncode, lines
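# Illustrative usage sketch (not part of the original module): running a simple
# command through subprocess_pretty_call with a CLILogger. The command shown
# ("echo hello") and the no-argument CLILogger() construction are assumptions
# made for the example only; this helper is never called by the module itself.
def _example_pretty_call():
    logger = CLILogger()
    returncode, lines = subprocess_pretty_call(["echo", "hello"], logger, decode=True)
    return returncode == 0 and lines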
def subprocess_pretty_check_call(cmd, logger, stdin=None, as_admin=False):
return subprocess_pretty_call(
cmd=cmd, logger=logger, stdin=stdin, check=True, as_admin=as_admin
)
def subprocess_timed_output(cmd, logger, timeout=10):
logger.std("Getting output of " + str(cmd))
return subprocess.check_output(
cmd, universal_newlines=True, timeout=timeout
).splitlines()
def subprocess_external(cmd, logger):
""" spawn a new process without capturing nor watching it """
logger.std("Opening: " + str(cmd))
subprocess.Popen(cmd)
def is_admin():
""" whether current process is ran as Windows Admin or unix root """
if sys.platform == "win32":
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except Exception:
return False
return os.getuid() == 0
def run_as_win_admin(command, logger):
""" run specified command with admin rights """
params = " ".join(['"{}"'.format(x) for x in command[1:]]).strip()
rc = ctypes.windll.shell32.ShellExecuteW(None, "runas", command[0], params, None, 1)
# ShellExecuteW returns 5 if user chose not to elevate
if rc == 5:
raise PermissionError()
return rc
def get_admin_command(command, from_gui, logger, log_to=None):
""" updated command to run it as root on macos or linux
from_gui: whether called via GUI. Using cli sudo if not """
if not from_gui:
return ["sudo"] + command
if sys.platform == "darwin":
# write command to a separate temp bash script
script = (
"#!/bin/bash\n\n{command} 2>&1 {redir}\n\n"
'if [ $? -eq 1 ]; then\n echo "!!! echer returned 1" {redir}\n'
" exit 11\nfi\n\n".format(
command=" ".join([shlex.quote(cmd) for cmd in command]),
redir=">>{}".format(log_to) if log_to else "",
)
)
# add script content to logger
logger.raw_std(script)
with tempfile.NamedTemporaryFile(mode="w", suffix=".sh", delete=False) as fd:
fd.write(script)
fd.seek(0)
return [
"/usr/bin/osascript",
"-e",
'do shell script "/bin/bash {command}" '
"with administrator privileges".format(command=fd.name),
]
if sys.platform == "linux":
return ["pkexec"] + command
class EtcherWriterThread(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._should_stop = False # stop flag
self.exp = None # exception to be re-raised by caller
def stop(self):
self._should_stop = True
@classmethod
def show_log(cls, logger, log_to_file, log_file, process, eof=False):
if log_to_file:
try:
with open(log_file.name, "r") as f:
lines = f.readlines()
if len(lines) >= 2:
lines.pop()
# working
if "Validating" in lines[-1] or "Flashing" in lines[-1]:
logger.std(lines[-1].replace("\x1b[1A", "").strip())
elif "[1A" in lines[-1]: # still working but between progress
logger.std(lines[-2].replace("\x1b[1A", "").strip())
else: # probably at end of file
for line in lines[-5:]:
logger.std(line.replace("\x1b[1A", "").strip())
except Exception as exp:
logger.err("Failed to read etcher log output: {}".format(exp))
if not log_to_file or eof:
for line in process.stdout:
logger.raw_std(line.decode("utf-8", "ignore"))
    def run(self):
image_fpath, device_fpath, logger = self._args
logger.step("Copy image to sd card using etcher-cli")
from_cli = logger is None or type(logger) == CLILogger
cmd, log_to_file, log_file = get_etcher_command(
image_fpath, device_fpath, logger, from_cli
)
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **startup_info_args()
)
logger.std("Starting Etcher: " + str(process.args))
# intervals in second
sleep_interval = 2
log_interval = 60
counter = 0
while process.poll() is None:
counter += 1
if self._should_stop: # on cancel
logger.std(". cancelling...")
break
time.sleep(sleep_interval)
# increment sleep counter until we reach log interval
if counter < log_interval // sleep_interval:
counter += 1
continue
# reset counter and display log
counter = 0
self.show_log(logger, log_to_file, log_file, process)
try:
logger.std(". has process exited?")
process.wait(timeout=2)
except subprocess.TimeoutExpired:
logger.std(". process exited")
# send ctrl^c
if sys.platform == "win32":
logger.std(". sending ctrl^C")
process.send_signal(signal.CTRL_C_EVENT)
process.send_signal(signal.CTRL_BREAK_EVENT)
time.sleep(2)
if process.poll() is None:
logger.std(". sending SIGTERM")
process.terminate() # send SIGTERM
time.sleep(2)
if process.poll() is None:
logger.std(". sending SIGKILL")
process.kill() # send SIGKILL (SIGTERM again on windows)
time.sleep(2)
else:
logger.std(". process exited")
if not process.returncode == 0:
self.exp = CheckCallException(
"Process returned {}".format(process.returncode)
)
# capture last output
self.show_log(logger, log_to_file, log_file, process, eof=True)
if log_to_file:
log_file.close()
try:
os.unlink(log_file.name)
except Exception as exp:
logger.err(str(exp))
logger.std(". process done")
logger.progress(1)
def prevent_sleep(logger):
if sys.platform == "win32":
logger.std("Setting ES_SYSTEM_REQUIRED mode to current thread")
ctypes.windll.kernel32.SetThreadExecutionState(
WINDOWS_SLEEP_FLAGS["ES_CONTINUOUS"]
| WINDOWS_SLEEP_FLAGS["ES_SYSTEM_REQUIRED"]
| WINDOWS_SLEEP_FLAGS["ES_DISPLAY_REQUIRED"]
)
return
if sys.platform == "linux":
def make_unmapped_window(wm_name):
from Xlib import display
screen = display.Display().screen()
window = screen.root.create_window(0, 0, 1, 1, 0, screen.root_depth)
window.set_wm_name(wm_name)
window.set_wm_protocols([])
return window
logger.std("Suspending xdg-screensaver")
wid = None
try:
# Create window to use with xdg-screensaver
window = make_unmapped_window("caffeinate")
wid = hex(window.id)
cmd = ["/usr/bin/xdg-screensaver", "suspend", wid]
logger.std("Calling {}".format(cmd))
p = subprocess.Popen(" ".join(cmd), shell=True)
p.wait()
if not p.returncode == 0:
raise OSError("xdg-screensaver returned {}".format(p.returncode))
except Exception as exp:
logger.err("Unable to disable sleep. Please do it manually.")
return wid
if sys.platform == "darwin":
cmd = ["/usr/bin/caffeinate", "-dsi"]
logger.std("Calling {}".format(cmd))
process = subprocess.Popen(cmd, **startup_info_args())
return process
def restore_sleep_policy(reference, logger):
if sys.platform == "win32":
logger.std("Restoring ES_CONTINUOUS mode to current thread")
ctypes.windll.kernel32.SetThreadExecutionState(
WINDOWS_SLEEP_FLAGS["ES_CONTINUOUS"]
)
return
if sys.platform == "linux":
logger.std("Resuming xdg-screensaver (wid #{})".format(reference))
if reference is not None:
subprocess_pretty_call(
["/usr/bin/xdg-screensaver", "resume", reference], logger
)
return
if sys.platform == "darwin":
logger.std("Stopping caffeinate process #{}".format(reference.pid))
reference.kill()
reference.wait(5)
return
def get_etcher_command(image_fpath, device_fpath, logger, from_cli):
# on macOS, GUI sudo captures stdout so we use a log file
log_to_file = not from_cli and sys.platform == "darwin"
if log_to_file:
log_file = tempfile.NamedTemporaryFile(
suffix=".log", delete=False, encoding="utf-8"
)
else:
log_file = None
cmd = [
os.path.join(
data.data_dir,
"etcher-cli",
"etcher" if sys.platform == "win32" else "balena-etcher",
),
"-c",
"-y",
"-u",
"-d",
device_fpath,
image_fpath,
]
# handle sudo or GUI alternative for linux and macOS
if sys.platform in ("linux", "darwin"):
cmd = get_admin_command(
cmd,
from_gui=not from_cli,
logger=logger,
log_to=log_file.name if log_to_file else None,
)
return cmd, log_to_file, log_file
def flash_image_with_etcher(image_fpath, device_fpath, retcode, from_cli=False):
""" flash an image onto SD-card
use only with small image as there is no output capture on OSX
and it is not really cancellable.
retcode is a multiprocessing.Value """
logger = CLILogger()
cmd, log_to_file, log_file = get_etcher_command(
image_fpath, device_fpath, logger, from_cli
)
returncode, _ = subprocess_pretty_call(cmd, check=False, logger=logger)
retcode.value = returncode
if log_to_file:
try:
subprocess_pretty_call(["/bin/cat", log_file.name], logger, decode=True)
log_file.close()
os.unlink(log_file.name)
except Exception as exp:
logger.err(str(exp))
return returncode == 0
def sd_has_single_partition(sd_card, logger):
""" whether sd_card consists of a single partition (expected to be clean) """
try:
if sys.platform == "darwin":
disk_prefix = re.sub(r"\/dev\/disk([0-9]+)", r"disk\1s", sd_card)
lines = subprocess_timed_output(["diskutil", "list", sd_card], logger)
nb_partitions = len(
[
line.strip().rsplit(" ", 1)[-1].replace(disk_prefix, "").strip()
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
elif sys.platform == "win32":
disk_prefix = re.sub(
r".+PHYSICALDRIVE([0-9+])", r"Disk #\1, Partition #", sd_card
)
lines = subprocess_timed_output(["wmic", "partition"], logger)
nb_partitions = len(
[
re.sub(r".+" + disk_prefix + r"([0-9]+).+", r"\1", line)
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
elif sys.platform == "linux":
disk_prefix = re.sub(r"\/dev\/([a-z0-9]+)", r"─\1", sd_card)
lines = subprocess_timed_output(["/bin/lsblk", sd_card], logger)
nb_partitions = len(
[
line.strip().split(" ", 1)[0].replace(disk_prefix, "").strip()
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
except Exception as exp:
logger.err(str(exp))
return False
|
gpl-3.0
| 3,680,295,156,000,532,500
| 32.764192
| 88
| 0.563696
| false
| 4.003106
| false
| false
| false
|
Brett777/Predict-Churn
|
model_management/datascience_framework.py
|
1
|
8515
|
import os
import io
import sys
import dill
import copy
from datetime import datetime
from .evaluator import Evaluator
from .utils import (
    post_to_platform,
    get_current_notebook,
    strip_output,
    mkdir_p,
)
class DataScienceFramework(object):
def __init__(
self,
model,
problem_class,
x_test,
y_test,
name=None,
description=None,
evaluator=Evaluator,
):
# assign variables to class
self.name = name
self.description = description
self.model = model
self.problem_class = problem_class
self.y_test = list(y_test)
self.x_test = list(x_test)
self.framework = model.__module__.split(".")[0]
# get environment data
self._meta_data = self.meta_data()
self.y_pred = self.predict()
# initialize evaluator
        self.evaluator = evaluator(self.problem_class)  # use the injected evaluator class
# class methods
@classmethod
def load(cls, model_id):
# use hard coded string to load for now
with open(".model_cache/sklearn_model_cache.pkl", "rb") as file:
instance = dill.load(file)
instance.model = instance.parse_model(io.BytesIO(instance.model_serialized))
return instance
@classmethod
def project_models(cls):
query = """
query($service_name: String!) {
runnableInstance(serviceName: $service_name) {
runnable {
project {
name
models {
edges {
node {
id
name
description
problemClass
framework
objectClass
language
languageVersion
createdAt
updatedAt
rank
hyperParameters
structure
author {
fullName
}
metrics {
edges {
node {
key
value
}
}
}
diagnostics {
edges {
node {
... on ModelDiagnosticROC {
title
falsePositiveRates
truePositiveRates
thresholds
}
... on ModelDiagnosticResidual {
title
observations
residuals
}
... on ModelDiagnosticConfusionMatrix {
title
matrix
}
}
}
}
parameters {
edges {
node {
key
value
confidenceInterval {
positive
negative
}
}
}
}
}
}
}
}
}
}
}
"""
response = post_to_platform(
{"query": query, "variables": {"service_name": os.environ["SERVICE_NAME"]}}
)
response_data = response.json()["data"]
models = list(
map(
lambda edge: edge["node"],
response_data["runnableInstance"]["runnable"]["project"]["models"][
"edges"
],
)
)
return models
# framework dependent functions
def predict(self):
""" Make prediction based on x_test """
raise NotImplementedError
def framework_version(self):
""" Return version of the framework been used. """
raise NotImplementedError
def object_class(self):
""" Return name of the model object. """
raise NotImplementedError
def parameter(self):
""" Get parameter from model. """
raise NotImplementedError
def hyperparameter(self):
""" Get hyper parameter from model. """
raise NotImplementedError
def serialize_model(self):
""" Default methods for serialize model. """
return dill.dumps(self.model)
def parse_model(self, model_file):
""" Default methods for reading in model. """
return dill.load(model_file)
# base framework functions
def meta_data(self):
""" Capture environment meta data. """
meta_data_obj = {
"name": self.name,
"description": self.description,
"framework": self.framework,
"createdAt": datetime.now().isoformat(),
"sessionName": os.environ["SERVICE_NAME"],
"language": "python",
"languageVersion": ".".join(map(str, sys.version_info[0:3])),
}
return meta_data_obj
def diagnostics(self):
""" Return diagnostics of model. """
return [fn(self.y_test, self.y_pred) for fn in self.evaluator.diagnostics]
def metrics(self):
""" Return evaluation of model performance. """
return [fn(self.y_test, self.y_pred) for fn in self.evaluator.metrics]
def summary(self):
""" Return all infomation that will be stored. """
model_meta = {
"diagnostics": self.diagnostics(),
"metrics": self.metrics(),
"parameters": self.parameter(),
"frameworkVersion": self.framework_version(),
"hyperParameters": self.hyperparameter(),
"problemClass": self.problem_class,
"objectClass": self.object_class(),
}
model_meta.update(self._meta_data)
return model_meta
def save(self):
""" Save all information to platform. """
self.model_serialized = self.serialize_model()
# save model object locally for now
#mkdir_p(".model_cache")
#with open(".model_cache/sklearn_model_cache.pkl", "w") as file:
# dill.dump(self, file)
model_meta = self.summary()
model_meta.update(
{
"data": {"y_pred": list(self.y_pred), "y_test": list(self.y_test)},
"notebook": get_current_notebook(),
}
)
query = """
mutation($input: CreateModelInput!) {
createModel(input: $input) {
clientMutationId
}
}
"""
return post_to_platform({"query": query, "variables": {"input": model_meta}})
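# Illustrative sketch (not part of the original module): the smallest possible
# concrete subclass. Everything below is a hypothetical example showing which
# framework-dependent hooks a real integration (e.g. an sklearn-style wrapper)
# would have to implement; the sys.modules/__version__ and get_params() calls
# are assumptions about the wrapped framework, not guarantees.
class ExampleFramework(DataScienceFramework):
    def predict(self):
        # delegate to the wrapped model's own predict()
        return self.model.predict(self.x_test)
    def framework_version(self):
        # assumes the wrapped framework's top-level module exposes __version__
        return sys.modules[self.framework].__version__
    def object_class(self):
        return type(self.model).__name__
    def parameter(self):
        # no fitted parameters exposed in this minimal sketch
        return []
    def hyperparameter(self):
        # assumes an sklearn-style get_params(); adapt for other frameworks
        return self.model.get_params()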
|
mit
| -343,225,543,429,373,700
| 34.92827
| 91
| 0.376864
| false
| 6.416729
| true
| false
| false
|
SebWouters/CheMPS2
|
PyCheMPS2/tests/test12.py
|
1
|
3497
|
#
# CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
# Copyright (C) 2013-2018 Sebastian Wouters
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import numpy as np
import sys
import PyCheMPS2
import ctypes
# Set the seed of the random number generator and cout.precision
Initializer = PyCheMPS2.PyInitialize()
Initializer.Init()
#######################
### BCS Hamiltonian ###
#######################
eps = np.array([ -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5 ], dtype=ctypes.c_double)
L = len( eps )
g = -1.0
power = 0.0
Nelec = L # Number of fermions in the model = Number of single-particle states
TwoS = 0 # Twice the total spin
Irrep = 0 # No point group is used, Irrep should ALWAYS be zero.
'''
Model: h_ij = delta_ij eps[i]
v_ijkl = delta_ij delta_kl g ( eps[i] * eps[k] ) ^ {power}
h_ijkl = v_ijkl + ( delta_ik h_jl + delta_jl h_ik ) / ( N - 1 )
Ham = 0.5 sum_ijkl h_ijkl sum_sigma,tau a^+_{i,sigma} a^+_{j,tau} a_{l,tau} a_{k,sigma}
'''
# The Hamiltonian initializes all its matrix elements to 0.0
orbirreps = np.zeros( [ L ], dtype=ctypes.c_int )
group = 0
Ham = PyCheMPS2.PyHamiltonian( L, group, orbirreps )
# Setting up the Problem
Prob = PyCheMPS2.PyProblem( Ham, TwoS, Nelec, Irrep )
# Setting up the ConvergenceScheme
# setInstruction(instruction, D, Econst, maxSweeps, noisePrefactor)
OptScheme = PyCheMPS2.PyConvergenceScheme( 2 )
OptScheme.setInstruction( 0, 100, 1e-10, 10, 0.5 )
OptScheme.setInstruction( 1, 1000, 1e-10, 10, 0.0 )
# Run ground state calculation
theDMRG = PyCheMPS2.PyDMRG( Prob, OptScheme )
###############################################################################################
### Hack: overwrite the matrix elements with 4-fold symmetry directly in the Problem object ###
###############################################################################################
for orb1 in range( L ):
for orb2 in range( L ):
eri = g * ( abs( eps[ orb1 ] * eps[ orb2 ] )**power )
oei = ( eps[ orb1 ] + eps[ orb2 ] ) / ( Nelec - 1 )
if ( orb1 == orb2 ):
Prob.setMxElement( orb1, orb1, orb2, orb2, eri + oei )
else:
Prob.setMxElement( orb1, orb1, orb2, orb2, eri )
Prob.setMxElement( orb1, orb2, orb1, orb2, oei )
theDMRG.PreSolve() # New matrix elements require reconstruction of complementary renormalized operators
Energy = theDMRG.Solve()
theDMRG.calc2DMandCorrelations()
theDMRG.printCorrelations()
# Clean-up
# theDMRG.deleteStoredMPS()
theDMRG.deleteStoredOperators()
del theDMRG
del OptScheme
del Prob
del Ham
del Initializer
# Check whether the test succeeded
if ( np.fabs( Energy + 25.5134137600604 ) < 1e-8 ):
print("================> Did test 12 succeed : yes")
else:
print("================> Did test 12 succeed : no")
|
gpl-2.0
| -2,050,985,702,913,222,700
| 35.427083
| 103
| 0.635402
| false
| 3.070237
| false
| false
| false
|
jinjiaho/project57
|
forms.py
|
1
|
3372
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, SubmitField, RadioField, validators, IntegerField, SelectField, BooleanField,DecimalField
from wtforms.validators import DataRequired, Email, Length
from flaskext.mysql import MySQL
class AddUserForm(FlaskForm):
name = StringField('Full Name', validators=[DataRequired("Please enter the name of the newcomer.")])
username= StringField('Username', validators=[DataRequired("Please enter a username.")])
role = RadioField('Role of User')
password = PasswordField('Password', validators=[DataRequired("Please enter a password."), Length(min=6, message="Passwords must be 6 characters or more.")])
submit = SubmitField('Add User')
class CreateNewItem(FlaskForm):
itemname = StringField('Item Name', validators=[DataRequired("Please enter the name of the new item.")])
category = StringField('Category of Item', validators = [DataRequired()])
price = DecimalField('Unit Price', places=4, rounding=None, validators = [DataRequired()])
reorderpt = IntegerField('Reorder Point', validators = [DataRequired()])
count_unit = SelectField('Unit for Withdrawal', validators = [DataRequired()], choices=[("carton", "carton"), ("pc", "pc"), ("kg", "kg"), ("tin", "tin"), ("box", "box"), ("unit", "unit"), ("packet", "packet")])
order_unit = SelectField('Unit for Receiving', validators = [DataRequired()], choices=[("carton", "carton"), ("pc", "pc"), ("kg", "kg"), ("tin", "tin"), ("box", "box"), ("unit", "unit")])
order_multiplier = DecimalField('Item Quantity', places=4, rounding=None, validators = [DataRequired()])
submitTwo = SubmitField('Add New Item')
class ExistingItemsLocation(FlaskForm):
itemname = StringField('Item Name', validators=[DataRequired("Please insert the name of the item")])
tid = SelectField('Tag', coerce=int) # Value is tid
qty = IntegerField('Available Amount', validators = [DataRequired()])
submitFour = SubmitField('Assign To Tag')
class TransferItem(FlaskForm):
iname = StringField('Item Name')
tagOld = SelectField('Old Tag', coerce=int) # Value is tid
tagNew = SelectField('New Tag', coerce=int) # Value is tid
qty = IntegerField('Qty to Transfer', [validators.Optional()])
submit = SubmitField()
class LoginForm(FlaskForm):
username = StringField(validators=[DataRequired("Please enter a username")])
password = PasswordField(validators=[DataRequired('Please enter a password')])
remember = BooleanField()
submit = SubmitField()
class RetrievalForm(FlaskForm):
amount = StringField('Input the Amount Taken', validators=[validators.input_required()])
submit4 = SubmitField("Enter Quantity")
class AddNewLocation(FlaskForm):
tname = StringField('Name of New Tag', validators=[DataRequired("Please enter the name of the tag without spaces.")])
location = SelectField('Select Storeroom')
newLocation = StringField('Add a New Storeroom')
remarks = StringField('Remarks (optional)')
submitThree = SubmitField("Enter")
class TrackingForm(FlaskForm):
enabled = RadioField('Track Item Quantity? ', choices=[('yes','Yes'),('no','No')])
password = PasswordField(validators=[DataRequired('Please enter a password')])
remember = BooleanField()
submit = SubmitField()
class RemoveItem(FlaskForm):
iname = StringField('Item Name')
submit = SubmitField("Delete Item")
|
mit
| 4,052,969,891,147,546,600
| 49.328358
| 211
| 0.733393
| false
| 3.844926
| false
| false
| false
|
kristohr/pybayenv2
|
pybayenv/compute_average_bf.py
|
1
|
4066
|
#!/usr/bin/python
import sys, string, re, os, commands, time, math
#from scipy import stats
#import scipy as sp
import numpy as np
#import matplotlib as mpl
#from matplotlib import pyplot as plt
class SNP:
def __init__(self, name, num_env, t):
self.name = name
self.num_env = [False] * num_env
self.bf_list = [[0 for i in range(t)] for j in range(num_env)]
self.rel_signal = []
self.sum_signals = 0
self.lg_info = []
self.chr = 99
        self.lg = 99
        # initialise accumulators used by add_bf() / get_num_runs() below
        self.sum_bf = 0
        self.num_runs = 0
def get_name(self):
return self.name
def get_num_env(self):
return self.num_env
def set_num_env(self, n):
self.num_env[n] = True
def add_to_list(self, bf, k, i):
self.bf_list[k][i] = bf
def set_signal(self, gamma):
self.rel_signal.append(gamma)
self.sum_signals += gamma #Add to the total of signals
#Return the bf signal in variable k
def get_signal(self, k):
return self.rel_signal[k]
#Return the bf signal list
def get_signals(self):
return self.rel_signal
def get_sum_signals(self):
return self.sum_signals
def print_env(self):
print self.num_env
def get_median_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
median = np.median(bfs)
return median
def get_avg_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
avg = np.average(bfs)
return avg
def add_bf(self, bf):
self.sum_bf += bf
def get_sum_bf(self):
return self.sum_bf
def get_num_runs(self):
return self.num_runs
    def get_bf_list(self):
        return self.bf_list
def set_lg_info(self, info):
self.lg_info.append(info)
def get_lg_info(self):
return self.lg_info
def set_chr(self, ch):
self.chr = ch
def get_chr(self):
return self.chr
def set_linkage_group(self, lg):
self.lg = lg
def get_linkage_group(self):
return self.lg
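# Illustrative sketch (not part of the original script): how an SNP object is
# filled and summarised. bf_list is indexed as [environment][run]; the marker
# name and Bayes-factor values below are made up for the example and this
# helper is never called by the script.
def _example_snp_usage():
    snp = SNP("marker_1", num_env=2, t=3)       # 2 environment variables, 3 runs
    for run in range(3):
        snp.add_to_list(1.5 + run, 0, run)      # BFs for environment 0
        snp.add_to_list(0.5, 1, run)            # BFs for environment 1
    return snp.get_avg_bf(0), snp.get_median_bf(1)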
def compute_average_bf(num_var, num_tests):
N = int(num_var)
t = int(num_tests)
snp_dict = {}
for i in range (0, t):
filename = "results/bf_results_t" + str(i) + ".bf"
data = open( filename, "r")
print filename
lines = data.readlines()
for line in lines:
cols = line.split("\t")
snp_name = cols[0][0:-2]
if i > 9:
snp_name = snp_name[0:-1]
if snp_name in snp_dict:
snp = snp_dict[snp_name]
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
else:
snp = SNP(snp_name, N, t)
snp_dict[snp_name] = snp
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
data.close()
print "################LENGTH:" + str(len(snp_dict))
FILE1 = open("results/median_bf.txt", "w")
FILE2 = open("results/average_bf.txt", "w")
#bf_median = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
#bf_avg = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
bf_median = ""
bf_avg = ""
for key in snp_dict:
snp = snp_dict[key]
bf_avg += snp.get_name()
bf_median += snp.get_name()
for k in range(0, N):
bf_a = snp.get_avg_bf(k)
bf_m = snp.get_median_bf(k)
bf_avg += "\t" + str(bf_a)
bf_median += "\t" + str(bf_m)
bf_avg += "\n"
bf_median += "\n"
FILE1.write(bf_median)
FILE2.write(bf_avg)
FILE1.close()
FILE2.close()
if __name__ == '__main__':
# Terminate if too few arguments
if len(sys.argv) < 3:
print 'usage: %s <number of vars> <num tests>' % sys.argv[0]
sys.exit(-1)
    compute_average_bf(sys.argv[1], sys.argv[2])
|
bsd-3-clause
| 8,263,703,149,634,889,000
| 23.792683
| 70
| 0.512789
| false
| 3.118098
| false
| false
| false
|
thenakliman/nirikshak
|
nirikshak/post_task/console.py
|
1
|
2103
|
# Copyright 2017 <thenakliman@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nirikshak.common import plugins
from nirikshak.post_task import base
LOG = logging.getLogger(__name__)
@plugins.register('console')
class FormatOutputConsole(base.FormatOutput):
@staticmethod
def _get_jaanch_result(jaanch_parameter):
if 'result' in jaanch_parameter['output']:
if str(jaanch_parameter['output']['result']) == \
str(jaanch_parameter['input']['result']):
return 'pass'
return 'fail'
return jaanch_parameter['input']['result']
def format_output(self, **kwargs):
jaanch_name = list(kwargs.keys())[0]
jaanch_parameter = kwargs[jaanch_name]
input_parameter = ''
for key, value in jaanch_parameter['input']['args'].items():
input_parameter = ("%s%s:%s," % (input_parameter, key, value))
jaanch_result = self._get_jaanch_result(jaanch_parameter)
jaanch_type = jaanch_parameter['type']
jaanch_name_type_param = ("%s,%s,%s" % (jaanch_name,
jaanch_type,
input_parameter))
separator = '.' * (120 - len(jaanch_name_type_param))
formatted_output = ("%s%s%s" % (jaanch_name_type_param, separator,
jaanch_result))
jaanch_parameter['formatted_output'] = formatted_output
LOG.info("%s output has been formatted for console", formatted_output)
return kwargs
|
apache-2.0
| 6,413,825,219,913,676,000
| 39.442308
| 78
| 0.622444
| false
| 3.809783
| false
| false
| false
|
matematik7/STM
|
tests/test_parser.py
|
1
|
6119
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# tests/test_parser.py
#
# Test input arguments parser
# ----------------------------------------------------------------
# copyright (c) 2015 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
from unittest import TestCase
import argparse, sys
from stm.configuration import Configuration
from stm.parser import Parser
# change argument parser to print to stdout
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
self.print_help(sys.stdout)
print('%s: error: %s\n' % (self.prog, message))
exit()
class Test_parser(TestCase):
def setUp(self):
self.parser = Parser()
def tearDown(self):
self.parser = None
def getConf(self, arguments):
return self.parser.getConfiguration(arguments.split(), ArgumentParser)
def assertInvalid(self, input):
with self.assertRaises(SystemExit):
self.getConf(input)
def test_empty(self):
self.assertInvalid('')
def test_direct(self):
conf = self.getConf('--input test.png test2.png --prefix pref --postfix post --folder fol')
self.assertItemsEqual(conf.input, ['test.png', 'test2.png'])
self.assertEqual(conf.name_prefix, 'pref')
self.assertEqual(conf.name_postfix, 'post')
self.assertEqual(conf.folder, 'fol')
def test_output(self):
self.assertInvalid('--input test.png test2.png --output test.png')
self.assertInvalid('--input . --output test.png')
conf = self.getConf('--input test.png --output test.png')
self.assertEqual(conf.output, 'test.png')
def test_recursive(self):
conf = self.getConf('--input test.png --recursive')
self.assertTrue(conf.recursive)
conf = self.getConf('--input test.png')
self.assertFalse(conf.recursive)
def test_debug(self):
conf = self.getConf('--input test.png --debug')
self.assertTrue(conf.debug)
conf = self.getConf('--input test.png')
self.assertFalse(conf.debug)
def test_verbose(self):
conf = self.getConf('--input test.png --verbose')
self.assertTrue(conf.verbose)
conf = self.getConf('--input test.png')
self.assertFalse(conf.verbose)
def test_file_format(self):
conf = self.getConf('--input test.png --fileFormat jpg')
self.assertEqual(conf.fileFormat, 'jpg')
self.assertInvalid('--input test.png --fileFormat krn')
def test_size(self):
conf = self.getConf('--input test.png --size 123x456')
self.assertEqual(conf.size, [123, 456])
self.assertInvalid('--input test.png --size 0x2')
self.assertInvalid('--input test.png --size -12x2')
self.assertInvalid('--input test.png --size 123')
self.assertInvalid('--input test.png --size 12x12x12')
self.assertInvalid('--input test.png --size xxx')
def test_mode(self):
conf = self.getConf('--input test.png --scale')
self.assertEqual(conf.cropMode, 'none')
conf = self.getConf('--input test.png --padd')
self.assertEqual(conf.cropMode, 'padd')
conf = self.getConf('--input test.png --crop')
self.assertEqual(conf.cropMode, 'crop')
conf = self.getConf('--input test.png --smart')
self.assertEqual(conf.cropMode, 'smart')
conf = self.getConf('--input test.png')
self.assertEqual(conf.cropMode, 'smart')
self.assertInvalid('--input test.png --scale --padd')
self.assertInvalid('--input test.png --padd --crop')
self.assertInvalid('--input test.png --crop --featured a')
self.assertInvalid('--input test.png --featured a --smart')
self.assertInvalid('--input test.png --smart --scale')
def test_mode_featured(self):
conf = self.getConf('--input test.png --featured 100x30,-15x30')
self.assertEqual(conf.featured, ([100,30], [-15, 30]))
self.assertInvalid('--input test.png --featured xxx,xxx')
self.assertInvalid('--input test.png --featured 10x10x10,15x30')
self.assertInvalid('--input test.png --featured 10x10,10x10,10x10')
self.assertInvalid('--input test.png --featured 10x10')
self.assertInvalid('--input test.png --featured 10,10x10')
def test_padd_color(self):
conf = self.getConf('--input test.png --padd --paddColor 0,100,200,250')
self.assertEqual(conf.paddColor, [0,100,200,250])
conf = self.getConf('--input test.png --paddColor 0,100,200')
self.assertEqual(conf.paddColor, [0,100,200,255])
self.assertInvalid('--input test.png --padd --paddColor 0')
self.assertInvalid('--input test.png --padd --paddColor 0,100')
self.assertInvalid('--input test.png --padd --paddColor 0,100,100,100,100')
self.assertInvalid('--input test.png --padd --paddColor -1,100,100')
self.assertInvalid('--input test.png --padd --paddColor 256,100,100')
def test_zoominess(self):
conf = self.getConf('--input test.png --zoominess 10')
self.assertEqual(conf.zoominess, 10)
conf = self.getConf('--input test.png --zoominess 0')
self.assertEqual(conf.zoominess, 0)
self.assertInvalid('--input test.png --zoominess 101')
self.assertInvalid('--input test.png --zoominess -1')
self.assertInvalid('--input test.png --zoominess 45 --padd')
self.assertInvalid('--input test.png --zoominess 45 --crop')
self.assertInvalid('--input test.png --zoominess 45 --scale')
def test_allowPadd(self):
conf = self.getConf('--input test.png --allowPadd')
self.assertTrue(conf.allowPadd)
conf = self.getConf('--input test.png')
self.assertFalse(conf.allowPadd)
|
mit
| -2,415,367,671,543,004,000
| 37.484277
| 99
| 0.594868
| false
| 3.919923
| true
| false
| false
|
ciudadanointeligente/write-it
|
nuntium/user_section/views.py
|
1
|
24845
|
import requests
from django.contrib.auth.decorators import login_required
from subdomains.utils import reverse
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView, CreateView, DetailView, View, ListView, RedirectView
from django.views.generic.edit import UpdateView, DeleteView, FormView
from mailit.forms import MailitTemplateForm
from instance.models import WriteItInstance, WriteItInstanceConfig, WriteitInstancePopitInstanceRecord
from ..models import Message,\
NewAnswerNotificationTemplate, ConfirmationTemplate, \
Answer, Moderation, \
AnswerWebHook
from .forms import WriteItInstanceBasicForm, \
NewAnswerNotificationTemplateForm, ConfirmationTemplateForm, \
WriteItInstanceAnswerNotificationForm, \
WriteItInstanceApiAutoconfirmForm, \
WriteItInstanceCreateForm, \
WriteItInstanceModerationForm, \
WriteItInstanceMaxRecipientsForm, \
WriteItInstanceRateLimiterForm, \
WriteItInstanceWebBasedForm, \
AnswerForm, RelatePopitInstanceWithWriteItInstance, \
WebhookCreateForm
from django.contrib import messages as view_messages
from django.utils.translation import ugettext as _
import json
from nuntium.popit_api_instance import PopitApiInstance
from nuntium.tasks import pull_from_popit
from nuntium.user_section.forms import WriteItPopitUpdateForm
from django.contrib.sites.models import Site
class UserAccountView(TemplateView):
template_name = 'nuntium/profiles/your-profile.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserAccountView, self).dispatch(*args, **kwargs)
class WriteItInstanceDetailBaseView(DetailView):
model = WriteItInstance
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(DetailView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
self.object = super(DetailView, self).get_object(queryset=queryset)
#OK I don't know if it is better to test by id
if not self.object.owner.__eq__(self.request.user):
raise Http404
return self.object
class WriteItInstanceContactDetailView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/profiles/contacts/contacts-per-writeitinstance.html'
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceContactDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceContactDetailView, self).get_context_data(**kwargs)
context['people'] = self.object.persons.order_by('name')
return context
class WriteItInstanceStatusView(WriteItInstanceDetailBaseView):
def render_to_response(self, context, **response_kwargs):
status = self.object.pulling_from_popit_status
return HttpResponse(
json.dumps(status),
content_type='application/json',
**response_kwargs
)
class WriteItInstanceApiDocsView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/writeitinstance_api_docs.html'
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceApiDocsView, self).get_context_data(*args, **kwargs)
current_domain = Site.objects.get_current().domain
context['api_base_url'] = 'http://' + current_domain + '/api/v1/'
return context
class WriteItInstanceTemplateUpdateView(DetailView):
model = WriteItInstance
template_name = 'nuntium/profiles/templates.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceTemplateUpdateView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
self.object = super(WriteItInstanceTemplateUpdateView, self).get_object(queryset=queryset)
#OK I don't know if it is better to test by id
if not self.object.owner.__eq__(self.request.user):
raise Http404
return self.object
def get_context_data(self, **kwargs):
context = super(WriteItInstanceTemplateUpdateView, self).get_context_data(**kwargs)
context['new_answer_template_form'] = NewAnswerNotificationTemplateForm(
writeitinstance=self.object,
instance=self.object.new_answer_notification_template,
)
context['mailit_template_form'] = MailitTemplateForm(
writeitinstance=self.object,
instance=self.object.mailit_template,
)
context['confirmation_template_form'] = ConfirmationTemplateForm(
writeitinstance=self.object,
instance=self.object.confirmationtemplate,
)
return context
class WriteItInstanceUpdateView(UpdateView):
form_class = WriteItInstanceBasicForm
template_name = "nuntium/writeitinstance_update_form.html"
model = WriteItInstance
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceUpdateView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
queryset = super(WriteItInstanceUpdateView, self).get_queryset().filter(owner=self.request.user)
return queryset
def get_success_url(self):
return reverse(
'writeitinstance_basic_update',
subdomain=self.object.slug,
)
class WriteItInstanceAdvancedUpdateView(UpdateView):
model = WriteItInstanceConfig
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceAdvancedUpdateView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return super(WriteItInstanceAdvancedUpdateView, self).get_queryset().filter(writeitinstance__owner=self.request.user)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceAdvancedUpdateView, self).get_context_data(**kwargs)
context['writeitinstance'] = self.object.writeitinstance
return context
def get_slug_field(self):
return 'writeitinstance__slug'
class WriteItInstanceAnswerNotificationView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceAnswerNotificationForm
template_name = 'nuntium/writeitinstance_answernotification_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_answernotification_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceRateLimiterView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceRateLimiterForm
template_name = 'nuntium/writeitinstance_ratelimiter_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_ratelimiter_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceModerationView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceModerationForm
template_name = 'nuntium/writeitinstance_moderation_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_moderation_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceApiAutoconfirmView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceApiAutoconfirmForm
template_name = 'nuntium/writeitinstance_autoconfirm_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_api_autoconfirm_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceMaxRecipientsView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceMaxRecipientsForm
template_name = 'nuntium/writeitinstance_max_recipients_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_maxrecipients_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceWebBasedView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceWebBasedForm
template_name = 'nuntium/writeitinstance_web_based_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_webbased_update',
subdomain=self.object.writeitinstance.slug
)
class UserSectionListView(ListView):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserSectionListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
queryset = super(UserSectionListView, self).get_queryset().filter(owner=self.request.user)
return queryset
class WriteItInstanceCreateView(CreateView):
model = WriteItInstance
form_class = WriteItInstanceCreateForm
template_name = 'nuntium/create_new_writeitinstance.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(WriteItInstanceCreateView, self).dispatch(*args, **kwargs)
def get_success_url(self):
return reverse(
'welcome',
subdomain=self.object.slug
)
def get_form_kwargs(self):
kwargs = super(WriteItInstanceCreateView, self).get_form_kwargs()
kwargs['owner'] = self.request.user
if 'data' in kwargs and kwargs['data'].get('legislature'):
kwargs['data'] = kwargs['data'].copy()
kwargs['data']['popit_url'] = kwargs['data']['legislature']
return kwargs
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceCreateView, self).get_context_data(*args, **kwargs)
countries_json_url = ('http://everypolitician.github.io/'
'everypolitician-writeinpublic/countries.json')
context['countries'] = requests.get(countries_json_url).json()
return context
class YourInstancesView(UserSectionListView):
model = WriteItInstance
template_name = 'nuntium/profiles/your-instances.html'
def get_context_data(self, **kwargs):
kwargs = super(YourInstancesView, self).get_context_data(**kwargs)
kwargs['new_instance_form'] = WriteItInstanceCreateForm()
kwargs['live_sites'] = kwargs['object_list'].filter(config__testing_mode=False)
kwargs['test_sites'] = kwargs['object_list'].filter(config__testing_mode=True)
return kwargs
class LoginRequiredMixin(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class WriteItInstanceOwnerMixin(LoginRequiredMixin):
def get_object(self):
slug = self.request.subdomain
pk = self.kwargs.get('pk')
return get_object_or_404(self.model, writeitinstance__slug=slug, writeitinstance__owner=self.request.user, pk=pk)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceOwnerMixin, self).get_context_data(**kwargs)
context['writeitinstance'] = self.object.writeitinstance
return context
# Note that there is no need for subclasses of this to also subclass WriteItInstanceOwnerMixin
# as it does its own owner checking.
class UpdateTemplateWithWriteitBase(LoginRequiredMixin, UpdateView):
def get_object(self):
return get_object_or_404(self.model, writeitinstance__slug=self.request.subdomain, writeitinstance__owner=self.request.user)
def get_form_kwargs(self):
kwargs = super(UpdateTemplateWithWriteitBase, self).get_form_kwargs()
kwargs['writeitinstance'] = self.object.writeitinstance
return kwargs
def get_success_url(self):
return reverse(
'writeitinstance_template_update',
subdomain=self.object.writeitinstance.slug,
)
class NewAnswerNotificationTemplateUpdateView(UpdateTemplateWithWriteitBase):
form_class = NewAnswerNotificationTemplateForm
model = NewAnswerNotificationTemplate
class ConfirmationTemplateUpdateView(UpdateTemplateWithWriteitBase):
form_class = ConfirmationTemplateForm
model = ConfirmationTemplate
class MessagesPerWriteItInstance(LoginRequiredMixin, ListView):
model = Message
template_name = 'nuntium/profiles/messages_per_instance.html'
def get_queryset(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
return super(MessagesPerWriteItInstance, self).get_queryset().filter(writeitinstance=self.writeitinstance)
def get_context_data(self, **kwargs):
context = super(MessagesPerWriteItInstance, self).get_context_data(**kwargs)
context['writeitinstance'] = self.writeitinstance
return context
class MessageDetail(WriteItInstanceOwnerMixin, DetailView):
model = Message
template_name = "nuntium/profiles/message_detail.html"
class AnswerEditMixin(View):
def get_message(self):
raise NotImplementedError
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
self.message = self.get_message()
if self.message.writeitinstance.owner != self.request.user:
raise Http404
return super(AnswerEditMixin, self).dispatch(*args, **kwargs)
def get_success_url(self):
return reverse(
'message_detail_private',
subdomain=self.message.writeitinstance.slug,
kwargs={'pk': self.message.pk},
)
class AnswerCreateView(AnswerEditMixin, CreateView):
model = Answer
template_name = "nuntium/profiles/create_answer.html"
form_class = AnswerForm
def get_message(self):
message = Message.objects.get(id=self.kwargs['pk'])
return message
def get_form_kwargs(self):
kwargs = super(AnswerCreateView, self).get_form_kwargs()
kwargs['message'] = self.message
return kwargs
class AnswerUpdateView(AnswerEditMixin, UpdateView):
model = Answer
template_name = "nuntium/profiles/update_answer.html"
fields = ['content']
def get_message(self):
return self.model.objects.get(id=self.kwargs['pk']).message
class AcceptMessageView(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AcceptMessageView, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user
)
message.moderate()
view_messages.info(self.request, _('The message "%(message)s" has been accepted') % {'message': message})
return reverse(
'messages_per_writeitinstance',
subdomain=message.writeitinstance.slug,
)
class RejectMessageView(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(RejectMessageView, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user
)
message.public = False
message.moderated = True
message.save()
view_messages.info(self.request, _('The message "%(message)s" has been rejected') % {'message': message})
return reverse(
'messages_per_writeitinstance',
subdomain=message.writeitinstance.slug,
)
class ModerationView(DetailView):
model = Moderation
slug_field = 'key'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ModerationView, self).dispatch(*args, **kwargs)
def get_queryset(self):
queryset = super(ModerationView, self).get_queryset()
queryset.filter(
message__writeitinstance__owner=self.request.user,
message__writeitinstance__slug=self.request.subdomain,
)
return queryset
class AcceptModerationView(ModerationView):
template_name = "nuntium/moderation_accepted.html"
def get(self, *args, **kwargs):
moderation = self.get_object()
moderation.message.moderate()
return super(AcceptModerationView, self).get(*args, **kwargs)
class RejectModerationView(ModerationView):
template_name = "nuntium/moderation_rejected.html"
def get(self, *args, **kwargs):
get = super(RejectModerationView, self).get(*args, **kwargs)
self.object.message.public = False
        # moderated is set to True so that users cannot
        # mistakenly moderate this message again
        # in the admin section
self.object.message.moderated = True
self.object.message.save()
return get
class WriteitPopitRelatingView(FormView):
form_class = RelatePopitInstanceWithWriteItInstance
template_name = 'nuntium/profiles/writeitinstance_and_popit_relations.html'
# This method also checks for instance ownership
def get_writeitinstance(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
def dispatch(self, *args, **kwargs):
self.get_writeitinstance()
return super(WriteitPopitRelatingView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(WriteitPopitRelatingView, self).get_form_kwargs()
kwargs['writeitinstance'] = self.writeitinstance
return kwargs
def get_success_url(self):
return reverse('relate-writeit-popit', subdomain=self.writeitinstance.slug)
def form_valid(self, form):
form.relate()
# It returns an AsyncResult http://celery.readthedocs.org/en/latest/reference/celery.result.html
# that we could use for future information about this process
return super(WriteitPopitRelatingView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(WriteitPopitRelatingView, self).get_context_data(**kwargs)
context['writeitinstance'] = self.writeitinstance
context['relations'] = self.writeitinstance.writeitinstancepopitinstancerecord_set.all()
return context
class ReSyncFromPopit(View):
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated():
raise Http404
return super(ReSyncFromPopit, self).dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
writeitinstance = get_object_or_404(WriteItInstance,
slug=self.request.subdomain,
owner=self.request.user)
popits_previously_related = PopitApiInstance.objects.filter(
writeitinstancepopitinstancerecord__writeitinstance=writeitinstance)
popit_api_instance = get_object_or_404(popits_previously_related, pk=kwargs['popit_api_pk'])
pull_from_popit.delay(writeitinstance, popit_api_instance)
return HttpResponse()
class WriteItPopitUpdateView(UpdateView):
form_class = WriteItPopitUpdateForm
model = WriteitInstancePopitInstanceRecord
def get_writeitinstance(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
def dispatch(self, *args, **kwargs):
self.get_writeitinstance()
if self.request.method != 'POST':
return self.http_method_not_allowed(*args, **kwargs)
return super(WriteItPopitUpdateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.save()
return HttpResponse(
json.dumps({
'id': form.instance.id,
'periodicity': form.instance.periodicity
}),
content_type='application/json'
)
def form_invalid(self, form):
super(WriteItPopitUpdateView, self).form_invalid(form)
return HttpResponse(
json.dumps({
'errors': form.errors
}),
content_type='application/json'
)
class WriteItDeleteView(DeleteView):
model = WriteItInstance
# @method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItDeleteView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
obj = super(WriteItDeleteView, self).get_object(queryset=queryset)
if not obj.owner == self.request.user:
raise Http404
return obj
def get_success_url(self):
url = reverse('your-instances')
return url
class MessageTogglePublic(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MessageTogglePublic, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user,
)
message.public = not message.public
message.save()
if message.public:
view_messages.info(self.request, _("This message has been marked as public"))
else:
view_messages.info(self.request, _("This message has been marked as private"))
return reverse('messages_per_writeitinstance', subdomain=self.request.subdomain)
class ContactUsView(TemplateView):
template_name = 'nuntium/profiles/contact.html'
class WelcomeView(DetailView):
model = WriteItInstance
template_name = 'nuntium/profiles/welcome.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WelcomeView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WelcomeView, self).get_context_data(**kwargs)
# passing URLs in for easy insertion into the translation tags
# because we're using an overridden version of the url tag that
# doesn't allow the use of "as" to pass the url as a variable
# that can be quoted within a translation block. *sigh*
context['url_template_update'] = reverse('writeitinstance_template_update', subdomain=self.request.subdomain)
context['url_basic_update'] = reverse('writeitinstance_basic_update', subdomain=self.request.subdomain)
context['url_maxrecipients_update'] = reverse('writeitinstance_maxrecipients_update', subdomain=self.request.subdomain)
context['url_answernotification_update'] = reverse('writeitinstance_answernotification_update', subdomain=self.request.subdomain)
context['url_recipients'] = reverse('contacts-per-writeitinstance', subdomain=self.request.subdomain)
context['url_data_sources'] = reverse('relate-writeit-popit', subdomain=self.request.subdomain)
return context
class WriteItInstanceWebHooksView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/profiles/webhooks.html'
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceWebHooksView, self).get_context_data(*args, **kwargs)
context['form'] = WebhookCreateForm(writeitinstance=self.object)
return context
class WriteItInstanceCreateWebHooksView(CreateView):
model = AnswerWebHook
form_class = WebhookCreateForm
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
self.writeitinstance = get_object_or_404(WriteItInstance,
slug=self.kwargs['slug'],
owner=self.request.user)
return super(WriteItInstanceCreateWebHooksView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(WriteItInstanceCreateWebHooksView, self).get_form_kwargs()
kwargs['writeitinstance'] = self.writeitinstance
return kwargs
def get_success_url(self):
return reverse(
'writeitinstance_webhooks',
subdomain=self.writeitinstance.slug,
)
|
gpl-3.0
| 4,677,099,118,318,445,000
| 36.417169
| 137
| 0.692413
| false
| 4.002739
| false
| false
| false
|
cinepost/Copperfield_FX
|
copper/shout/drivers/refined.py
|
1
|
2799
|
#!/usr/bin/env python
#
# This program shows how to write data to mplay by writing data to the
# imdisplay program using a pipe.
#
# This program uses the -k option on imdisplay to perform progressive
# refinement when rendering an image. The image is quite simple.
#
# Notes:
# This uses the simple format (no deep rasters)
# It only writes 8-bit data
#
import os, struct, time
MAGIC = (ord('h')<<24) + (ord('M')<<16) + (ord('P')<<8) + ord('0')
DATASIZE = 1 # See .c file for meaning
NCHANNELS = 4 # See .c file for meaning
EO_IMAGE = -2 # End of image marker
RES = 256
COLORS = [
(0, 0, 0, 255),
(255, 0, 0, 255),
(0, 255, 0, 255),
(0, 0, 255, 255),
(255, 255, 0, 255),
(0, 255, 255, 255),
(255, 0, 255, 255),
(255, 255, 255, 255),
]
def quadrant(x, y):
# Determine which quadrant color to use
n = (x > y) * 4
n += (x > RES/2) * 2
n += (y > RES/2)
return n
class MPlay:
def __init__(self, xres, yres, name="Test Application"):
self.XRES = xres
self.YRES = yres
# Open a pipe to imdisplay
# -p tells imdisplay to read the data from the pipe
# -k tells imdisplay to keep reading data after the image has
# been fully written
self.fp = os.popen('imdisplay -p -k -n "%s"' % name, 'w')
# The header is documented in the C code examples
header = struct.pack('I'*8, MAGIC, xres, yres, DATASIZE,
NCHANNELS, 0, 0, 0)
self.fp.write(header)
def close(self):
# To tell imdisplay that the image has been finished, we send a special
# header.
header = struct.pack('iiii', EO_IMAGE, 0, 0, 0)
self.fp.write(header)
self.fp.close()
self.fp = None
def writeTile(self, x0, x1, y0, y1, clr):
# The tile header is documented in the c code.
header = struct.pack('IIII', x0, x1, y0, y1)
self.fp.write(header)
# The tile's bounds are inclusive, so to find the number of pixels we
# need to add one to each dimension.
size = (x1 - x0 + 1) * (y1 - y0 + 1)
pixel = struct.pack('BBBB', clr[0], clr[1], clr[2], clr[3])
# Write a bunch of pixel data
self.fp.write(pixel * size)
def render(self, step):
for y in range(0, self.XRES, step):
for x in range(0, self.YRES, step):
self.writeTile(x, x+step-1, y, y+step-1, COLORS[quadrant(x, y)])
def main():
mp = MPlay(RES, RES)
mp.writeTile(0, RES-1, 0, RES-1, (255, 128, 64, 255))
step = 64
while step > 0:
time.sleep(.5) # Let mplay update the latest image we wrote
mp.render(step)
        step //= 2  # halve the tile size; floor division keeps range() happy with an int step
mp.close()
if __name__ == '__main__':
main()
|
unlicense
| -7,526,381,331,422,434,000
| 30.1
| 80
| 0.554484
| false
| 3.089404
| false
| false
| false
|
corpnewt/CorpBot.py
|
Cogs/BotAdmin.py
|
1
|
12950
|
import asyncio, discord, re, random
from operator import itemgetter
from discord.ext import commands
from Cogs import Utils, DisplayName, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(BotAdmin(bot, settings))
class BotAdmin(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.dregex = re.compile(r"(?i)(discord(\.gg|app\.com)\/)(?!attachments)([^\s]+)")
self.mention_re = re.compile(r"[0-9]{17,21}")
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
async def message(self, message):
# Check for discord invite links and remove them if found - per server settings
if not self.dregex.search(message.content): return None # No invite in the passed message - nothing to do
# Got an invite - let's see if we care
if not self.settings.getServerStat(message.guild,"RemoveInviteLinks",False): return None # We don't care
# We *do* care, let's see if the author is admin/bot-admin as they'd have power to post invites
ctx = await self.bot.get_context(message)
if Utils.is_bot_admin(ctx): return None # We are immune!
# At this point - we need to delete the message
return { 'Ignore' : True, 'Delete' : True}
@commands.command(pass_context=True)
async def removeinvitelinks(self, ctx, *, yes_no = None):
"""Enables/Disables auto-deleting discord invite links in chat (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Remove discord invite links","RemoveInviteLinks",yes_no))
@commands.command(pass_context=True)
async def setuserparts(self, ctx, member : discord.Member = None, *, parts : str = None):
"""Set another user's parts list (owner only)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
msg = 'I have not been claimed, *yet*.'
return await ctx.send(msg)
elif isOwner == False:
msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
return await ctx.send(msg)
if member == None:
msg = 'Usage: `{}setuserparts [member] "[parts text]"`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
try:
member = discord.utils.get(ctx.guild.members, name=member)
except:
return await ctx.send("That member does not exist")
if not parts:
parts = ""
self.settings.setGlobalUserStat(member, "Parts", parts)
msg = '*{}\'s* parts have been set to:\n{}'.format(DisplayName.name(member), parts)
await ctx.send(Utils.suppressed(ctx,msg))
@setuserparts.error
    async def setuserparts_error(self, ctx, error):
# do stuff
msg = 'setuserparts Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def ignore(self, ctx, *, member = None):
"""Adds a member to the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
msg = 'Usage: `{}ignore [member]`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
return await ctx.send('*{}* is already being ignored.'.format(DisplayName.name(member)))
# Let's ignore someone
ignoreList.append({ "Name" : member.name, "ID" : member.id })
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
await ctx.send('*{}* is now being ignored.'.format(DisplayName.name(member)))
@ignore.error
    async def ignore_error(self, ctx, error):
# do stuff
msg = 'ignore Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def listen(self, ctx, *, member = None):
"""Removes a member from the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
return await ctx.send('Usage: `{}listen [member]`'.format(ctx.prefix))
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
ignoreList.remove(user)
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
return await ctx.send("*{}* is no longer being ignored.".format(DisplayName.name(member)))
await ctx.send('*{}* wasn\'t being ignored...'.format(DisplayName.name(member)))
@listen.error
    async def listen_error(self, ctx, error):
# do stuff
msg = 'listen Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def ignored(self, ctx):
"""Lists the users currently being ignored."""
ignoreArray = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
promoSorted = sorted(ignoreArray, key=itemgetter('Name'))
if not len(promoSorted):
return await ctx.send("I'm not currently ignoring anyone.")
ignored = ["*{}*".format(DisplayName.name(ctx.guild.get_member(int(x["ID"])))) for x in promoSorted if ctx.guild.get_member(int(x["ID"]))]
await ctx.send("Currently Ignored Users:\n{}".format("\n".join(ignored)))
async def kick_ban(self, ctx, members_and_reason = None, command_name = "kick"):
# Helper method to handle the lifting for kick and ban
if not await Utils.is_bot_admin_reply(ctx): return
if not members_and_reason:
return await ctx.send('Usage: `{}{} [space delimited member mention/id] [reason]`'.format(ctx.prefix, command_name))
# Force a mention - we don't want any ambiguity
args = members_and_reason.split()
# Get our list of targets
targets = []
missed = []
unable = []
reason = ""
for index,item in enumerate(args):
if self.mention_re.search(item): # Check if it's a mention
# Resolve the member
mem_id = int(re.sub(r'\W+', '', item))
member = ctx.guild.get_member(mem_id)
if member is None and command_name in ("ban","unban"): # Didn't get a valid member, let's allow a pre-ban/unban if we can resolve them
try: member = await self.bot.fetch_user(mem_id)
except: pass
# If we have an invalid mention, save it to report later
if member is None:
missed.append(str(mem_id))
continue
# Let's check if we have a valid member and make sure it's not:
# 1. The bot, 2. The command caller, 3. Another bot-admin/admin
if isinstance(member, discord.Member) and (member.id == self.bot.user.id or member.id == ctx.author.id or Utils.is_bot_admin(ctx,member)):
unable.append(member.mention)
continue
                if member not in targets: targets.append(member) # Only add them if we don't already have them
else:
# Not a mention - must be the reason, dump the rest of the items into a string
# separated by a space
reason = " ".join(args[index:])
break
reason = reason if len(reason) else "No reason provided."
if not len(targets):
msg = "**With reason:**\n\n{}".format(reason)
if len(unable): msg = "**Unable to {}:**\n\n{}\n\n".format(command_name,"\n".join(unable)) + msg
if len(missed): msg = "**Unmatched ID{}:**\n\n{}\n\n".format("" if len(missed) == 1 else "s","\n".join(missed)) + msg
return await Message.EmbedText(title="No valid members passed!",description=msg,color=ctx.author).send(ctx)
# We should have a list of targets, and the reason - let's list them for confirmation
# then generate a 4-digit confirmation code that the original requestor needs to confirm
# in order to follow through
confirmation_code = "".join([str(random.randint(0,9)) for x in range(4)])
msg = "**To {} the following member{}:**\n\n{}\n\n**With reason:**\n\n\"{}\"\n\n**Please type:**\n\n`{}`{}{}".format(
command_name,
"" if len(targets) == 1 else "s",
"\n".join([x.name+"#"+x.discriminator for x in targets]),
reason if len(reason) else "None",
confirmation_code,
"" if not len(missed) else "\n\n**Unmatched ID{}:**\n\n{}".format("" if len(missed) == 1 else "s", "\n".join(missed)),
"" if not len(unable) else "\n\n**Unable to {}:**\n\n{}".format(command_name,"\n".join(unable))
)
confirmation_message = await Message.EmbedText(title="{} Confirmation".format(command_name.capitalize()),description=msg,color=ctx.author).send(ctx)
def check_confirmation(message):
return message.channel == ctx.channel and ctx.author == message.author # Just making sure it's the same user/channel
try: confirmation_user = await self.bot.wait_for('message', timeout=60, check=check_confirmation)
except: confirmation_user = ""
# Delete the confirmation message
await confirmation_message.delete()
# Verify the confirmation
if not confirmation_user.content == confirmation_code: return await ctx.send("{} cancelled!".format(command_name.capitalize()))
# We got the authorization!
message = await Message.EmbedText(title="{}ing...".format("Bann" if command_name == "ban" else "Unbann" if command_name == "unban" else "Kick"),color=ctx.author).send(ctx)
canned = []
cant = []
command = {"ban":ctx.guild.ban,"kick":ctx.guild.kick,"unban":ctx.guild.unban}.get(command_name.lower(),ctx.guild.kick)
for target in targets:
try:
await command(target,reason="{}#{}: {}".format(ctx.author.name,ctx.author.discriminator,reason))
canned.append(target)
except: cant.append(target)
msg = ""
if len(canned):
msg += "**I was ABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in canned]))
if len(cant):
msg += "**I was UNABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in cant]))
await Message.EmbedText(title="{} Results".format(command_name.capitalize()),description=msg).edit(ctx,message)
@commands.command(pass_context=True)
async def kick(self, ctx, *, members = None, reason = None):
"""Kicks the passed members for the specified reason.
All kick targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $kick @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"kick")
@commands.command(pass_context=True)
async def ban(self, ctx, *, members = None, reason = None):
"""Bans the passed members for the specified reason.
All ban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"ban")
@commands.command(pass_context=True)
async def unban(self, ctx, *, members = None, reason = None):
"""Unbans the passed members for the specified reason.
All unban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $unban @user1#1234 @user2#5678 @user3#9012 because we're nice"""
await self.kick_ban(ctx,members,"unban")
@commands.command()
async def banned(self, ctx, *, user_id = None):
"""Queries the guild's ban list for the passed user id and responds with whether they've been banned and the reason (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
try: all_bans = await ctx.guild.bans()
except: return await ctx.send("I couldn't get the ban list :(")
if not len(all_bans): return await Message.EmbedText(title="Ban List",description="No bans found",color=ctx.author).send(ctx)
orig_user = user_id
try: user_id = int(user_id) if user_id != None else None
except: user_id = -1 # Use -1 to indicate unresolved
entries = []
for ban in all_bans:
entries.append({"name":"{}#{} ({})".format(ban.user.name,ban.user.discriminator,ban.user.id),"value":ban.reason if ban.reason else "No reason provided"})
if user_id != None and user_id == ban.user.id:
# Got a match - display it
return await Message.Embed(
title="Ban Found For {}".format(user_id),
fields=[entries[-1]], # Send the last found entry
color=ctx.author
).send(ctx)
return await PickList.PagePicker(title="Ban List ({:,} total)".format(len(entries)),description=None if user_id == None else "No match found for '{}'.".format(orig_user),list=entries,ctx=ctx).pick()
|
mit
| 2,491,016,822,418,353,700
| 43.759717
| 200
| 0.669035
| false
| 3.229426
| false
| false
| false
|
proyectosdeley/proyectos_de_ley
|
migrate_db.py
|
1
|
2327
|
import dataset
import datetime
import os
import unicodedata
def convert_name_to_slug(name):
    """Takes a congresista name and returns its slug."""
    name = name.replace(",", "").lower()
    parts = name.split(" ")
    # Join at most the first three name parts with underscores; slicing with
    # [:3] keeps ``slug`` defined even for names with fewer than three parts.
    slug = "_".join(parts[:3])
    slug = unicodedata.normalize('NFKD', slug).encode('ascii', 'ignore')
    slug = str(slug, encoding="utf-8")
    return slug + "/"
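# Illustrative example (hypothetical name): convert_name_to_slug("GARCIA BELAUNDE, Victor Andres")
# returns "garcia_belaunde_victor/".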
old_db = os.path.join("..", "leyes.db")
new_db = "leyes_sqlite3.db"
db = dataset.connect("sqlite:///" + old_db)
res = db.query("select * from proyectos")
new_items = []
slugs = []  # translation table between name and URL
for i in res:
timestamp = datetime.datetime.fromtimestamp(i['timestamp'])
i['time_created'] = timestamp
i['time_edited'] = timestamp
try:
fecha_presentacion = datetime.datetime.strptime(
i['fecha_presentacion'],
'%d/%m/%Y',
)
except ValueError:
fecha_presentacion = datetime.datetime.strptime(
i['fecha_presentacion'],
'%d/%m/%y',
)
fecha_presentacion = datetime.datetime.date(fecha_presentacion)
i['fecha_presentacion'] = fecha_presentacion
i['expediente'] = i['link_to_pdf']
if i['pdf_url'] is None:
i['pdf_url'] = ''
if i['seguimiento_page'] is None:
i['seguimiento_page'] = ''
del i['link_to_pdf']
del i['timestamp']
del i['id']
del i['link']
congresistas = i['congresistas'].split(';')
for congre in congresistas:
congre = congre.strip()
obj = dict(nombre=congre)
if congre is not None and congre.strip() != '':
congre_slug = convert_name_to_slug(congre)
obj['slug'] = congre_slug
if obj not in slugs and congre_slug is not None:
slugs.append(obj)
new_items.append(i)
db = dataset.connect("sqlite:///" + new_db)
table = db['pdl_proyecto']
table.insert_many(new_items)
table = db['pdl_slug']
table.insert_many(slugs)
# fix domain from example.com to proyectosdeley.pe
table = db['django_site']
table.update(dict(id=1, domain='proyectosdeley.pe', name='proyectosdeley.pe'),
['id']
)
|
mit
| -3,755,114,459,877,640,000
| 25.443182
| 78
| 0.568973
| false
| 3.236439
| false
| false
| false
|
ubports-weblate/gallery-app
|
tests/autopilot/gallery_app/emulators/photo_viewer.py
|
1
|
9588
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
import logging
from autopilot.introspection.dbus import StateNotFoundError
import autopilot.logging
import ubuntuuitoolkit
from gallery_app.emulators import main_screen
from gallery_app.emulators.gallery_utils import(
GalleryAppException,
GalleryUtils
)
logger = logging.getLogger(__name__)
class PopupPhotoViewer(ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase):
def _get_header(self):
main = self.get_root_instance().select_single(main_screen.MainScreen)
return main.select_single('PageHeader',
objectName='photoViewerHeader')
def _open_overflow(self):
overflow_button = self._get_header().select_single(
objectName='overflow_action_button')
self.pointing_device.click_object(overflow_button)
return self.get_root_instance().wait_select_single(
objectName='actions_overflow_panel',
visible=True)
def click_action_button(self, action_object_name):
header = self._get_header()
if not header.visible:
main = self.get_root_instance().select_single(
main_screen.MainScreen)
x, y, w, h = main.globalRect
self.pointing_device.move(x + (w // 2), y + (h // 2))
self.pointing_device.click()
header.visible.wait_for(True)
try:
object_name = action_object_name + "_button"
button = header.select_single(objectName=object_name)
self.pointing_device.click_object(button)
except StateNotFoundError:
object_name = action_object_name + "_button"
popover = self._open_overflow()
button = popover.select_single(objectName=object_name)
self.pointing_device.click_object(button)
@autopilot.logging.log_action(logger.info)
def delete_current_photo(self, confirm=True):
self.click_action_button("deleteButton")
if confirm:
self.confirm_delete_photo()
else:
self.cancel_delete_photo()
@autopilot.logging.log_action(logger.debug)
def confirm_delete_photo(self):
self._click_delete_dialog_button("Yes")
def _click_delete_dialog_button(self, name):
delete_dialog = self._get_delete_dialog()
button = delete_dialog.wait_select_single(
"Button", objectName="deletePhotoDialog" + name, visible=True)
self.pointing_device.click_object(button)
delete_dialog.wait_until_destroyed()
def _get_delete_dialog(self):
delete_dialog = self.get_root_instance().wait_select_single(
objectName="deletePhotoDialog")
delete_dialog.visible.wait_for(True)
delete_dialog.opacity.wait_for(1)
return delete_dialog
@autopilot.logging.log_action(logger.debug)
def cancel_delete_photo(self):
self._click_delete_dialog_button('No')
class PhotoViewer(GalleryUtils):
def __init__(self, app):
super(PhotoViewer, self).__init__(self)
self.app = app
def get_popup_album_picker(self):
"""Returns the photo viewer album pickers."""
return self.app.wait_select_single("PopupAlbumPicker",
objectName="popupAlbumPicker")
def get_share_peer_picker(self):
"""Returns the photo viewer share picker."""
return self.app.wait_select_single(objectName="sharePicker",
visible=True)
def get_photo_editor(self):
"""Returns the photo edit dialog."""
return self.app.wait_select_single("PhotoEditor")
def get_revert_to_original_dialog(self):
"""Returns the revert to original dialog."""
return self.app.wait_select_single("Dialog",
objectName="revertPromptDialog")
def get_cancel_revert_to_original_button(self):
"""Returns the revert to original cancel button."""
return self.get_revert_to_original_dialog().wait_select_single(
"Button",
objectName="cancelRevertButton",
visible=True)
def get_confirm_revert_to_original_button(self):
"""Returns the revert to original confirm button."""
return self.get_revert_to_original_dialog().wait_select_single(
"Button",
objectName="confirmRevertButton",
visible=True)
def get_photo_component(self):
# Was using a list index (lp:1247711). Still needs fixing, I'm not
# convinced this is a suitable way to select the correct item.
return self.app.wait_select_single(
"SingleMediaViewer",
objectName="openedMedia0"
)
def get_photos_list(self):
return self.app.wait_select_single("MediaListView")
def get_editor_actions_bar(self):
"""Returns the actions bar for the editor."""
return self.app.select_single("ActionsBar",
objectName="editorActionsBar")
def get_editor_action_button_by_text(self, button_text):
"""Returns the action button from the editor by text."""
actions_bar = self.get_editor_actions_bar()
buttons = actions_bar.select_many('AbstractButton')
for button in buttons:
if str(button.text) == button_text:
return button
raise GalleryAppException(
'Editor action button {} could not be found'.format(button_text))
def get_crop_action_button(self):
"""Returns the crop item of the edit dialog."""
return self.get_editor_action_button_by_text("Crop")
def get_rotate_action_button(self):
"""Returns the rotate item of the edit dialog."""
return self.get_editor_action_button_by_text("Rotate")
def get_undo_menu_item(self):
"""Returns the undo item of the edit dialog."""
return self.app.select_single("Standard", objectName="undoListItem")
def get_redo_menu_item(self):
"""Returns the redo item of the edit dialog."""
return self.app.select_single("Standard", objectName="redoListItem")
def get_revert_action_button(self):
"""Returns the revert to original menu item in the edit dialog."""
return self.get_editor_action_button_by_text("Revert to Original")
def get_auto_enhance_menu_item(self):
"""Returns the 'auto enhance' menu item in the edit dialog."""
return self.app.select_single("Standard", objectName='enhanceListItem')
def get_delete_popover_cancel_item(self):
"""Returns the cancel button of the delete popover."""
return self.app.wait_select_single("Button",
objectName="deletePhotoDialogNo",
visible=True)
def get_opened_photo(self):
"""Returns the first opened photo."""
return self.app.wait_select_single("SingleMediaViewer",
objectName="openedMedia0")
def get_crop_interactor(self):
"""Returns the crop interactor."""
return self.app.wait_select_single("CropInteractor",
objectName="cropInteractor")
def get_crop_overlay(self):
"""Returns the crop overlay."""
return self.app.wait_select_single("CropOverlay",
objectName="cropOverlay")
def get_top_left_crop_corner(self):
"""Returns the top left corner of the crop overlay for dragging."""
return self.app.wait_select_single("CropCorner",
objectName="topLeftCropCorner")
def get_crop_overlays_crop_icon(self):
"""Returns the crop icon of the crop overlay."""
return self.app.wait_select_single("Button",
objectName="centerCropIcon",
visible=True)
def get_edit_preview(self):
"""Returns the edit preview."""
return self.app.wait_select_single("EditPreview",
objectName="editPreview")
def _click_item(self, item):
self.pointing_device.click_object(item)
def click_rotate_button(self):
rotate_item = self.get_rotate_action_button()
self._click_item(rotate_item)
def click_crop_button(self):
crop_item = self.get_crop_action_button()
self._click_item(crop_item)
def click_undo_item(self):
undo_item = self.get_undo_menu_item()
self._click_item(undo_item)
def click_redo_item(self):
redo_item = self.get_redo_menu_item()
self._click_item(redo_item)
def click_revert_button(self):
revert_item = self.get_revert_action_button()
self._click_item(revert_item)
def click_cancel_revert_button(self):
cancel_item = self.get_cancel_revert_to_original_button()
self._click_item(cancel_item)
def click_confirm_revert_button(self):
confirm_item = self.get_confirm_revert_to_original_button()
self._click_item(confirm_item)
def click_enhance_item(self):
enhance_item = self.get_auto_enhance_menu_item()
self._click_item(enhance_item)
|
gpl-3.0
| -9,074,009,003,350,327,000
| 37.66129
| 79
| 0.61577
| false
| 4.074798
| false
| false
| false
|
emonty/ansible-container
|
ansible_container/shipit/modules/k8s_deployment.py
|
1
|
9208
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: k8s_deployment
short_description: Create, replace or delete a deployment on a Kubernetes or OpenShift cluster.
description:
  - Create, replace or delete a deployment on a Kubernetes or OpenShift cluster by setting the C(state) to I(present)
    or I(absent).
- Supports check mode. Use check mode to view a list of actions the module will take.
options:
'''
EXAMPLES = '''
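# Illustrative example only: project, deployment and image names, ports and
# environment values below are placeholders, not values from this project.
- name: Ensure the web deployment exists
  k8s_deployment:
    project_name: example-project
    deployment_name: web
    state: present
    replicas: 2
    strategy: Rolling
    labels:
      app: web
    containers:
      - name: web
        image: registry.example.com/web:latest
        ports:
          - 8080
        env:
          LOG_LEVEL: info

- name: Remove the web deployment
  k8s_deployment:
    project_name: example-project
    deployment_name: web
    state: absent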
'''
RETURN = '''
'''
import logging
import logging.config
from ansible.module_utils.basic import *
from ansible_container.shipit.k8s_api import K8sApi
from ansible_container.shipit.exceptions import ShipItException
logger = logging.getLogger('k8s_deployment')
LOGGING = (
{
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'ansible-container.log'
}
},
'loggers': {
'k8s_deployment': {
'handlers': ['file'],
'level': 'INFO',
},
'container': {
'handlers': ['file'],
'level': 'INFO',
},
'compose': {
'handlers': [],
'level': 'INFO'
},
'docker': {
'handlers': [],
'level': 'INFO'
}
},
}
)
class K8SDeploymentManager(AnsibleModule):
def __init__(self):
self.arg_spec = dict(
project_name=dict(type='str', aliases=['namespace'], required=True),
state=dict(type='str', choices=['present', 'absent'], default='present'),
labels=dict(type='dict'),
deployment_name=dict(type='str'),
recreate=dict(type='bool', default=False),
replace=dict(type='bool', default=True),
selector=dict(type='dict'),
replicas=dict(type='int', default=1),
containers=dict(type='list'),
strategy=dict(type='str', default='Rolling', choices=['Recreate', 'Rolling']),
cli=dict(type='str', choices=['kubectl', 'oc'], default='oc'),
debug=dict(type='bool', default=False)
)
super(K8SDeploymentManager, self).__init__(self.arg_spec,
supports_check_mode=True)
self.project_name = None
self.state = None
self.labels = None
self.ports = None
self.deployment_name = None
self.selector = None
self.replace = None
self.replicas = None
self.containers = None
self.strategy = None
self.recreate = None
self.cli = None
self.api = None
self.debug = None
def exec_module(self):
for key in self.arg_spec:
setattr(self, key, self.params.get(key))
if self.debug:
LOGGING['loggers']['container']['level'] = 'DEBUG'
LOGGING['loggers']['k8s_deployment']['level'] = 'DEBUG'
logging.config.dictConfig(LOGGING)
self.api = K8sApi(target=self.cli)
actions = []
changed = False
deployments = dict()
results = dict()
try:
project_switch = self.api.set_project(self.project_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
if not project_switch:
actions.append("Create project %s" % self.project_name)
if not self.check_mode:
try:
self.api.create_project(self.project_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
if self.state == 'present':
deployment = self.api.get_resource('dc', self.deployment_name)
if not deployment:
template = self._create_template()
changed = True
actions.append("Create deployment %s" % self.deployment_name)
if not self.check_mode:
try:
self.api.create_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
elif deployment and self.recreate:
actions.append("Delete deployment %s" % self.deployment_name)
changed = True
template = self._create_template()
if not self.check_mode:
try:
self.api.delete_resource('dc', self.deployment_name)
self.api.create_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
elif deployment and self.replace:
template = self._create_template()
try:
template['status'] = dict(latestVersion=deployment['status']['latestVersion'] + 1)
except Exception as exc:
self.fail_json(msg="Failed to increment latestVersion for %s - %s" % (self.deployment_name,
str(exc)))
changed = True
actions.append("Update deployment %s" % self.deployment_name)
if not self.check_mode:
try:
self.api.replace_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
deployments[self.deployment_name.replace('-', '_') + '_deployment'] = self.api.get_resource('dc', self.deployment_name)
elif self.state == 'absent':
if self.api.get_resource('deployment', self.deployment_name):
changed = True
actions.append("Delete deployment %s" % self.deployment_name)
                if not self.check_mode:
try:
self.api.delete_resource('deployment', self.deployment_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
results['changed'] = changed
if self.check_mode:
results['actions'] = actions
if deployments:
results['ansible_facts'] = deployments
return results
def _create_template(self):
for container in self.containers:
if container.get('env'):
container['env'] = self._env_to_list(container['env'])
if container.get('ports'):
container['ports'] = self._port_to_container_ports(container['ports'])
template = dict(
apiVersion="v1",
kind="DeploymentConfig",
metadata=dict(
name=self.deployment_name,
),
spec=dict(
template=dict(
metadata=dict(),
spec=dict(
containers=self.containers
)
),
replicas=self.replicas,
strategy=dict(
type=self.strategy,
),
)
)
if self.labels:
template['metadata']['labels'] = self.labels
template['spec']['template']['metadata']['labels'] = self.labels
if self.selector:
template['spec']['selector'] = self.selector
return template
def _env_to_list(self, env_variables):
result = []
for name, value in env_variables.items():
result.append(dict(
name=name,
value=value
))
return result
@staticmethod
def _port_to_container_ports(ports):
result = []
for port in ports:
result.append(dict(containerPort=port))
return result
def main():
manager = K8SDeploymentManager()
results = manager.exec_module()
manager.exit_json(**results)
if __name__ == '__main__':
main()
|
lgpl-3.0
| 7,921,536,518,088,795,000
| 33.74717
| 131
| 0.535947
| false
| 4.474247
| false
| false
| false
|
smurfix/DaBroker
|
dabroker/client/codec.py
|
1
|
10644
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <matthias@urlichs.de>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from weakref import ref,WeakValueDictionary
from functools import partial
from . import ClientBaseRef,ClientBaseObj
from ..base import BaseRef,BaseObj, BrokeredInfo, BrokeredInfoInfo, adapters as baseAdapters, common_BaseObj,common_BaseRef, NoData,ManyData
from ..base.service import current_service
import logging
logger = logging.getLogger("dabroker.client.serial")
class _NotGiven: pass
class CacheProxy(object):
"""Can't weakref a string, so …"""
def __init__(self,data):
self.data = data
def kstr(v):
k = getattr(v,'__dict__',None)
if k is not None:
k = k.get('_key',None)
if k is not None:
return '.'.join(str(x) for x in k.key)
else:
return str(v)
def search_key(a,**kw):
"""Build a reproducible string from search keywords"""
if a is None:
a = ()
return ','.join(kstr(v) for v in a) + '|' + ','.join('{}:{}'.format(k, kstr(v)) for k,v in sorted(kw.items()))
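# e.g. search_key(None, name="x", limit=3) == '|limit:3,name:x' -- positional
# arguments (if any) appear before the '|', keyword arguments sorted after it.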
# This is the client's adapter storage.
adapters = baseAdapters[:]
def codec_adapter(cls):
adapters.append(cls)
return cls
# Registry of client-side base classes, keyed by the meta object's key tuple.
_registry = {}
def baseclass_for(*k):
"""\
Register a base class for a specific object type.
@k is the meta object's key tuple.
See test11 for an example which overrides the root object.
If your client class duplicates an attribute, it takes
precedence: the server's value of that attribute will not be
accessible.
Usage:
@baseclass_for("static","root","meta")
class MyRoot(ClientBaseObj):
def check_me(self):
return "This is a client-specific class"
You can use `None` as the last value (only), which behaves like an
any-single value placeholder.
"""
def proc(fn):
_registry[k] = fn
return fn
return proc
class ClientBrokeredInfo(ClientBaseObj,BrokeredInfo):
"""\
This is the base class for client-side meta objects.
"""
def __init__(self,*a,**k):
super(ClientBrokeredInfo,self).__init__(*a,**k)
self.searches = WeakValueDictionary()
self._class = None
def __call__(self, _is_meta=False, *a,**kw):
"""\
Return the class to use for objects with this as metaclass
"""
cls = self._class
if cls is None:
k = self._key.key
cls = _registry.get(k,None)
if cls is None:
# Allow a single wildcard at the end
cls = _registry.get((k[:-1])+(None,),object)
if _is_meta:
class ClientInfo(ClientBrokeredInfo,cls):
pass
else:
class ClientInfo(ClientBaseObj,cls):
pass
cls = ClientInfo
for k in self.fields.keys():
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,FieldProperty(k))
for k in self.refs.keys():
if k != '_meta':
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,RefProperty(k))
for k,v in self.backrefs.items():
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,BackRefProperty(k,v))
for k,v in self.calls.items():
if not hasattr(cls,k):
setattr(cls,k,RpcProperty(v))
self._class = cls
return cls(*a,**kw)
def find(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
for r in self.client.find(self, _cached=self._dab_cached, **kw):
if not isinstance(r,BaseObj):
r = r()
yield r
def get(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
res = list(self.client.find(self, _limit=2,_cached=self._dab_cached, **kw))
if len(res) == 0:
raise NoData(cls=self,**kw)
elif len(res) == 2:
raise ManyData(cls=self,**kw)
else:
res = res[0]
if not isinstance(res,BaseObj):
res = res()
return res
def count(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
return self.client.count(self, _cached=self._dab_cached, **kw)
def __repr__(self):
k=getattr(self,'_key',None)
if not k or not hasattr(self,'name'):
return super(ClientBrokeredInfo,self).__repr__()
return '‹I:{}:{}›'.format(self.name, '¦'.join(str(x) for x in k))
__str__=__unicode__=__repr__
class _ClientInfo(ClientBrokeredInfo):
"""Mix-in class for meta objects"""
_name = None
def __init__(self,*a,**k):
super(_ClientInfo,self).__init__(*a,**k)
class ClientBrokeredInfoInfo(ClientBrokeredInfo,BrokeredInfoInfo):
"""\
This is the client-side singleton meta object
(the root of DaBroker's object system)
"""
pass
client_broker_info_meta = ClientBrokeredInfoInfo()
class FieldProperty(object):
"""This property accessor handles updating non-referential attributes."""
# Note that there is no `__get__` method. It is not necessary,
# the value is stored in the object's `__dict__`;
# Python will get it from there.
def __init__(self, name):
self.name = name
def __set__(self, obj, val):
ov = obj.__dict__.get(self.name,_NotGiven)
obj.__dict__[self.name] = val
if ov is _NotGiven:
return
if obj._meta is None:
assert not ov or ov == val, (self.name,ov,val)
else:
import pdb;pdb.set_trace()
obj._meta._dab.obj_change(obj, self.name, ov,val)
class RefProperty(object):
"""This property accessor handles referred objects"""
def __init__(self, name):
self.name = name
def __get__(self, obj, type=None):
if obj is None:
return self
k = obj._refs.get(self.name,None)
if k is None:
return None
return obj._meta._dab.get(k)
def __set__(self, obj, val):
ov = obj._refs.get(self.name,_NotGiven)
if val is not None:
val = val._key
obj._refs[self.name] = val
if ov is _NotGiven:
return
obj._meta._dab.obj_change(obj, self.name, ov,val)
class BackRefProperty(object):
"""This property accessor handles retrieving one-to-many relationships"""
def __init__(self, name,refobj):
self.name = name
self.ref = ref(refobj)
def __get__(self, obj, type=None):
if obj is None:
return self
k = obj._refs.get(self.name,None)
if k is None:
k = obj._refs[self.name] = k = BackRefHandler(obj, self.name,self.ref)
return k
class BackRefHandler(object):
"""Manage a specific back reference"""
def __init__(self, obj, name,refobj):
self.obj = ref(obj)
self.name = name
self.ref = refobj
def _deref(self):
obj = self.obj()
ref = self.ref()
if obj is None or ref is None:
raise RuntimeError("weak ref: should not have been freed")
return obj,ref
def __getitem__(self,i):
obj,ref = self._deref()
res = obj._meta._dab.send("backref_idx",obj, self.name,i)
if isinstance(res,BaseRef):
res = res()
return res
def __len__(self):
obj,ref = self._deref()
return obj._meta._dab.send("backref_len",obj, self.name)
class RpcProperty(object):
"""This property accessor returns a shim which executes a RPC to the server."""
def __init__(self, proc, base=None):
self.name = proc.name
self.cached = getattr(proc,'cached',False)
self.for_class = getattr(proc,'for_class',None)
self.meta = getattr(proc,'meta',False)
self.base = base
def _do_call(self,obj, *a,**k):
with obj._dab.env:
if self.cached and not obj._obsolete:
kws = self.name+':'+search_key(a,**k)
ckey = " ".join(str(x) for x in obj._key.key)+":"+kws
res = obj._call_cache.get(kws,_NotGiven)
if res is not _NotGiven:
res = res.data
current_service.top._cache[ckey] # Lookup to increase counter
return res
res = obj._meta._dab.call(obj,self.name, a,k, _meta=self.meta)
if self.cached and not obj._obsolete:
rc = CacheProxy(res)
obj._call_cache[kws] = rc
current_service.top._cache[ckey] = rc
return res
def __get__(self, obj, type=None):
if self.for_class is None: # normal method
if obj is None:
return self
else: # static- or classmethod
obj=type
c = partial(RpcProperty._do_call, self,obj)
c.__name__ = str(self.name)
return c
def __call__(self, *a,**k):
# direct call, "classmethod"
assert self.base is not None
return self._do_call(self.base, *a,**k)
@codec_adapter
class client_BaseRef(common_BaseRef):
cls = ClientBaseRef
@staticmethod
def decode(k,c=None):
return ClientBaseRef(key=tuple(k),code=c)
@codec_adapter
class client_BaseObj(common_BaseObj):
@classmethod
def encode_ref(obj,k):
"""\
Encode a reference, without loading the actual object.
(Since we can't load the object without encoding a reference for it, that'd be somewhat difficult.)
"""
ref = obj._refs[k]
if ref is not None:
import pdb;pdb.set_trace()
ref = ClientBaseRef(meta=obj._meta, key=obj._key)
return ref
@classmethod
def decode(cls, k,c=None,f=None,r=None, _is_meta=False):
"""\
Convert this object to a class
"""
k = ClientBaseRef(key=tuple(k),code=c)
if not r or '_meta' not in r:
raise RuntimeError("Object without meta data")
m = r['_meta']
if not isinstance(m,ClientBrokeredInfo):
# assume it's a reference, so resolve it
r['_meta'] = m = m()
res = m(_is_meta)
res._key = k
# Got the class, now fill it with data
if f:
for k,v in f.items():
res.__dict__[k] = v
# do not use setattr here, it tries to record a change
if r:
for k,v in r.items():
if k == '_meta':
res._meta = v
else:
res._refs[k] = v
if f and _is_meta and 'calls' in f:
c = f['calls']
for k,v in c.items():
if getattr(v,'for_class',False):
res.__dict__[k] = RpcProperty(v,res)
pass
return current_service.top._add_to_cache(res)
@codec_adapter
class client_InfoObj(client_BaseObj):
cls = ClientBrokeredInfo
clsname = "Info"
@staticmethod
def decode(k=None,c=None,f=None, **kw):
if f is None:
# We always need the data, but this is something like a ref,
# so we need to go and get the real thing.
# NOTE this assumes that the codec doesn't throw away empty lists.
return ClientBaseRef(key=k,code=c)()
res = client_BaseObj.decode(_is_meta=True, k=k,c=c,f=f,**kw)
res.client = current_service.top
return res
@codec_adapter
class client_InfoMeta(object):
cls = ClientBrokeredInfoInfo
clsname = "_ROOT"
@staticmethod
def encode(obj, include=False):
return {}
@staticmethod
def decode(**attr):
return client_broker_info_meta
|
gpl-3.0
| -5,950,866,018,713,775,000
| 25.984772
| 140
| 0.659142
| false
| 2.935395
| false
| false
| false
|
tongfa/vent
|
wserve/wserve/views.py
|
1
|
1406
|
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from wserve.settings import VENT_WD, VENT_WWW_CLIENT_EP
import cPickle as pickle
import json, time, os
def address2key(address):
r = 0
for s in address[0].split('.'):
r = r << 8
r += int(s)
r = r << 16
r += address[1]
return r
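# Example (hypothetical address): address2key(('192.168.1.10', 8080)) == 0xC0A8010A1F90,
# i.e. the four IPv4 octets in the high bits with the 16-bit port below them.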
def index(request):
t = get_template('index.html')
return HttpResponse(t.render(Context()))
def audio(request):
t = get_template('audio.html')
return HttpResponse(t.render(Context()))
def longcall(request):
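    """Return a JSON listing of known cameras; each file in VENT_WD holds a pickled (ip, port) address."""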
time.sleep(1)
def url(c):
ep = VENT_WWW_CLIENT_EP
return 'http://%s%s/camera/%d/' % (
ep[0],
'' if ep[1] == 80 else ':%d' % ep[1],
address2key(c))
cameraList = os.listdir("%s" % VENT_WD)
if cameraList is None:
import code
code.interact(local=vars())
cameraList.sort()
cameraListIp = [pickle.load(open("%s/%s" % (VENT_WD, name), 'r'))
for name in cameraList]
# unique value, url, name
connList = [(address2key(c),url(c),c[0]) for c in cameraListIp]
response_data = {}
response_data['result'] = 'OK'
response_data['message'] = {'cameras': connList}
print response_data
return HttpResponse(json.dumps(response_data), content_type="application/json")
|
mit
| 2,273,925,697,469,779,000
| 28.914894
| 83
| 0.604552
| false
| 3.437653
| false
| false
| false
|
herqles-io/hq-manager
|
src/hqmanager/api/user.py
|
1
|
5040
|
import cherrypy
class UserAPIController(object):
exposed = True
def __init__(self, identity, assignment):
self.identity = identity
self.assignment = assignment
def index(self):
return "User api Index"
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
@cherrypy.tools.auth(permission="herqles.user.add")
def add(self):
data = cherrypy.request.json
if 'username' not in data:
raise cherrypy.HTTPError(400, "Missing username")
if 'password' not in data:
raise cherrypy.HTTPError(400, "Missing Password")
output = {'username': data['username'], 'identity': False, 'assignment': False}
if not self.identity.user_exists(data['username']):
self.identity.create_user(data['username'], data['password'])
output['identity'] = True
if not self.assignment.has_assignment(data['username']):
self.assignment.create_assignment(data['username'])
output['assignment'] = True
return output
@cherrypy.tools.json_out()
    @cherrypy.tools.auth()  # If the username is the request's own username, allow them to see it
def get(self, username):
headers = cherrypy.request.headers
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if username != cherrypy.request.user['name']:
if not self.assignment.has_permission_token(headers['X-Auth-Token'], 'herqles.user.get'):
raise cherrypy.HTTPError(403, "Invalid permissions")
permissions = self.assignment.get_permissions(username)
return {'username': username, 'permissions': permissions}
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.delete")
def delete(self, username):
output = {'username': username, 'identity': False, 'assignment': False}
        if self.identity.user_exists(username):
self.identity.delete_user(username)
output['identity'] = True
        if self.assignment.has_assignment(username):
self.assignment.delete_assignment(username)
output['assignment'] = True
return output
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def get_token(self):
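        """Exchange a username/password pair for an auth token and its expiry timestamp."""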
data = cherrypy.request.json
if 'username' not in data or 'password' not in data:
raise cherrypy.HTTPError(400, "Username and password required")
if not self.identity.auth(data['username'], data['password']):
raise cherrypy.HTTPError(401, "Invalid username or password")
if not self.assignment.has_assignment(data['username']):
raise cherrypy.HTTPError(404, "User does not exist")
(token, expire_at) = self.assignment.get_token(data['username'])
return {"token": token, 'expire_at': long(expire_at)}
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
@cherrypy.tools.auth() # We only need to check permissions sometimes
def change_password(self):
headers = cherrypy.request.headers
data = cherrypy.request.json
if 'username' not in data:
raise cherrypy.HTTPError(400, "Missing username")
if 'password' not in data:
raise cherrypy.HTTPError(400, "Missing password")
if data['username'] != cherrypy.request.user['name']:
if not self.assignment.has_permission_token(headers['X-Auth-Token'], 'herqles.user.password'):
raise cherrypy.HTTPError(403, "Invalid permissions")
self.identity.change_password(data['username'], data['password'])
self.assignment.get_token(data['username'], force=True)
return {'username': data['username']}
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.permission.add")
def add_permission(self):
data = cherrypy.request.json
username = data['username']
permission = data['permission']
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if self.assignment.has_permission_user(username, permission):
raise cherrypy.HTTPError(409, "User already has permission "+permission)
self.assignment.add_permission(username, permission)
return data
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.permission.delete")
def remove_permission(self):
data = cherrypy.request.json
username = data['username']
permission = data['permission']
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if self.assignment.has_permission_user(username, permission, exact=True) is False:
raise cherrypy.HTTPError(409, "User does not have permission "+permission)
        # Assumption: the assignment backend exposes remove_permission() analogous
        # to add_permission() used above; without a call here the permission is never revoked.
        self.assignment.remove_permission(username, permission)
        return data
|
mit
| 5,361,003,477,038,764,000
| 34.244755
| 106
| 0.644444
| false
| 4.315068
| false
| false
| false
|
davidgardenier/frbpoppy
|
tests/dm_snr/future.py
|
1
|
6523
|
"""Check the log N log F slope for future surveys."""
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
from frbpoppy import CosmicPopulation, Survey, LargePopulation, SurveyPopulation, hist
from frbpoppy import unpickle, pprint
import frbpoppy.direction_dists as did
import frbpoppy.galacticops as go
from tests.convenience import plot_aa_style, rel_path
from tests.rates.alpha_real import EXPECTED
MAKE = True
SURVEYS = ('parkes-htru',
'wsrt-apertif',
'fast-crafts',
'puma-full',
'chord',
'ska1-low',
'ska1-mid')
SIZE = 5e4
if MAKE:
# Calculate the fraction of the sky that the survey covers
surv_f_area = {}
for name in SURVEYS:
pop = CosmicPopulation.simple(5e5)
pop.gen_direction()
survey = Survey(name)
mask = survey.in_region(pop.frbs.ra, pop.frbs.dec,
pop.frbs.gl, pop.frbs.gb)
in_surv_region = np.sum(mask)
tot_region = len(mask)
area_sky = 4*np.pi*(180/np.pi)**2 # In sq. degrees
f_area = (survey.beam_size/area_sky)*(tot_region/in_surv_region)
surv_f_area[name] = f_area
print(f'{name} covers {f_area*100}% of the sky')
surv_pops = []
for name in SURVEYS:
# Set up survey
survey = Survey(name)
if name in ('parkes-htru', 'wsrt-apertif'):
survey.set_beam(model=name)
# Set up CosmicPopulation
pop = CosmicPopulation.optimal(SIZE, generate=False)
# Only generate FRBs in the survey region
pop.set_direction(model='uniform',
min_ra=survey.ra_min,
max_ra=survey.ra_max,
min_dec=survey.dec_min,
max_dec=survey.dec_max)
# Parkes also has galactic limits:
if name == 'parkes-htru':
pop.gen_index()
pop.gen_dist()
pop.gen_time()
# Generate FRBs just within the galactic constraints
pop.gen_direction()
# Gather ra, dec coordinate limits
lims = {'min_ra': survey.ra_min, 'max_ra': survey.ra_max,
'min_dec': survey.dec_min, 'max_dec': survey.dec_max}
def sample(n_gen):
ra, dec = did.uniform(n_srcs=n_gen, **lims)
gl, gb = go.radec_to_lb(ra, dec, frac=True)
coords = [ra, dec, gl, gb]
return coords
def accept(coords):
return survey.in_region(*coords)
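            # Rejection-sample sky positions: redraw the sources that fall outside
            # the survey region until every coordinate is accepted.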
coords = sample(int(SIZE))
mask = accept(coords)
reject, = np.where(~mask)
while reject.size > 0:
fill = sample(reject.size)
mask = accept(fill)
for i in range(len(coords)):
coords[i][reject[mask]] = fill[i][mask]
reject = reject[~mask]
# Assign the values
frbs = pop.frbs
frbs.ra, frbs.dec = coords[0], coords[1]
frbs.gl, frbs.gb = coords[2], coords[3]
# Continue with generation
pop.gen_gal_coords()
pop.gen_dm()
pop.gen_w()
pop.gen_lum()
pop.gen_si()
else:
pop.generate()
surv_pop = SurveyPopulation(pop, survey, scale_by_area=False)
surv_pop.source_rate.f_area = surv_f_area[name]
surv_pop.source_rate.scale_by_area()
# surv_pop.save()
surv_pops.append(surv_pop)
else:
surv_pops = []
for name in SURVEYS:
surv_pops.append(unpickle(f'optimal_{name}'))
# Start plot
plot_aa_style(cols=2)
plt.rcParams["figure.figsize"] = (3.556*3, 3.556)
fig, axes = plt.subplots(1, 3)
for ax in axes.flatten():
ax.set_aspect('auto')
# Get norm pop
y = 0
ys = []
names = []
rates = []
norm_sim_rate = surv_pops[0].source_rate.det
norm_real_rate = EXPECTED['parkes-htru'][0] / EXPECTED['parkes-htru'][1]
norm_rate = norm_sim_rate / norm_real_rate
for i, surv_pop in enumerate(surv_pops):
name = surv_pop.name.split('_')[-1]
pprint(name)
if surv_pop.n_sources() == 0:
print(surv_pop.source_rate)
print(f'{name} | no FRBs in population')
continue
names.append(name)
ys.append(y)
# Dimensions measure plot
ax = axes[0]
ax.set_xlabel(r'DM ($\textrm{pc}\ \textrm{cm}^{-3}$)')
ax.set_ylabel(r'\#')
ax.set_yscale('log')
bins, values = hist(surv_pop.frbs.dm, bin_type='lin', norm='frac',
n_bins=20)
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
# Fluence plot
ax = axes[1]
ax.set_xlabel('S/N')
ax.set_xscale('log')
ax.set_ylabel(r'\#(${>}\text{S/N}$)')
ax.set_yscale('log')
# Update fluence plot
bins, values = hist(surv_pop.frbs.snr, bin_type='log', norm='frac',
n_bins=25)
# Cumulative sum
values = np.cumsum(values[::-1])[::-1]
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
# Plot rates
ax = axes[2]
ax.set_xscale('log')
ax.set_xlabel(r'Rate (day$^{-1}$)')
rate = surv_pop.source_rate.det/norm_rate
print(f'rate: {rate}')
line = ax.errorbar(rate, y,
fmt='x',
label=rf'{name}')
ax.grid()
rates.append(rate)
y += 1
ax.yaxis.tick_right()
ax.set_yticks(ys)
ax.set_yticklabels(names)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i, y in enumerate(ax.get_yticklabels()):
y.set_color(colors[i])
ax.invert_yaxis() # labels read top-to-bottom
# Add thin grey horizontal lines
x_lim = ax.get_xlim()
ax.set_xlim(x_lim)
for i, y in enumerate(ys):
ax.plot((x_lim[0], rates[i]), (y, y), color='k', lw=0.5, zorder=0, ls='--')
for e in list(zip(SURVEYS, rates)):
pprint(e)
euclidean_lines = True
if euclidean_lines:
xlims = axes[1].get_xlim()
ylims = axes[1].get_ylim()
axes[1].set_xlim(xlims)
axes[1].set_ylim(ylims)
xs = np.logspace(np.log10(xlims[0]),
np.log10(xlims[1]),
100)
for n in range(-10, 15):
ys = 10**((np.log10(xs)+n)*-1.5)
axes[1].plot(xs, ys, 'k:', linewidth=0.25)
# plt.legend()
plt.tight_layout()
plt.savefig(rel_path('./plots/future_surveys.pdf'))
|
mit
| 1,674,763,514,524,134,700
| 28.251121
| 86
| 0.555879
| false
| 3.08708
| false
| false
| false
|
i02sopop/Kirinki
|
gstreamer/examples/video_receiver.py
|
1
|
2317
|
#!/usr/bin/env python
# -=- encoding: utf-8 -=-
################ VIDEO RECEIVER
import gobject, pygst
pygst.require("0.10")
import gst
# TODO: detect from the RTPSource element inside the GstRtpBin
REMOTE_HOST = '192.168.34.150'
READ_VIDEO_CAPS = 'video.caps'
pipeline = gst.Pipeline('server')
caps = open(READ_VIDEO_CAPS).read().replace('\\', '')
rtpbin = gst.element_factory_make('gstrtpbin', 'rtpbin')
rtpbin.set_property('latency', 400)
udpsrc_rtpin = gst.element_factory_make('udpsrc', 'udpsrc0')
udpsrc_rtpin.set_property('port', 10000)
udpsrc_caps = gst.caps_from_string(caps)
udpsrc_rtpin.set_property('caps', udpsrc_caps)
udpsrc_rtcpin = gst.element_factory_make('udpsrc', 'udpsrc1')
udpsrc_rtcpin.set_property('port', 10001)
udpsink_rtcpout = gst.element_factory_make('udpsink', 'udpsink0')
udpsink_rtcpout.set_property('host', REMOTE_HOST)
udpsink_rtcpout.set_property('port', 10002)
rtph264depay = gst.element_factory_make('rtph264depay', 'rtpdepay')
q1 = gst.element_factory_make("queue", "q1")
q2 = gst.element_factory_make("queue", "q2")
avimux = gst.element_factory_make('avimux', 'avimux')
filesink = gst.element_factory_make('filesink', 'filesink')
filesink.set_property('location', '/tmp/go.avi')
ffmpegcs = gst.element_factory_make("ffmpegcolorspace", "ffmpegcs")
ffdec264 = gst.element_factory_make('ffdec_h264', 'ffdec264')
autovideosink = gst.element_factory_make('autovideosink')
pipeline.add(rtpbin, udpsrc_rtpin, udpsrc_rtcpin, udpsink_rtcpout,
rtph264depay, q1, avimux, ffdec264, autovideosink)
# Receive the RTP and RTCP streams
udpsrc_rtpin.link_pads('src', rtpbin, 'recv_rtp_sink_0')
udpsrc_rtcpin.link_pads('src', rtpbin, 'recv_rtcp_sink_0')
# reply with RTCP stream
rtpbin.link_pads('send_rtcp_src_0', udpsink_rtcpout, 'sink')
# Plug the RTP stream into the rest of the pipeline...
def rtpbin_pad_added(obj, pad):
print "PAD ADDED"
print " obj", obj
print " pad", pad
rtpbin.link(rtph264depay)
rtpbin.connect('pad-added', rtpbin_pad_added)
gst.element_link_many(rtph264depay, q1, ffdec264, autovideosink)
def start():
pipeline.set_state(gst.STATE_PLAYING)
udpsink_rtcpout.set_locked_state(gst.STATE_PLAYING)
print "Started..."
def loop():
print "Running..."
gobject.MainLoop().run()
if __name__ == '__main__':
start()
loop()
|
agpl-3.0
| -9,069,301,017,670,709,000
| 33.073529
| 67
| 0.70738
| false
| 2.565891
| false
| false
| false
|
cloudysunny14/CloudySwitch
|
cloudyswitch/app/psyco_eventlet.py
|
1
|
2308
|
"""A wait callback to allow psycopg2 cooperation with eventlet.
Use `make_psycopg_green()` to enable eventlet support in Psycopg.
"""
# Copyright (C) 2010 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import psycopg2
from psycopg2 import extensions
from eventlet.hubs import trampoline
LOG = logging.getLogger(__name__)
def make_psycopg_green():
"""Configure Psycopg to be used with eventlet in non-blocking way."""
if not hasattr(extensions, 'set_wait_callback'):
raise ImportError(
"support for coroutines not available in this Psycopg version (%s)"
% psycopg2.__version__)
extensions.set_wait_callback(eventlet_wait_callback)
def eventlet_wait_callback(conn, timeout=-1):
"""A wait callback useful to allow eventlet to work with Psycopg."""
while 1:
state = conn.poll()
if state == extensions.POLL_OK:
break
elif state == extensions.POLL_READ:
trampoline(conn.fileno(), read=True)
elif state == extensions.POLL_WRITE:
trampoline(conn.fileno(), write=True)
else:
raise psycopg2.OperationalError(
"Bad result from poll: %r" % state)
|
apache-2.0
| -6,447,168,683,168,695,000
| 42.54717
| 79
| 0.717938
| false
| 4.204007
| false
| false
| false
|
tu-rbo/differentiable-particle-filters
|
methods/dpf_kitti.py
|
1
|
43029
|
import os
import numpy as np
import sonnet as snt
import tensorflow as tf
import matplotlib.pyplot as plt
from utils.data_utils_kitti import wrap_angle, compute_statistics, split_data, make_batch_iterator, make_repeating_batch_iterator, rotation_matrix, load_data_for_stats
from utils.method_utils import atan2, compute_sq_distance
from utils.plotting_utils import plot_maze, show_pause
from datetime import datetime
if tf.__version__ == '1.1.0-rc1' or tf.__version__ == '1.2.0':
from tensorflow.python.framework import ops
@ops.RegisterGradient("FloorMod")
def _mod_grad(op, grad):
x, y = op.inputs
gz = grad
x_grad = gz
y_grad = None # tf.reduce_mean(-(x // y) * gz, axis=[0], keep_dims=True)[0]
return x_grad, y_grad
class DPF():
def __init__(self, init_with_true_state, learn_odom, use_proposer, propose_ratio, proposer_keep_ratio, min_obs_likelihood, learn_gaussian_mle):
"""
:param init_with_true_state:
:param learn_odom:
:param use_proposer:
:param propose_ratio:
        :param proposer_keep_ratio:
        :param min_obs_likelihood:
        :param learn_gaussian_mle:
"""
# store hyperparameters which are needed later
self.init_with_true_state = init_with_true_state
self.learn_odom = learn_odom
        self.use_proposer = use_proposer and not init_with_true_state  # only use proposer if we do not initialize with true state
self.propose_ratio = propose_ratio if not self.init_with_true_state else 0.0
# define some more parameters and placeholders
self.state_dim = 5
self.action_dim = 3
self.observation_dim = 6
self.placeholders = {'o': tf.placeholder('float32', [None, None, 50, 150, self.observation_dim], 'observations'),
'a': tf.placeholder('float32', [None, None, 3], 'actions'),
's': tf.placeholder('float32', [None, None, 5], 'states'),
'num_particles': tf.placeholder('float32'),
'keep_prob': tf.placeholder_with_default(tf.constant(1.0), []),
'is_training': tf.placeholder_with_default(tf.constant(False), [])
}
self.num_particles_float = self.placeholders['num_particles']
self.num_particles = tf.to_int32(self.num_particles_float)
# build learnable modules
self.build_modules(min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle)
def build_modules(self, min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle):
"""
:param min_obs_likelihood:
:param proposer_keep_ratio:
:return: None
"""
# MEASUREMENT MODEL
# conv net for encoding the image
self.encoder = snt.Sequential([
snt.nets.ConvNet2D([16, 16, 16, 16], [[7, 7], [5, 5], [5, 5], [5, 5]], [[1,1], [1, 2], [1, 2], [2, 2]], [snt.SAME], activate_final=True, name='encoder/convnet'),
snt.BatchFlatten(),
lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
snt.Linear(128, name='encoder/linear'),
tf.nn.relu
])
# observation likelihood estimator that maps states and image encodings to probabilities
self.obs_like_estimator = snt.Sequential([
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(1, name='obs_like_estimator/linear'),
tf.nn.sigmoid,
lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
], name='obs_like_estimator')
# motion noise generator used for motion sampling
if learn_gaussian_mle:
self.mo_noise_generator = snt.nets.MLP([32, 32, 4], activate_final=False, name='mo_noise_generator')
else:
self.mo_noise_generator = snt.nets.MLP([32, 32, 2], activate_final=False, name='mo_noise_generator')
# odometry model (if we want to learn it)
if self.learn_odom:
self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim], activate_final=False, name='mo_transition_model')
# particle proposer that maps encodings to particles (if we want to use it)
if self.use_proposer:
self.particle_proposer = snt.Sequential([
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
lambda x: tf.nn.dropout(x, proposer_keep_ratio),
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(4, name='particle_proposer/linear'),
tf.nn.tanh,
])
self.noise_scaler1 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler1', initializer=np.array(0.0, dtype='float32'))))
self.noise_scaler2 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler2', initializer=np.array(0.0, dtype='float32'))))
def custom_build(self, inputs):
"""A custom build method to wrap into a sonnet Module."""
outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = tf.nn.dropout(outputs, self.placeholders['keep_prob'])
outputs = snt.BatchFlatten()(outputs)
outputs = snt.Linear(128)(outputs)
outputs = tf.nn.relu(outputs)
return outputs
def measurement_update(self, encoding, particles, means, stds):
"""
Compute the likelihood of the encoded observation for each particle.
:param encoding: encoding of the observation
:param particles:
:param means:
:param stds:
:return: observation likelihood
"""
# prepare input (normalize particles poses and repeat encoding per particle)
particle_input = self.transform_particles_as_input(particles, means, stds)
encoding_input = tf.tile(encoding[:, tf.newaxis, :], [1, tf.shape(particles)[1], 1])
input = tf.concat([encoding_input, particle_input], axis=-1)
# estimate the likelihood of the encoded observation for each particle, remove last dimension
obs_likelihood = snt.BatchApply(self.obs_like_estimator)(input)[:, :, 0]
return obs_likelihood
def transform_particles_as_input(self, particles, means, stds):
return ((particles - means['s']) / stds['s'])[..., 3:5]
def propose_particles(self, encoding, num_particles, state_mins, state_maxs):
duplicated_encoding = tf.tile(encoding[:, tf.newaxis, :], [1, num_particles, 1])
proposed_particles = snt.BatchApply(self.particle_proposer)(duplicated_encoding)
proposed_particles = tf.concat([
proposed_particles[:,:,:1] * (state_maxs[0] - state_mins[0]) / 2.0 + (state_maxs[0] + state_mins[0]) / 2.0,
proposed_particles[:,:,1:2] * (state_maxs[1] - state_mins[1]) / 2.0 + (state_maxs[1] + state_mins[1]) / 2.0,
atan2(proposed_particles[:,:,2:3], proposed_particles[:,:,3:4])], axis=2)
return proposed_particles
def motion_update(self, actions, particles, means, stds, state_step_sizes, learn_gaussian_mle, stop_sampling_gradient=False):
"""
Move particles according to odometry info in actions. Add learned noise.
:param actions:
:param particles:
:param means:
:param stds:
:param state_step_sizes:
:param stop_sampling_gradient:
:return: moved particles
"""
# 1. SAMPLE NOISY ACTIONS
# add dimension for particles
time_step = 0.103
if learn_gaussian_mle:
actions = tf.concat([particles[:, :, 3:4] - means['s'][:, :, 3:4], particles[:, :, 4:5] - means['s'][:, :, 4:5]], axis=-1)
# prepare input (normalize actions and repeat per particle)
action_input = actions / stds['s'][:, :, 3:5]
input = action_input
# estimate action noise
delta = snt.BatchApply(self.mo_noise_generator)(input)
delta = tf.concat([delta[:, :, 0:2] * state_step_sizes[3], delta[:, :, 2:4] * state_step_sizes[4]], axis=-1)
if stop_sampling_gradient:
delta = tf.stop_gradient(delta)
action_vel_f = tf.random_normal(tf.shape(particles[:, :, 3:4]), mean = delta[:, :, 0:1], stddev = delta[:, :, 1:2])
action_vel_rot = tf.random_normal(tf.shape(particles[:, :, 4:5]), mean = delta[:, :, 2:3], stddev = delta[:, :, 3:4])
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
new_x = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_y = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
new_theta = particles[:, :, 2:3] + particles[:, :, 4:5] * time_step
new_theta = wrap_angle(new_theta)  # keep the heading wrapped, as in the non-MLE branch below
new_v = particles[:, :, 3:4] + action_vel_f
new_theta_dot = particles[:, :, 4:5] + action_vel_rot
moved_particles = tf.concat([new_x, new_y, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles, delta
else:
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
random_input = tf.random_normal(tf.shape(particles[:, :, 3:5]))
noise = snt.BatchApply(self.mo_noise_generator)(random_input)
noise = noise - tf.reduce_mean(noise, axis=1, keep_dims=True)
new_z = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_x = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
new_theta = wrap_angle(particles[:, :, 2:3] + particles[:, :, 4:5] * time_step)
new_v = particles[:, :, 3:4] + noise[:, :, :1] * state_step_sizes[3]
new_theta_dot = particles[:, :, 4:5] + noise[:, :, 1:] * state_step_sizes[4]
moved_particles = tf.concat([new_z, new_x, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles
def compile_training_stages(self, sess, batch_iterators, particle_list, particle_probs_list, encodings, means, stds, state_step_sizes, state_mins, state_maxs, learn_gaussian_mle, learning_rate, plot_task):
# TRAINING!
losses = dict()
train_stages = dict()
std = 0.25
# TRAIN ODOMETRY
if self.learn_odom:
# apply model
motion_samples = self.motion_update(self.placeholders['a'][:,0],
self.placeholders['s'][:, :1],
means, stds, state_step_sizes,
stop_sampling_gradient=True)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
losses['motion_mse'] = tf.reduce_mean(sq_distance, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_odom'] = {
'train_op': optimizer.minimize(losses['motion_mse']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['motion_mse'],
'validation_loss': 'motion_mse',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MOTION MODEL
if learn_gaussian_mle:
motion_samples, motion_params = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, 1, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
diff_in_states = self.placeholders['s'][:, 1:2] - self.placeholders['s'][:, :1]
activations_vel_f = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 1] ** 2) * tf.exp(
-(diff_in_states[:, :, 3] - motion_params[:, :, 0]) ** 2 / (2.0 * motion_params[:, :, 1] ** 2))
activations_vel_rot = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 3] ** 2) * tf.exp(
-(diff_in_states[:, :, 4] - motion_params[:, :, 2]) ** 2 / (2.0 * motion_params[:, :, 3] ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + (tf.reduce_sum(activations_vel_f, axis=-1, name='loss1') * tf.reduce_sum(activations_vel_rot, axis=-1, name='loss2'))))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
else:
motion_samples = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, self.num_particles, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
activations_sample = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations_sample, axis=-1, name='loss')))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MEASUREMENT MODEL
# apply model for all pairs of observations and states in that batch
test_particles = tf.tile(self.placeholders['s'][tf.newaxis, :, 0], [self.batch_size, 1, 1])
measurement_model_out = self.measurement_update(encodings[:, 0], test_particles, means, stds)
# define loss (correct -> 1, incorrect -> 0) and optimizer
correct_samples = tf.diag_part(measurement_model_out)
incorrect_samples = measurement_model_out - tf.diag(tf.diag_part(measurement_model_out))
losses['measurement_heuristic'] = tf.reduce_sum(-tf.log(correct_samples)) / tf.cast(self.batch_size, tf.float32) \
+ tf.reduce_sum(-tf.log(1.0 - incorrect_samples)) / tf.cast(self.batch_size * (self.batch_size - 1), tf.float32)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_measurement_model'] = {
'train_op': optimizer.minimize(losses['measurement_heuristic']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['measurement_heuristic'],
'validation_loss': 'measurement_heuristic',
'plot': lambda e: self.plot_measurement_model(sess, batch_iterators['val1'], measurement_model_out) if e % 1 == 0 else None
}
# TRAIN PARTICLE PROPOSER
if self.use_proposer:
# apply model (but only compute gradients until the encoding,
# otherwise we would unlearn it and the observation likelihood wouldn't work anymore)
proposed_particles = self.propose_particles(tf.stop_gradient(encodings[:, 0]), self.num_particles, state_mins, state_maxs)
# define loss and optimizer
std = 0.2
sq_distance = compute_sq_distance(proposed_particles, self.placeholders['s'][:, :1], state_step_sizes)
activations = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['proposed_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=-1)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_particle_proposer'] = {
'train_op': optimizer.minimize(losses['proposed_mle']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['proposed_mle'],
'validation_loss': 'proposed_mle',
'plot': lambda e: self.plot_particle_proposer(sess, next(batch_iterators['val1']), proposed_particles, plot_task) if e % 10 == 0 else None
}
# END-TO-END TRAINING
# model was already applied further up -> particle_list, particle_probs_list
# define losses and optimizer
# first loss (which is being optimized)
sq_distance = compute_sq_distance(particle_list[:, :, :, 3:5], self.placeholders['s'][:, :, tf.newaxis, 3:5], state_step_sizes[3:5])
activations = particle_probs_list[:, :] / tf.sqrt(2 * np.pi * self.particle_std ** 2) * tf.exp(
-sq_distance / (2.0 * self.particle_std ** 2))
losses['mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=2, name='loss')))
# second loss (which we will monitor during execution)
pred = self.particles_to_state(particle_list, particle_probs_list)
sq_error = compute_sq_distance(pred[:, -1, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
sq_dist = compute_sq_distance(self.placeholders['s'][:, 0, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
losses['m/m'] = tf.reduce_mean(sq_error**0.5/sq_dist**0.5)
sq_error = compute_sq_distance(pred[:, -1, 2:3], self.placeholders['s'][:, -1, 2:3], [np.pi/180.0])
losses['deg/m'] = tf.reduce_mean(sq_error ** 0.5 / sq_dist ** 0.5)
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
# put everything together
train_stages['train_e2e'] = {
'train_op': optimizer.minimize(losses['mle']),
'batch_iterator_names': {'train': 'train', 'val': 'val'},
'monitor_losses': ['m/m', 'deg/m', 'mle'],
'validation_loss': 'deg/m',
'plot': lambda e: self.plot_particle_filter(sess, next(batch_iterators['val_ex']), particle_list,
particle_probs_list, state_step_sizes, plot_task) if e % 1 == 0 else None
}
return losses, train_stages
def load(self, sess, model_path, model_file='best_validation', statistics_file='statistics.npz', connect_and_initialize=True, modules=('encoder', 'mo_noise_generator', 'mo_transition_model', 'obs_like_estimator', 'particle_proposer')):
if type(modules) not in [type(list()), type(tuple())]:
raise Exception('modules must be a list or tuple, not a ' + str(type(modules)))
# build the tensorflow graph
if connect_and_initialize:
# load training data statistics (which are needed to build the tf graph)
statistics = dict(np.load(os.path.join(model_path, statistics_file)))
for key in statistics.keys():
if statistics[key].shape == ():
statistics[key] = statistics[key].item() # convert 0d array of dictionary back to a normal dictionary
# connect all modules into the particle filter
self.connect_modules(**statistics)
init = tf.global_variables_initializer()
sess.run(init)
# load variables
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
vars_to_load = []
loaded_modules = set()
for v in all_vars:
for m in modules:
if m in v.name:
vars_to_load.append(v)
loaded_modules.add(m)
print('Loading all modules')
# note: a plain Saver() restores every saved variable; the vars_to_load list
# computed above is not passed in, so it is informational only
saver = tf.train.Saver()
saver.restore(sess, os.path.join(model_path, model_file))
# def fit(self, sess, data, model_path, train_individually, train_e2e, split_ratio, seq_len, batch_size, epoch_length, num_epochs, patience, learning_rate, dropout_keep_ratio, num_particles, particle_std, plot_task=None, plot=False):
def fit(self, sess, data, model_path, train_individually, train_e2e, split_ratio, seq_len, batch_size, epoch_length, num_epochs, patience, learning_rate, dropout_keep_ratio, num_particles, particle_std, learn_gaussian_mle, plot_task=None, plot=False):
if plot:
plt.ion()
self.particle_std = particle_std
mean_loss_for_plot = np.zeros((1,))
means, stds, state_step_sizes, state_mins, state_maxs = compute_statistics(data)
data = split_data(data, ratio=split_ratio)
epoch_lengths = {'train': epoch_length, 'val': epoch_length*2}
batch_iterators = {'train': make_batch_iterator(data['train'], seq_len=seq_len, batch_size=batch_size),
'val': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=seq_len),
'train_ex': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=seq_len),
'val_ex': make_batch_iterator(data['val'], batch_size=batch_size, seq_len=seq_len),
'train1': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=1),
'train2': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=2),
'val1': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=1),
'val2': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=2),
}
# build the tensorflow graph by connecting all modules in the particle filter
particles, particle_probs, encodings, particle_list, particle_probs_list = self.connect_modules(means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle)
# define losses and train stages for different ways of training (e.g. training individual models and e2e training)
losses, train_stages = self.compile_training_stages(sess, batch_iterators, particle_list, particle_probs_list,
encodings, means, stds, state_step_sizes, state_mins,
state_maxs, learn_gaussian_mle, learning_rate, plot_task)
# initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# save statistics and prepare saving variables
if not os.path.exists(model_path):
os.makedirs(model_path)
np.savez(os.path.join(model_path, 'statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
saver = tf.train.Saver()
save_path = os.path.join(model_path, 'best_validation')
# define the training curriculum
curriculum = []
if train_individually:
if self.learn_odom:
curriculum += ['train_odom']
curriculum += ['train_measurement_model']
curriculum += ['train_motion_sampling']
if self.use_proposer:
curriculum += ['train_particle_proposer']
if train_e2e:
curriculum += ['train_e2e']
# split data for early stopping
data_keys = ['train']
if split_ratio < 1.0:
data_keys.append('val')
# define log dict
log = {c: {dk: {lk: {'mean': [], 'se': []} for lk in train_stages[c]['monitor_losses']} for dk in data_keys} for c in curriculum}
# go through curriculum
for c in curriculum:
stage = train_stages[c]
best_val_loss = np.inf
best_epoch = 0
epoch = 0
if c == 'train_e2e':
saver.save(sess, os.path.join(model_path, 'before_e2e/best_validation'))
np.savez(os.path.join(model_path, 'before_e2e/statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
while epoch < num_epochs and epoch - best_epoch < patience:
# training
for dk in data_keys:
# don't train in the first epoch, just evaluate the initial parameters
if dk == 'train' and epoch == 0:
continue
# set up loss lists which will be filled during the epoch
loss_lists = {lk: [] for lk in stage['monitor_losses']}
for e in range(epoch_lengths[dk]):
# t0 = time.time()
# pick a batch from the right iterator
batch = next(batch_iterators[stage['batch_iterator_names'][dk]])
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: num_particles},
}
if dk == 'train':
input_dict[self.placeholders['keep_prob']] = dropout_keep_ratio
input_dict[self.placeholders['is_training']] = True
monitor_losses = {l: losses[l] for l in stage['monitor_losses']}
if dk == 'train':
s_losses, _ = sess.run([monitor_losses, stage['train_op']], input_dict)
else:
s_losses = sess.run(monitor_losses, input_dict)
for lk in stage['monitor_losses']:
loss_lists[lk].append(s_losses[lk])
# after each epoch, compute and log statistics
for lk in stage['monitor_losses']:
log[c][dk][lk]['mean'].append(np.mean(loss_lists[lk]))
log[c][dk][lk]['se'].append(np.std(loss_lists[lk], ddof=1) / np.sqrt(len(loss_lists[lk])))
# check whether the current model is better than all previous models
if 'val' in data_keys:
current_val_loss = log[c]['val'][stage['validation_loss']]['mean'][-1]
mean_loss_for_plot = np.append(mean_loss_for_plot,current_val_loss)
if current_val_loss < best_val_loss:
best_val_loss = current_val_loss
best_epoch = epoch
# save current model
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
else:
txt = 'epoch {:>3} == '.format(epoch)
else:
best_epoch = epoch
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
# after going through all data sets, do a print out of the current result
for lk in stage['monitor_losses']:
txt += '{}: '.format(lk)
for dk in data_keys:
if len(log[c][dk][lk]['mean']) > 0:
txt += '{:.2f}+-{:.2f}/'.format(log[c][dk][lk]['mean'][-1], log[c][dk][lk]['se'][-1])
txt = txt[:-1] + ' -- '
print(txt)
if plot:
stage['plot'](epoch)
epoch += 1
# after running out of patience, restore the model with lowest validation loss
saver.restore(sess, save_path)
return log
def predict(self, sess, batch, return_particles=False, **kwargs):
# define input dict, use the first state only if we do tracking
input_dict = {self.placeholders['o']: batch['o'],
self.placeholders['a']: batch['a'],
self.placeholders['num_particles']: 100}
if self.init_with_true_state:
input_dict[self.placeholders['s']] = batch['s'][:, :1]
if return_particles:
return sess.run([self.pred_states, self.particle_list, self.particle_probs_list], input_dict)
else:
return sess.run(self.pred_states, input_dict)
def connect_modules(self, means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle=False):
# get shapes
self.batch_size = tf.shape(self.placeholders['o'])[0]
self.seq_len = tf.shape(self.placeholders['o'])[1]
# we use the static shape here because we need it to build the graph
self.action_dim = self.placeholders['a'].get_shape()[-1].value
encodings = snt.BatchApply(self.encoder)((self.placeholders['o'] - means['o']) / stds['o'])
# initialize particles
if self.init_with_true_state:
# tracking with known initial state
initial_particles = tf.tile(self.placeholders['s'][:, 0, tf.newaxis, :], [1, self.num_particles, 1])
else:
# global localization
if self.use_proposer:
# propose particles from observations
initial_particles = self.propose_particles(encodings[:, 0], self.num_particles, state_mins, state_maxs)
else:
# sample particles randomly
initial_particles = tf.concat(
[tf.random_uniform([self.batch_size, self.num_particles, 1], state_mins[d], state_maxs[d]) for d in
range(self.state_dim)], axis=-1, name='particles')
initial_particle_probs = tf.ones([self.batch_size, self.num_particles],
name='particle_probs') / self.num_particles_float
# assumes that samples has the correct size
def permute_batch(x, samples):
# get shapes
batch_size = tf.shape(x)[0]
num_particles = tf.shape(x)[1]
sample_size = tf.shape(samples)[1]
# compute 1D indices into the 2D array
idx = samples + num_particles * tf.tile(
tf.reshape(tf.range(batch_size), [batch_size, 1]),
[1, sample_size])
# index using the 1D indices and reshape again
result = tf.gather(tf.reshape(x, [batch_size * num_particles, -1]), idx)
result = tf.reshape(result, tf.shape(x[:,:sample_size]))
return result
def loop(particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i):
num_proposed_float = tf.round((self.propose_ratio ** tf.cast(i, tf.float32)) * self.num_particles_float)
num_proposed = tf.cast(num_proposed_float, tf.int32)
num_resampled_float = self.num_particles_float - num_proposed_float
num_resampled = tf.cast(num_resampled_float, tf.int32)
if self.propose_ratio < 1.0:
# resampling
basic_markers = tf.linspace(0.0, (num_resampled_float - 1.0) / num_resampled_float, num_resampled)
random_offset = tf.random_uniform([self.batch_size], 0.0, 1.0 / num_resampled_float)
markers = random_offset[:, None] + basic_markers[None, :] # shape: batch_size x num_resampled
cum_probs = tf.cumsum(particle_probs, axis=1)
marker_matching = markers[:, :, None] < cum_probs[:, None, :] # shape: batch_size x num_resampled x num_particles
samples = tf.cast(tf.argmax(tf.cast(marker_matching, 'int32'), dimension=2), 'int32')
standard_particles = permute_batch(particles, samples)
standard_particle_probs = tf.ones([self.batch_size, num_resampled])
standard_particles = tf.stop_gradient(standard_particles)
standard_particle_probs = tf.stop_gradient(standard_particle_probs)
# motion update
if learn_gaussian_mle:
standard_particles, _ = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
else:
standard_particles = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
# measurement update
standard_particle_probs *= self.measurement_update(encodings[:, i], standard_particles, means, stds)
if self.propose_ratio > 0.0:
# proposed particles
proposed_particles = self.propose_particles(encodings[:, i], num_proposed, state_mins, state_maxs)
proposed_particle_probs = tf.ones([self.batch_size, num_proposed])
# NORMALIZE AND COMBINE PARTICLES
if self.propose_ratio == 1.0:
particles = proposed_particles
particle_probs = proposed_particle_probs
elif self.propose_ratio == 0.0:
particles = standard_particles
particle_probs = standard_particle_probs
else:
standard_particle_probs *= (num_resampled_float / self.num_particles_float) / tf.reduce_sum(standard_particle_probs, axis=1, keep_dims=True)
proposed_particle_probs *= (num_proposed_float / self.num_particles_float) / tf.reduce_sum(proposed_particle_probs, axis=1, keep_dims=True)
particles = tf.concat([standard_particles, proposed_particles], axis=1)
particle_probs = tf.concat([standard_particle_probs, proposed_particle_probs], axis=1)
# NORMALIZE PROBABILITIES
particle_probs /= tf.reduce_sum(particle_probs, axis=1, keep_dims=True)
particle_list = tf.concat([particle_list, particles[:, tf.newaxis]], axis=1)
particle_probs_list = tf.concat([particle_probs_list, particle_probs[:, tf.newaxis]], axis=1)
return particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i + 1
# reshapes and sets the first shape sizes to None (which is necessary to keep the shape consistent in the while loop)
particle_list = tf.reshape(initial_particles,
shape=[self.batch_size, -1, self.num_particles, self.state_dim])
particle_probs_list = tf.reshape(initial_particle_probs, shape=[self.batch_size, -1, self.num_particles])
additional_probs_list = tf.reshape(tf.ones([self.batch_size, self.num_particles, 4]), shape=[self.batch_size, -1, self.num_particles, 4])
# run the filtering process
particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i = tf.while_loop(
lambda *x: x[-1] < self.seq_len, loop,
[initial_particles, initial_particle_probs, particle_list, particle_probs_list, additional_probs_list,
tf.constant(1, dtype='int32')], name='loop')
# compute mean of particles
self.pred_states = self.particles_to_state(particle_list, particle_probs_list)
self.particle_list = particle_list
self.particle_probs_list = particle_probs_list
return particles, particle_probs, encodings, particle_list, particle_probs_list
def particles_to_state(self, particle_list, particle_probs_list):
mean_position = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, :2], axis=2)
mean_orientation = atan2(
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.cos(particle_list[:, :, :, 2:3]), axis=2),
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.sin(particle_list[:, :, :, 2:3]), axis=2))
mean_velocity = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, 3:5], axis=2)
return tf.concat([mean_position, mean_orientation, mean_velocity], axis=2)
def plot_motion_model(self, sess, batch, motion_samples, task, state_step_sizes):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_motion_samples = sess.run(motion_samples, input_dict)
plt.figure('Motion Model')
plt.gca().clear()
for i in range(min(len(s_motion_samples), 10)):
plt.scatter(s_motion_samples[i, :, 3] / state_step_sizes[3], s_motion_samples[i, :, 4] / state_step_sizes[4], color='blue', s=1)
plt.scatter(batch['s'][i, 0, 3] / state_step_sizes[3], batch['s'][i, 0, 4] / state_step_sizes[4], color='black', s=1)
plt.scatter(batch['s'][i, 1, 3] / state_step_sizes[3], batch['s'][i, 1, 4] / state_step_sizes[4], color='red', s=3)
plt.plot(batch['s'][i, :2, 3] / state_step_sizes[3], batch['s'][i, :2, 4] / state_step_sizes[4], color='black')
plt.xlim([0, 200])
plt.ylim([-50, 50])
plt.xlabel('translational vel')
plt.ylabel('angular vel')
plt.gca().set_aspect('equal')
plt.pause(0.01)
def plot_measurement_model(self, sess, batch_iterator, measurement_model_out):
batch = next(batch_iterator)
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_measurement_model_out = sess.run([measurement_model_out], input_dict)
plt.figure('Measurement Model Output')
plt.gca().clear()
plt.imshow(s_measurement_model_out[0], interpolation="nearest", cmap="viridis_r", vmin=0.0, vmax=1.0)
plt.figure('Measurement Model Input')
plt.clf()
plt.scatter(batch['s'][:1, 0, 3], batch['s'][:1, 0, 4], marker='x', c=s_measurement_model_out[0][0,:1], vmin=0, vmax=1.0, cmap='viridis_r')
plt.scatter(batch['s'][1:, 0, 3], batch['s'][1:, 0, 4], marker='o', c=s_measurement_model_out[0][0,1:], vmin=0, vmax=1.0, cmap='viridis_r')
plt.xlabel('x_dot')
plt.ylabel('theta_dot')
plt.pause(0.01)
def plot_particle_proposer(self, sess, batch, proposed_particles, task):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_samples = sess.run(proposed_particles, input_dict)
plt.figure('Particle Proposer')
plt.gca().clear()
plot_maze(task)
for i in range(min(len(s_samples), 10)):
color = np.random.uniform(0.0, 1.0, 3)
plt.quiver(s_samples[i, :, 0], s_samples[i, :, 1], np.cos(s_samples[i, :, 2]), np.sin(s_samples[i, :, 2]), color=color, width=0.001, scale=100)
plt.quiver(batch['s'][i, 0, 0], batch['s'][i, 0, 1], np.cos(batch['s'][i, 0, 2]), np.sin(batch['s'][i, 0, 2]), color=color, scale=50, width=0.003)
plt.pause(0.01)
def plot_particle_filter(self, sess, batch, particle_list,
particle_probs_list, state_step_sizes, task):
s_states, s_particle_list, s_particle_probs_list, \
= sess.run([self.placeholders['s'], particle_list,
particle_probs_list], #self.noise_scaler1(1.0), self.noise_scaler2(2.0)],
{**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 20},
})
# print('learned motion noise factors {:.2f}/{:.2f}'.format(n1, n2))
num_steps = s_particle_list.shape[1]
for s in range(3):
plt.figure('particle_evolution, example {}'.format(s))
plt.clf()
for d in range(5):
plt.subplot(3, 2, [1, 3, 5, 2, 4][d])
for i in range(num_steps):
plt.scatter(i * np.ones_like(s_particle_list[s, i, :, d]),
s_particle_list[s, i, :, d] / (1 if s == 0 else state_step_sizes[d]),
c=s_particle_probs_list[s, i, :], cmap='viridis_r', marker='o', s=6, alpha=0.5,
linewidths=0.05,
vmin=0.0,
vmax=0.1)
current_state = batch['s'][s, i, d] / (1 if s == 0 else state_step_sizes[d])
plt.plot([i], [current_state], 'o', markerfacecolor='None', markeredgecolor='k',
markersize=2.5)
plt.xlabel('Time')
plt.ylabel('State {}'.format(d))
show_pause(pause=0.01)
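# Illustrative sketch (an assumption, not part of the original file): the
# resampling branch in `loop` above implements low-variance (systematic)
# resampling -- evenly spaced markers with one shared random offset are matched
# against the cumulative particle weights. A minimal NumPy version of the same
# marker-based scheme, assuming the weights are normalized to sum to one:
def systematic_resample_numpy(particles, weights, num_resampled, rng=np.random):
    """Draw `num_resampled` particles using systematic (low-variance) resampling."""
    offset = rng.uniform(0.0, 1.0 / num_resampled)
    markers = offset + np.arange(num_resampled) / float(num_resampled)
    cum_probs = np.cumsum(weights)
    indices = np.searchsorted(cum_probs, markers)  # first index with cumulative weight >= marker
    return particles[indices]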
|
mit
| 6,549,029,999,007,857,000
| 50.16409
| 255
| 0.560738
| false
| 3.742303
| false
| false
| false
|
westurner/pyglobalgoals
|
notebooks/globalgoals-pyglobalgoals.py.py
|
1
|
16352
|
# coding: utf-8
# # @TheGlobalGoals for Sustainable Development
# ## Background
#
# * Homepage: **http://www.globalgoals.org/**
# - Twitter: https://twitter.com/TheGlobalGoals
# - Instagram: https://instagram.com/TheGlobalGoals/
# - Facebook: https://www.facebook.com/globalgoals.org
# - YouTube: https://www.youtube.com/channel/UCRfuAYy7MesZmgOi1Ezy0ng/
# - Hashtag: **#GlobalGoals**
# - https://twitter.com/hashtag/GlobalGoals
# - https://instagram.com/explore/tags/GlobalGoals/
# - https://www.facebook.com/hashtag/GlobalGoals
# - Hashtag: #TheGlobalGoals
# - https://twitter.com/hashtag/TheGlobalGoals
# - https://instagram.com/explore/tags/TheGlobalGoals/
# - https://www.facebook.com/hashtag/TheGlobalGoals
#
#
# ### pyglobalgoals
#
# * Homepage: https://github.com/westurner/pyglobalgoals
# * Src: https://github.com/westurner/pyglobalgoals
# * Download: https://github.com/westurner/pyglobalgoals/releases
#
# ### Objectives
#
# * [x] ENH: Read and parse TheGlobalGoals from globalgoals.org
# * [x] ENH: Download (HTTP GET) each GlobalGoal tile image to ``./notebooks/data/images/``
# * [-] ENH: Generate tweets for each GlobalGoal (e.g. **##gg17** / **##GG17**)
# * [x] ENH: Save TheGlobalGoals to a JSON-LD document
# * [-] ENH: Save TheGlobalGoals with Schema.org RDF vocabulary (as JSON-LD)
# * [-] ENH: Save TheGlobalGoals as ReStructuredText with headings and images
# * [-] ENH: Save TheGlobalGoals as Markdown with headings and images
# * [-] ENH: Save TheGlobalGoals as RDFa with headings and images
# * [ ] ENH: Save TheGlobalGoals as RDFa with images like http://globalgoals.org/
# * [-] DOC: Add narrative documentation where necessary
# * [-] REF: Refactor and extract methods from ``./notebooks/`` to ``./pyglobalgoals/``
#
# ## Implementation
#
# * Python package: [**pyglobalgoals**](#pyglobalgoals)
#
# * Jupyter notebook: **``./notebooks/globalgoals-pyglobalgoals.py.ipynb``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.py
# * Src: https://github.com/westurner/pyglobalgoals/blob/develop/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.1.2/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.2.1/notebooks/globalgoals-pyglobalgoals.py.ipynb
#
# * [x] Download HTML with requests
# * [x] Parse HTML with beautifulsoup
# * [x] Generate JSON[-LD] with ``collections.OrderedDict``
# * [-] REF: Functional methods -> more formal type model -> ``pyglobalgoals.<...>``
#
#
# * [JSON-LD](#JSONLD) document: **``./notebooks/data/globalgoals.jsonld``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/data/globalgoals.jsonld
#
#
# ### JSON-LD
#
# * Wikipedia: https://en.wikipedia.org/wiki/JSON-LD
# * Homepage: http://json-ld.org/
# * Docs: http://json-ld.org/playground/
# * Hashtag: #JSONLD
#
# ### RDFa
#
# * Wikipedia: https://en.wikipedia.org/wiki/RDFa
# * Standard: http://www.w3.org/TR/rdfa-core/
# * Docs: http://www.w3.org/TR/rdfa-primer/
# * Hashtag: #RDFa
# In[1]:
#!conda install -y beautiful-soup docutils jinja2 requests
get_ipython().system(u"pip install -U beautifulsoup4 jinja2 'requests<2.8' requests-cache version-information # tweepy")
import bs4
import jinja2
import requests
import requests_cache
requests_cache.install_cache('pyglobalgoals_cache')
#!pip install -U version_information
get_ipython().magic(u'load_ext version_information')
get_ipython().magic(u'version_information jupyter, bs4, jinja2, requests, requests_cache, version_information')
# In[2]:
url = "http://www.globalgoals.org/"
req = requests.get(url)
#print(req)
#print(sorted(dir(req)))
#req.<TAB>
#req??<[Ctrl-]Enter>
if not req.ok:
raise Exception(req)
content = req.content
print(content[:20])
# In[ ]:
# In[3]:
bs = bs4.BeautifulSoup(req.content)
print(bs.prettify())
# In[4]:
tiles = bs.find_all(class_='goal-tile-wrapper')
from pprint import pprint as pp  # pp is not defined outside a customized IPython profile
pp(tiles)
# In[5]:
tile = tiles[0]
print(tile)
# In[6]:
link = tile.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
example = {'name': img_title, 'img_src': img_src, 'href': link_href}
print(example)
# In[7]:
import collections
def get_data_from_goal_tile_wrapper_div(node, n=None):
link = node.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
output = collections.OrderedDict({'@type': 'un:GlobalGoal'})
if n:
output['n'] = n
output['name'] = img_title
output['image'] = img_src
output['url'] = link_href
return output
def get_goal_tile_data(bs):
for i, tile in enumerate(bs.find_all(class_='goal-tile-wrapper'), 1):
yield get_data_from_goal_tile_wrapper_div(tile, n=i)
tiles = list(get_goal_tile_data(bs))
import json
print(json.dumps(tiles, indent=2))
goal_tiles = tiles[:-1]
# In[ ]:
# In[8]:
import codecs
from path import Path
def build_default_context():
context = collections.OrderedDict()
# context["dc"] = "http://purl.org/dc/elements/1.1/"
context["schema"] = "http://schema.org/"
# context["xsd"] = "http://www.w3.org/2001/XMLSchema#"
# context["ex"] = "http://example.org/vocab#"
# context["ex:contains"] = {
# "@type": "@id"
# }
# default attrs (alternative: prefix each with schema:)
# schema.org/Thing == schema:Thing (!= schema:thing)
context["name"] = "http://schema.org/name"
context["image"] = {
"@type": "@id",
"@id": "http://schema.org/image"
}
context["url"] = {
"@type": "@id",
"@id":"http://schema.org/url"
}
context["description"] = {
"@type": "http://schema.org/Text",
"@id": "http://schema.org/description"
}
return context
DEFAULT_CONTEXT = build_default_context()
def goal_tiles_to_jsonld(nodes, context=None, default_context=DEFAULT_CONTEXT):
data = collections.OrderedDict()
if context is None and default_context is not None:
data['@context'] = build_default_context()
elif context:
data['@context'] = context
elif default_context:
data['@context'] = default_context
data['@graph'] = nodes
return data
DATA_DIR = Path('.') / 'data'
#DATA_DIR = Path(__file__).dirname
#DATA_DIR = determine_path_to(current_notebook) # PWD initially defaults to nb.CWD
DATA_DIR.makedirs_p()
GLOBAL_GOALS_JSONLD_PATH = DATA_DIR / 'globalgoals.jsonld'
def write_global_goals_jsonld(goal_tiles, path=GLOBAL_GOALS_JSONLD_PATH):
goal_tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
with codecs.open(path, 'w', 'utf8') as fileobj:
json.dump(goal_tiles_jsonld, fileobj, indent=2)
def read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH, prettyprint=True):
with codecs.open(path, 'r', 'utf8') as fileobj:
global_goals_dict = json.load(fileobj,
object_pairs_hook=collections.OrderedDict)
return global_goals_dict
def print_json_dumps(global_goals_dict, indent=2):
print(json.dumps(global_goals_dict, indent=indent))
write_global_goals_jsonld(goal_tiles)
global_goals_dict = read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH)
assert global_goals_dict == goal_tiles_to_jsonld(goal_tiles)
print_json_dumps(global_goals_dict)
# In[9]:
def build_tweet_for_goal_tile(node):
return '##gg{n} {name} {url} {image} @TheGlobalGoals #GlobalGoals'.format(**node)
tweets = list(build_tweet_for_goal_tile(tile) for tile in goal_tiles)
tweets
# In[10]:
for node in goal_tiles:
img_basename = node['image'].split('/')[-1]
node['image_basename'] = img_basename
node['tweet_txt'] = build_tweet_for_goal_tile(node)
print(json.dumps(goal_tiles, indent=2))
# In[11]:
#!conda install -y pycurl
try:
import pycurl
except ImportError as e:
import warnings
warnings.warn(unicode(e))
def pycurl_download_file(url, dest_path, follow_redirects=True):
with open(dest_path, 'wb') as f:
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
if follow_redirects:
c.setopt(c.FOLLOWLOCATION, True)
c.perform()
c.close()
return (url, dest_path)
# In[12]:
import requests
def requests_download_file(url, dest_path, **kwargs):
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(dest_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return (url, dest_path)
# In[13]:
import urllib
def urllib_urlretrieve_download_file(url, dest_path):
"""
* https://docs.python.org/2/library/urllib.html#urllib.urlretrieve
"""
(filename, headers) = urllib.urlretrieve(url, dest_path)
return (url, filename)
# In[14]:
def deduplicate_on_attr(nodes, attr='image_basename'):
attrindex = collections.OrderedDict()
for node in nodes:
attrindex.setdefault(node[attr], [])
attrindex[node[attr]].append(node)
return attrindex
def check_for_key_collisions(dict_of_lists):
for name, _nodes in dict_of_lists.items():
if len(_nodes) > 1:
raise Exception('duplicate filenames: %r' % ((name, _nodes),))
attrindex = deduplicate_on_attr(goal_tiles, attr='image_basename')
check_for_key_collisions(attrindex)
#
IMG_DIR = DATA_DIR / 'images'
IMG_DIR.makedirs_p()
def download_goal_tile_images(nodes, img_path):
for node in nodes:
dest_path = img_path / node['image_basename']
source_url = node['image']
(url, dest) = requests_download_file(source_url, dest_path)
node['image_path'] = dest
print((node['n'], node['name']))
print((node['image_path']))
# time.sleep(1) # see: requests_cache
download_goal_tile_images(goal_tiles, IMG_DIR)
tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
print(json.dumps(tiles_jsonld, indent=2))
# In[15]:
#import jupyter.display as display
import IPython.display as display
display.Image(goal_tiles[0]['image_path'])
# In[16]:
import IPython.display
for tile in goal_tiles:
x = IPython.display.Image(tile['image_path'])
x
# In[17]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Image(tile['image_path'])
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[18]:
import string
print(string.punctuation)
NOT_URI_CHARS = dict.fromkeys(string.punctuation + string.digits)
NOT_URI_CHARS.pop('-')
NOT_URI_CHARS.pop('_')
def _slugify(txt):
"""an ~approximate slugify function for human-readable URI #fragments"""
txt = txt.strip().lower()
chars = (
(c if c != ' ' else '-') for c in txt if
c not in NOT_URI_CHARS)
return u''.join(chars)
def _slugify_single_dash(txt):
"""
* unlike docutils, this function does not strip stopwords like 'and' and 'or'
TODO: locate this method in docutils
"""
def _one_dash_only(txt):
count = 0
for char in txt:
if char == '-':
count += 1
else:
if count:
yield '-'
yield char
count = 0
return u''.join(_one_dash_only(_slugify(txt)))
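# Quick illustrative check of the slug helpers (assumed examples, not output
# copied from the notebook): punctuation and digits are dropped, spaces
# collapse to single dashes.
assert _slugify_single_dash('No Poverty') == 'no-poverty'
assert _slugify_single_dash('Good Health & Well-Being') == 'good-health-well-being'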
for node in goal_tiles:
node['name_numbered'] = "%d. %s" % (node['n'], node['name'])
node['slug_rst'] = _slugify_single_dash(node['name'])
node['slug_md'] = _slugify_single_dash(node['name'])
print_json_dumps(goal_tiles)
# In[19]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Markdown("## %s" % tile['name_numbered'])
yield IPython.display.Image(tile['image_path'])
yield IPython.display.Markdown(tile['tweet_txt'].replace('##', '\##'))
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[20]:
TMPL_RST = """
The Global Goals
******************
.. contents::
{% for node in nodes %}
{{ node['name_numbered'] }}
======================================================
| {{ node['url'] }}
.. image:: {{ node['image'] }}{# node['image_path'] #}
:target: {{ node['url'] }}
:alt: {{ node['name'] }}
..
{{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_rst = jinja2.Template(TMPL_RST)
output_rst = tmpl_rst.render(nodes=goal_tiles)
print(output_rst)
# In[21]:
output_rst_path = DATA_DIR / 'globalgoals.rst'
with codecs.open(output_rst_path, 'w', encoding='utf-8') as f:
f.write(output_rst)
print("# wrote goals to %r" % output_rst_path)
# In[22]:
import docutils.core
output_rst_html = docutils.core.publish_string(output_rst, writer_name='html')
print(bs4.BeautifulSoup(output_rst_html).find(id='the-global-goals'))
# In[23]:
IPython.display.HTML(output_rst_html)
# In[24]:
TMPL_MD = """
# The Global Goals
**Contents:**
{% for node in nodes %}
* [{{ node['name_numbered'] }}](#{{ node['slug_md'] }})
{%- endfor %}
{% for node in nodes %}
## {{ node['name_numbered'] }}
{{ node['url'] }}
[![{{node['name_numbered']}}]({{ node['image'] }})]({{ node['url'] }})
> {{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_md = jinja2.Template(TMPL_MD)
output_markdown = tmpl_md.render(nodes=goal_tiles)
print(output_markdown)
# In[25]:
output_md_path = DATA_DIR / 'globalgoals.md'
with codecs.open(output_md_path, 'w', encoding='utf-8') as f:
f.write(output_markdown)
print("# wrote goals to %r" % output_md_path)
# In[26]:
IPython.display.Markdown(output_markdown)
# In[27]:
context = dict(nodes=goal_tiles)
# In[28]:
TMPL_HTML = """
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{% for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile">
<h2><a name="#{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
"""
tmpl_html = jinja2.Template(TMPL_HTML)
output_html = tmpl_html.render(**context)
print(output_html)
# In[29]:
output_html_path = DATA_DIR / 'globalgoals.html'
with codecs.open(output_html_path, 'w', encoding='utf-8') as f:
f.write(output_html)
print("# wrote goals to %r" % output_html_path)
# In[30]:
IPython.display.HTML(output_html)
# In[31]:
import jinja2
# TODO: prefix un:
TMPL_RDFA_HTML5 = ("""
<div prefix="schema: http://schema.org/
un: http://schema.un.org/#">
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{%- for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile" resource="{{node.url}}" typeof="un:GlobalGoal">
<div style="display:none">
<meta property="schema:name">{{node.name}}</meta>
<meta property="schema:image">{{node.image}}</meta>
<meta property="#n">{{node.n}}</meta>
</div>
<h2><a name="#{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a property="schema:url" href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
</div>
"""
)
tmpl_rdfa_html5 = jinja2.Template(TMPL_RDFA_HTML5)
output_rdfa_html5 = tmpl_rdfa_html5.render(**context)
print(output_rdfa_html5)
# In[32]:
output_rdfa_html5_path = DATA_DIR / 'globalgoals.rdfa.html5.html'
with codecs.open(output_rdfa_html5_path, 'w', encoding='utf-8') as f:
f.write(output_rdfa_html5)
print("# wrote goals to %r" % output_rdfa_html5_path)
# In[33]:
IPython.display.HTML(output_rdfa_html5)
# In[34]:
# tmpl_html
# tmpl_rdfa_html5
import difflib
for line in difflib.unified_diff(
TMPL_HTML.splitlines(),
TMPL_RDFA_HTML5.splitlines()):
print(line)
|
bsd-3-clause
| -4,042,131,204,744,934,400
| 24.630094
| 120
| 0.635763
| false
| 2.940478
| false
| false
| false
|
GoogleCloudPlatform/healthcare-deid
|
setup.py
|
1
|
1364
|
"""Setup module for the healthcare_deid DLP pipeline.
All of the code necessary to run the pipeline is packaged into a source
distribution that is uploaded to the --staging_location specified on the command
line. The source distribution is then installed on the workers before they
start running.
When remotely executing the pipeline, `--setup_file path/to/setup.py` must be
added to the pipeline's command line.
"""
import os
import setuptools
# Add required python packages that should be installed over and above the
# standard DataFlow worker environment. Version restrictions are supported if
# necessary.
REQUIRED_PACKAGES = [
'apache_beam[gcp]',
'google-api-python-client',
'google-cloud-storage',
'six==1.10.0',
]
packages = ['common', 'dlp', 'physionet']
package_dir = {p: p for p in packages}
# Use eval from bazel-bin so we get the generated results_pb2.py file.
# If it doesn't exist, then the job is another pipeline that doesn't need eval.
eval_bazel_path = 'bazel-bin/eval/run_pipeline.runfiles/__main__/eval'
if os.path.exists(eval_bazel_path):
packages.append('eval')
package_dir['eval'] = eval_bazel_path
setuptools.setup(
name='healthcare_deid',
version='0.0.1',
package_dir=package_dir,
description='Healthcare Deid pipeline package.',
install_requires=REQUIRED_PACKAGES,
packages=packages)
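# Illustrative launch sketch (an assumption, not part of this setup.py): when
# executing remotely, the package defined above reaches the Dataflow workers by
# pointing --setup_file at this file. The project, buckets, and pipeline body
# below are placeholders.
def _example_remote_launch():
    import apache_beam as beam
    from apache_beam.options.pipeline_options import PipelineOptions

    options = PipelineOptions([
        '--runner=DataflowRunner',
        '--project=my-project',                   # placeholder project id
        '--temp_location=gs://my-bucket/tmp',     # placeholder bucket
        '--staging_location=gs://my-bucket/stg',  # placeholder bucket
        '--setup_file=./setup.py',
    ])
    with beam.Pipeline(options=options) as p:
        _ = p | beam.Create(['record-1', 'record-2']) | beam.Map(lambda record: record.upper())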
|
apache-2.0
| 6,858,331,243,784,327,000
| 32.268293
| 80
| 0.737537
| false
| 3.598945
| false
| false
| false
|
SethGreylyn/gwells
|
gwells/migrations/0009_auto_20170711_1600_squashed_0010_auto_20170713_0917.py
|
1
|
20389
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-13 17:57
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('gwells', '0009_auto_20170711_1600'), ('gwells', '0010_auto_20170713_0917')]
dependencies = [
('gwells', '0008_auto_20170707_1158'),
]
operations = [
migrations.RemoveField(
model_name='activitysubmission',
name='created',
),
migrations.RemoveField(
model_name='activitysubmission',
name='modified',
),
migrations.RemoveField(
model_name='ltsaowner',
name='created',
),
migrations.RemoveField(
model_name='ltsaowner',
name='modified',
),
migrations.RemoveField(
model_name='well',
name='created',
),
migrations.RemoveField(
model_name='well',
name='modified',
),
migrations.AddField(
model_name='activitysubmission',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='activitysubmission',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='activitysubmission',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='activitysubmission',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterial',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterial',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterial',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterial',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='driller',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='driller',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='driller',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='driller',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingcompany',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingcompany',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingcompany',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingcompany',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingmethod',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingmethod',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingmethod',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingmethod',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='groundelevationmethod',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='groundelevationmethod',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='groundelevationmethod',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='groundelevationmethod',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='intendedwateruse',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='intendedwateruse',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='intendedwateruse',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='intendedwateruse',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='landdistrict',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='landdistrict',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='landdistrict',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='landdistrict',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologycolour',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologycolour',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologycolour',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologycolour',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologydescription',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologydescription',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologydescription',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologydescription',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyhardness',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyhardness',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyhardness',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyhardness',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologymoisture',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologymoisture',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologymoisture',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologymoisture',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologystructure',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologystructure',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologystructure',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologystructure',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyweathering',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyweathering',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyweathering',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyweathering',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='ltsaowner',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='ltsaowner',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='ltsaowner',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='ltsaowner',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='provincestate',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='provincestate',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='provincestate',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='provincestate',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='surficialmaterial',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='surficialmaterial',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='surficialmaterial',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='surficialmaterial',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='well',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='well',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='well',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='well',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellactivitytype',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellactivitytype',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellactivitytype',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellactivitytype',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellclass',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellclass',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellclass',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellclass',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellsubclass',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellsubclass',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellsubclass',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellsubclass',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellyieldunit',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellyieldunit',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellyieldunit',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellyieldunit',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AlterField(
model_name='activitysubmission',
name='drilling_method',
field=models.ForeignKey(db_column='drilling_method_guid', null=True, on_delete=django.db.models.deletion.CASCADE, to='gwells.DrillingMethod', verbose_name='Drilling Method'),
),
migrations.AlterField(
model_name='activitysubmission',
name='latitude',
field=models.DecimalField(decimal_places=6, max_digits=8, null=True),
),
migrations.AlterField(
model_name='activitysubmission',
name='longitude',
field=models.DecimalField(decimal_places=6, max_digits=9, null=True),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_from',
field=models.DecimalField(decimal_places=2, max_digits=7, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='From'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_to',
field=models.DecimalField(decimal_places=2, max_digits=7, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='To'),
),
]
|
apache-2.0
| -280,287,383,028,197,200
| 34.45913
| 186
| 0.548286
| false
| 4.581798
| false
| false
| false
|
dsparrow27/zoocore
|
zoo/libs/command/commandui.py
|
1
|
2760
|
from functools import partial
from qt import QtWidgets, QtGui, QtCore
from zoo.libs import iconlib
from zoo.libs.utils import zlogging
logger = zlogging.getLogger(__name__)
class CommandActionBase(QtCore.QObject):
"""CommandUi class deals with encapsulating a command as a widget
"""
triggered = QtCore.Signal(str)
triggeredUi = QtCore.Signal(str)
def __init__(self, command):
super(CommandActionBase, self).__init__()
self.command = command
self.item = None
def create(self, parent=None):
pass
class MenuItem(CommandActionBase):
def create(self, parent=None, optionBox=False):
from maya import cmds
uiData = self.command.uiData
self.item = cmds.menuItem(label=uiData["label"], boldFont=uiData.get("bold", False), parent=parent,
italicized=uiData.get("italicized", False), command=self.emitCommand,
optionBox=optionBox)
if optionBox:
cmds.menuItem(parent=parent, optionBox=optionBox, command=self.emitCommandUi)
return self.item
def emitCommand(self, *args):
"""
        :param args: dummy parameter that absorbs the extra arguments Maya passes to the command; the values are unused
:type args: tuple
"""
self.triggered.emit(self.command.id)
def emitCommandUi(self, *args):
"""
        :param args: dummy parameter that absorbs the extra arguments Maya passes to the command; the values are unused
:type args: tuple
"""
self.triggeredUi.emit(self.command.id)
class CommandAction(CommandActionBase):
def create(self, parent=None):
uiData = self.command.uiData
self.item = QtWidgets.QWidgetAction(parent)
text = uiData.get("label", "NOLABEL")
actionLabel = QtWidgets.QLabel(text)
self.item.setDefaultWidget(actionLabel)
color = uiData.get("color", "")
backColor = uiData.get("backgroundColor", "")
if color or backColor:
actionLabel.setStyleSheet(
"QLabel {background-color: %s; color: %s;}" % (backColor,
color))
icon = uiData.get("icon")
if icon:
if isinstance(icon, QtGui.QIcon):
self.item.setIcon(icon)
else:
icon = iconlib.icon(icon)
if not icon.isNull():
self.item.setIcon(icon)
self.item.setStatusTip(uiData.get("tooltip"))
self.item.triggered.connect(partial(self.triggered.emit, self.command.id))
logger.debug("Added commandAction, {}".format(text))
return self.item
def show(self):
if self.item is not None:
self.item.show()
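# --- Hedged usage sketch (not part of the original module) -----------------
# The classes above wrap a command object as either a Maya menu item or a
# QWidgetAction. The helper below is a minimal illustration of how the Qt
# variant could be wired into a QMenu; ``commands`` and ``executor`` are
# hypothetical stand-ins for whatever supplies command objects and runs them
# by id in a real session.
def _exampleBuildMenu(parent, commands, executor):
    """Populate a QMenu with one CommandAction per command (illustrative only)."""
    menu = QtWidgets.QMenu(parent)
    actions = []
    for command in commands:
        action = CommandAction(command)
        # create() returns a QWidgetAction parented to the menu
        menu.addAction(action.create(menu))
        # the action only emits the command id; execution is delegated
        action.triggered.connect(executor)
        actions.append(action)  # keep the wrappers alive alongside the menu
    return menu, actions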
|
gpl-3.0
| -7,807,650,875,466,042,000
| 33.5
| 107
| 0.598913
| false
| 4.119403
| false
| false
| false
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons/io_blend_utils/bl_utils/subprocess_helper.py
|
1
|
5646
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Defines an operator mix-in to use for non-blocking command line access.
"""
class SubprocessHelper:
"""
Mix-in class for operators to run commands in a non-blocking way.
This uses a modal operator to manage an external process.
Subclass must define:
``command``:
List of arguments to pass to subprocess.Popen
    ``report_interval``:
        Time in seconds between updating reports.
``process_pre()``:
Callback that runs before the process executes.
``process_post(returncode)``:
        Callback that runs when the process has ended.
returncode is -1 if the process was terminated.
Subclass may define:
    ``environ``:
        Dict of environment variables exposed to the subprocess.
        Contrary to the subprocess.Popen(env=...) parameter, this
        dict is not used to replace the existing environment
        entirely, but is just used to update it.
"""
environ = {}
command = ()
@staticmethod
def _non_blocking_readlines(f, chunk=64):
"""
        Iterate over lines, yielding b'' when nothing is left
or when new data is not yet available.
"""
import os
from .pipe_non_blocking import (
pipe_non_blocking_set,
pipe_non_blocking_is_error_blocking,
PortableBlockingIOError,
)
fd = f.fileno()
pipe_non_blocking_set(fd)
blocks = []
while True:
try:
data = os.read(fd, chunk)
if not data:
                    # case where reading finishes with no trailing newline
yield b''.join(blocks)
blocks.clear()
except PortableBlockingIOError as ex:
if not pipe_non_blocking_is_error_blocking(ex):
raise ex
yield b''
continue
while True:
n = data.find(b'\n')
if n == -1:
break
yield b''.join(blocks) + data[:n + 1]
data = data[n + 1:]
blocks.clear()
blocks.append(data)
def _report_output(self):
stdout_line_iter, stderr_line_iter = self._buffer_iter
for line_iter, report_type in (
(stdout_line_iter, {'INFO'}),
(stderr_line_iter, {'WARNING'})
):
while True:
line = next(line_iter).rstrip() # rstrip all, to include \r on windows
if not line:
break
self.report(report_type, line.decode(encoding='utf-8', errors='surrogateescape'))
def _wm_enter(self, context):
wm = context.window_manager
window = context.window
self._timer = wm.event_timer_add(self.report_interval, window)
window.cursor_set('WAIT')
def _wm_exit(self, context):
wm = context.window_manager
window = context.window
wm.event_timer_remove(self._timer)
window.cursor_set('DEFAULT')
def process_pre(self):
pass
def process_post(self, returncode):
pass
def modal(self, context, event):
wm = context.window_manager
p = self._process
if event.type == 'ESC':
self.cancel(context)
self.report({'INFO'}, "Operation aborted by user")
return {'CANCELLED'}
elif event.type == 'TIMER':
if p.poll() is not None:
self._report_output()
self._wm_exit(context)
self.process_post(p.returncode)
return {'FINISHED'}
self._report_output()
return {'PASS_THROUGH'}
def execute(self, context):
import subprocess
import os
import copy
self.process_pre()
env = copy.deepcopy(os.environ)
env.update(self.environ)
try:
p = subprocess.Popen(
self.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
except FileNotFoundError as ex:
# Command not found
self.report({'ERROR'}, str(ex))
return {'CANCELLED'}
self._process = p
self._buffer_iter = (
iter(self._non_blocking_readlines(p.stdout)),
iter(self._non_blocking_readlines(p.stderr)),
)
wm = context.window_manager
wm.modal_handler_add(self)
self._wm_enter(context)
return {'RUNNING_MODAL'}
def cancel(self, context):
self._wm_exit(context)
self._process.kill()
self.process_post(-1)
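# --- Hedged usage sketch (not part of the original module) -----------------
# The mix-in expects a subclass to supply ``command``, ``report_interval`` and
# the two process callbacks described in the class docstring. A minimal
# operator could look like the sketch below; ``bpy`` only exists inside
# Blender and the command line shown is purely illustrative.
#
#     import bpy
#
#     class WM_OT_list_directory(SubprocessHelper, bpy.types.Operator):
#         bl_idname = "wm.list_directory"
#         bl_label = "List Directory (non-blocking)"
#
#         report_interval = 0.25
#         command = ("ls", "-l")
#
#         def process_pre(self):
#             self.report({'INFO'}, "Starting subprocess")
#
#         def process_post(self, returncode):
#             self.report({'INFO'}, "Subprocess finished with code %r" % returncode)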
|
gpl-3.0
| -5,265,626,018,868,350,000
| 28.873016
| 97
| 0.553135
| false
| 4.491647
| false
| false
| false
|
tetra5/radiance
|
ui/widgets/verticallabel.py
|
1
|
1370
|
# -*- coding: utf-8 -*-
"""
Created on 28.01.2011
@author: vda
"""
from PyQt4 import QtCore, QtGui
class VerticalLabel(QtGui.QWidget):
def __init__(self, text, parent=None):
        QtGui.QWidget.__init__(self, parent)
self.text = text
fm = QtGui.QApplication.fontMetrics()
self.width = fm.width(self.text)
self.height = fm.height()
# self.setMinimumSize(QtCore.QSize(100, 100))
# self.setMaximumSize(QtCore.QSize(100, 100))
# self.setGeometry(0, 0, 100, 100)
self.setMinimumSize(QtCore.QSize(self.width, self.height))
self.setMaximumSize(QtCore.QSize(self.width, self.height))
self.setGeometry(0, 0, self.width, self.height)
# self.update()
def paintEvent(self, event):
fm = QtGui.QApplication.fontMetrics()
painter = QtGui.QPainter()
painter.begin(self)
painter.setBrush(QtGui.QBrush(QtGui.QColor('#CCCCCC')))
painter.setPen(QtCore.Qt.NoPen)
painter.drawRect(0, 0, fm.height(), fm.width(self.text))
#painter.drawRect(0, 0, 100, 100)
painter.setPen(QtCore.Qt.black)
# painter.translate(20, 100)
painter.rotate(-90)
painter.drawText(event.rect(), QtCore.Qt.AlignCenter, self.text)
painter.end()
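# --- Hedged usage sketch (not part of the original module) -----------------
# A minimal way to preview the widget on its own, assuming PyQt4 is
# available; the label text is arbitrary.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    label = VerticalLabel('Radiance')
    label.show()
    sys.exit(app.exec_())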
|
mit
| 5,865,820,515,146,894,000
| 26.42
| 72
| 0.586861
| false
| 3.586387
| false
| false
| false
|
supermaik/selbot
|
Quote_Command.py
|
1
|
1175
|
from Quotes import Quote
from Command import Command
class Quote_Command(Command):
def __init__(self, config):
self.connection = config['connection']
self.event = config['event']
self.channel = config['channel']
pass
def resolve(self):
args = self.event.arguments[0].split()
# Don't let people skip last 10 (for voting!)
if not self.channel.quote_last_ten:
#Check if they asked for a source
if len(args) > 1:
try:
#Grab a random quote from given source
q = self.channel.quotes_list.random_quote(args[1])
except Exception:
#Invalid source name
q = Quote("your_boss", "Don't you think you should be getting back to work?")
else:
#Grab random quote from random source
q = self.channel.quotes_list.random_quote()
self.channel.last_quote = q
#Print the quote
self.respond(self.event.target, q)
pass
def respond(self, target, message):
self.connection.privmsg(target, message)
|
unlicense
| 5,750,818,820,420,012,000
| 34.606061
| 97
| 0.556596
| false
| 4.450758
| false
| false
| false
|
ojengwa/Bookie
|
bookie/tests/factory.py
|
1
|
2651
|
"""Provide tools for generating objects for testing purposes."""
from datetime import datetime
from random import randint
import random
import string
from bookie.models import DBSession
from bookie.models import Bmark
from bookie.models import Tag
from bookie.models.applog import AppLog
from bookie.models.auth import User
from bookie.models.stats import (
StatBookmark,
USER_CT,
)
def random_int(max=1000):
"""Generate a random integer value
:param max: Maximum value to hit.
"""
return randint(0, max)
def random_string(length=None):
"""Generates a random string from urandom.
:param length: Specify the number of chars in the generated string.
"""
chars = string.ascii_uppercase + string.digits
str_length = length if length is not None else random_int()
return unicode(u''.join(random.choice(chars) for x in range(str_length)))
def random_url():
"""Generate a random url that is totally bogus."""
url = u"http://{0}.com".format(random_string())
return url
def make_applog(message=None, status=None):
"""Generate applog instances."""
if status is None:
status = random_int(max=3)
if message is None:
message = random_string(100)
alog = AppLog(**{
'user': random_string(10),
'component': random_string(10),
'status': status,
'message': message,
'payload': u'',
})
return alog
def make_tag(name=None):
if not name:
name = random_string(255)
return Tag(name)
def make_bookmark(user=None):
"""Generate a fake bookmark for testing use."""
bmark = Bmark(random_url(),
username=u"admin",
desc=random_string(),
ext=random_string(),
tags=u"bookmarks")
if user:
bmark.username = user.username
bmark.user = user
DBSession.add(bmark)
DBSession.flush()
return bmark
def make_user_bookmark_count(username, data, tstamp=None):
"""Generate a fake user bookmark count for testing use"""
if tstamp is None:
tstamp = datetime.utcnow()
bmark_count = StatBookmark(tstamp=tstamp,
attrib=USER_CT.format(username),
data=data)
DBSession.add(bmark_count)
DBSession.flush()
return [bmark_count.attrib, bmark_count.data, bmark_count.tstamp]
def make_user(username=None):
"""Generate a fake user to test against."""
user = User()
if not username:
username = random_string(10)
user.username = username
DBSession.add(user)
DBSession.flush()
return user
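# --- Hedged usage sketch (not part of the original module) -----------------
# Within a Bookie test these helpers are typically combined, e.g. a bookmark
# owned by a freshly created user; the test class and fixture below are
# illustrative and assume the usual DBSession test setup.
#
#     class TestBmarkFactory(TestDBBase):
#         def test_bookmark_belongs_to_user(self):
#             user = make_user(u'tester')
#             bmark = make_bookmark(user=user)
#             self.assertEqual(bmark.username, user.username)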
|
agpl-3.0
| 812,322,954,075,319,000
| 23.775701
| 77
| 0.632214
| false
| 3.875731
| false
| false
| false
|
Jasonmk47/OpenWPM
|
automation/Proxy/mitm_commands.py
|
1
|
4714
|
# This module parses MITM Proxy requests/responses into (command, data pairs)
# This should mean that the MITMProxy code should simply pass the messages + its own data to this module
from urlparse import urlparse
import datetime
import mmh3
import json
import zlib
import os
def encode_to_unicode(msg):
"""
Tries different encodings before setting on utf8 ignoring any errors
We can likely inspect the headers for an encoding as well, though it
won't always be correct.
"""
try:
msg = unicode(msg, 'utf8')
except UnicodeDecodeError:
try:
msg = unicode(msg, 'ISO-8859-1')
except UnicodeDecodeError:
msg = unicode(msg, 'utf8', 'ignore')
return msg
def process_general_mitm_request(db_socket, browser_params, visit_id, msg):
""" Logs a HTTP request object """
referrer = msg.request.headers['referer'][0] if len(msg.request.headers['referer']) > 0 else ''
data = (browser_params['crawl_id'],
encode_to_unicode(msg.request.url),
msg.request.method,
encode_to_unicode(referrer),
json.dumps(msg.request.headers.get_state()),
visit_id,
str(datetime.datetime.now()))
db_socket.send(("INSERT INTO http_requests (crawl_id, url, method, referrer, headers, "
"visit_id, time_stamp) VALUES (?,?,?,?,?,?,?)", data))
def process_general_mitm_response(db_socket, ldb_socket, logger, browser_params, visit_id, msg):
""" Logs a HTTP response object and, if necessary, """
referrer = msg.request.headers['referer'][0] if len(msg.request.headers['referer']) > 0 else ''
location = msg.response.headers['location'][0] if len(msg.response.headers['location']) > 0 else ''
content_hash = save_javascript_content(ldb_socket, logger, browser_params, msg)
data = (browser_params['crawl_id'],
encode_to_unicode(msg.request.url),
encode_to_unicode(msg.request.method),
encode_to_unicode(referrer),
msg.response.code,
msg.response.msg,
json.dumps(msg.response.headers.get_state()),
encode_to_unicode(location),
visit_id,
str(datetime.datetime.now()),
content_hash)
db_socket.send(("INSERT INTO http_responses (crawl_id, url, method, referrer, response_status, "
"response_status_text, headers, location, visit_id, time_stamp, content_hash) VALUES (?,?,?,?,?,?,?,?,?,?,?)", data))
def save_javascript_content(ldb_socket, logger, browser_params, msg):
""" Save javascript files de-duplicated and compressed on disk """
if not browser_params['save_javascript']:
return
# Check if this response is javascript content
is_js = False
if (len(msg.response.headers['Content-Type']) > 0 and
'javascript' in msg.response.headers['Content-Type'][0]):
is_js = True
if not is_js and urlparse(msg.request.url).path.split('.')[-1] == 'js':
is_js = True
if not is_js:
return
# Decompress any content with compression
# We want files to hash to the same value
# Firefox currently only accepts gzip/deflate
script = ''
content_encoding = msg.response.headers['Content-Encoding']
if (len(content_encoding) == 0 or
content_encoding[0].lower() == 'utf-8' or
content_encoding[0].lower() == 'identity' or
content_encoding[0].lower() == 'none' or
content_encoding[0].lower() == 'ansi_x3.4-1968' or
content_encoding[0].lower() == 'utf8' or
content_encoding[0] == ''):
script = msg.response.content
elif 'gzip' in content_encoding[0].lower():
try:
script = zlib.decompress(msg.response.content, zlib.MAX_WBITS|16)
except zlib.error as e:
logger.error('BROWSER %i: Received zlib error when trying to decompress gzipped javascript: %s' % (browser_params['crawl_id'],str(e)))
return
elif 'deflate' in content_encoding[0].lower():
try:
script = zlib.decompress(msg.response.content, -zlib.MAX_WBITS)
except zlib.error as e:
logger.error('BROWSER %i: Received zlib error when trying to decompress deflated javascript: %s' % (browser_params['crawl_id'],str(e)))
return
else:
logger.error('BROWSER %i: Received Content-Encoding %s. Not supported by Firefox, skipping archive.' % (browser_params['crawl_id'], str(content_encoding)))
return
ldb_socket.send(script)
# Hash script for deduplication on disk
hasher = mmh3.hash128
script_hash = str(hasher(script) >> 64)
return script_hash
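# --- Hedged illustration (not part of the original module) -----------------
# The two zlib calls above differ only in the ``wbits`` argument:
# ``zlib.MAX_WBITS | 16`` expects a gzip wrapper, while ``-zlib.MAX_WBITS``
# expects a raw deflate stream. A small self-contained round trip:
def _example_decompress_roundtrip():
    payload = b"console.log('hi');"
    # gzip-wrapped data, as delivered with "Content-Encoding: gzip"
    import gzip, io
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as gz:
        gz.write(payload)
    assert zlib.decompress(buf.getvalue(), zlib.MAX_WBITS | 16) == payload
    # raw deflate stream (no zlib header), matching "Content-Encoding: deflate"
    comp = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    data = comp.compress(payload) + comp.flush()
    assert zlib.decompress(data, -zlib.MAX_WBITS) == payload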
|
gpl-3.0
| 2,147,477,469,874,632,200
| 39.637931
| 163
| 0.627068
| false
| 3.823195
| false
| false
| false
|
Sharecare/cyclops
|
app/httpreq.py
|
1
|
4917
|
import urllib
import urllib2
import urlparse
import socket
import time
import json
import sys
import logging
logger = logging.getLogger(__name__)
import pprint
pp = pprint.PrettyPrinter(indent=4)
# we need to make sure we don't follow redirects so build a new opener
class NoRedirection(urllib2.HTTPErrorProcessor):
def http_response(self, request, response):
return response
https_response = http_response
# by default, urllib2 only deals with GET and POST
# so we subclass it and make it handle other methods
class RequestWithMethod(urllib2.Request):
def __init__(self, url, method, data=None, headers={}, origin_req_host=None, unverifiable=False):
self._method = method
# build up a copy of the full request
u = urlparse.urlparse(url)
self._the_request = "%s %s HTTP/1.1\n" % (method, u.path)
for h in headers:
self._the_request += "%s: %s\n" % (h, headers[h])
self._the_request += "\n"
if data:
self._the_request += data
urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
def get_method(self):
if self._method:
return self._method
else:
return urllib2.Request.get_method(self)
class HTTPReq():
def __init__(self, timeout=10):
self.timeout = timeout
self.AcceptTypes = {}
self.AcceptTypes['json'] = 'application/json'
self.AcceptTypes['xml'] = 'application/xml'
self.AcceptTypes['text'] = 'text/plain'
self.AcceptTypes['csv'] = 'text/csv'
def accept2type(self, accept):
for k in self.AcceptTypes:
try:
if self.AcceptTypes[k] == accept:
return(k)
except:
pass
return('json')
def _query(self, req):
start = end = 0
code = -1
rheaders = {}
ret = None
retheaders = None
try:
opener = urllib2.build_opener(NoRedirection)
except Exception, e:
logger.exception(e)
sys.exit(0)
try:
start = time.time()
response = opener.open(req, timeout=self.timeout)
end = time.time()
code = response.code
retheaders = response.info()
except urllib2.URLError, e:
if hasattr(e, 'reason'):
logger.exception(e)
ret = str(e.reason)
else:
code = e.code
retheaders = e.info()
ret = e.read()
raise e
except IOError, e:
if hasattr(e, 'reason'):
reason = e.reason
elif hasattr(e, 'code'):
code = e.code
rheaders = e.info()
else:
logger.exception(e)
raise e
try:
ret = response.read()
except:
pass
try:
for r in retheaders.items():
rheaders[r[0].lower()] = r[1]
except:
pass
#return dict(content=ret.decode('ascii', errors='ignore'), status=code, headers=rheaders, speed=(end - start), request=req._the_request)
return dict(content=ret, status=code, headers=rheaders, speed=(end - start), request=req._the_request)
def get(self, url, data=None, headers={}, type=None):
req = None
try:
if self.AcceptTypes[type]:
headers['Accept'] = self.AcceptTypes[type]
headers['Content-Type'] = self.AcceptTypes[type]
req = RequestWithMethod(url, 'GET', headers=headers)
except:
req = RequestWithMethod(url, 'GET', headers=headers)
return(self._query(req))
def post(self, url, data, headers={}, type=None):
req = None
try:
if self.AcceptTypes[type]:
headers['Accept'] = self.AcceptTypes[type]
headers['Content-Type'] = self.AcceptTypes[type]
req = RequestWithMethod(url, 'POST', data=data, headers=headers)
except Exception, e:
req = RequestWithMethod(url, 'POST', data=data, headers=headers)
#logger.exception(e)
return(self._query(req))
def delete(self, url, data=None, headers={}, type=None):
req = None
try:
if self.AcceptTypes[type]:
headers['Accept'] = self.AcceptTypes[type]
headers['Content-Type'] = self.AcceptTypes[type]
req = RequestWithMethod(url, 'DELETE', headers=headers)
except:
req = RequestWithMethod(url, 'DELETE', headers=headers)
return(self._query(req))
def put(self, url, data, headers={}, type=None):
req = None
try:
if self.AcceptTypes[type]:
headers['Accept'] = self.AcceptTypes[type]
headers['Content-Type'] = self.AcceptTypes[type]
req = RequestWithMethod(url, 'PUT', data=data, headers=headers)
except:
req = RequestWithMethod(url, 'PUT', data=data, headers=headers)
return(self._query(req))
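# --- Hedged usage sketch (not part of the original module) -----------------
# HTTPReq wraps urllib2 with per-method helpers; each call returns a dict with
# content, status, headers, speed and the reconstructed request. The URL below
# is only an example.
if __name__ == '__main__':
    client = HTTPReq(timeout=5)
    result = client.get('http://example.com/health', type='json')
    print result['status'], result['speed']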
|
apache-2.0
| -4,183,367,405,335,792,600
| 28.620482
| 142
| 0.580232
| false
| 3.847418
| false
| false
| false
|
jiaphuan/models
|
research/astronet/astronet/astro_model/astro_model.py
|
1
|
10261
|
# Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A TensorFlow model for identifying exoplanets in astrophysical light curves.
AstroModel is a concrete base class for models that identify exoplanets in
astrophysical light curves. This class implements a simple linear model that can
be extended by subclasses.
The general framework for AstroModel and its subclasses is as follows:
* Model inputs:
- Zero or more time_series_features (e.g. astrophysical light curves)
- Zero or more aux_features (e.g. orbital period, transit duration)
* Labels:
    - An integer feature with 2 or more values (e.g. 0 = Not Planet, 1 = Planet)
* Model outputs:
- The predicted probabilities for each label
* Architecture:
predictions
^
|
logits
^
|
(pre_logits_hidden_layers)
^
|
pre_logits_concat
^
|
(concatenate)
^ ^
| |
(time_series_hidden_layers) (aux_hidden_layers)
^ ^
| |
time_series_features aux_features
Subclasses will typically override the build_time_series_hidden_layers()
and/or build_aux_hidden_layers() functions. For example, a subclass could
override build_time_series_hidden_layers() to apply convolutional layers to the
time series features. In this class, those functions are simple concatenations
of the input features.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import tensorflow as tf
class AstroModel(object):
"""A TensorFlow model for classifying astrophysical light curves."""
def __init__(self, features, labels, hparams, mode):
"""Basic setup. The actual TensorFlow graph is constructed in build().
Args:
features: A dictionary containing "time_series_features" and
"aux_features", each of which is a dictionary of named input Tensors.
All features have dtype float32 and shape [batch_size, length].
labels: An int64 Tensor with shape [batch_size]. May be None if mode is
tf.estimator.ModeKeys.PREDICT.
hparams: A ConfigDict of hyperparameters for building the model.
mode: A tf.estimator.ModeKeys to specify whether the graph should be built
for training, evaluation or prediction.
Raises:
ValueError: If mode is invalid.
"""
valid_modes = [
tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL,
tf.estimator.ModeKeys.PREDICT
]
if mode not in valid_modes:
raise ValueError("Expected mode in %s. Got: %s" % (valid_modes, mode))
self.hparams = hparams
self.mode = mode
# A dictionary of input Tensors. Values have dtype float32 and shape
# [batch_size, length].
self.time_series_features = features.get("time_series_features", {})
# A dictionary of input Tensors. Values have dtype float32 and shape
# [batch_size, length].
self.aux_features = features.get("aux_features", {})
# An int32 Tensor with shape [batch_size]. May be None if mode is
# tf.estimator.ModeKeys.PREDICT.
self.labels = labels
# Optional Tensor; the weights corresponding to self.labels.
self.weights = features.get("weights")
# A Python boolean or a scalar boolean Tensor. Indicates whether the model
# is in training mode for the purpose of graph ops, such as dropout. (Since
# this might be a Tensor, its value is defined in build()).
self.is_training = None
# Global step Tensor.
self.global_step = None
# A dictionary of float32 Tensors with shape [batch_size, layer_size]; the
# outputs of the time series hidden layers.
self.time_series_hidden_layers = {}
# A dictionary of float32 Tensors with shape [batch_size, layer_size]; the
# outputs of the auxiliary hidden layers.
self.aux_hidden_layers = {}
# A float32 Tensor with shape [batch_size, layer_size]; the concatenation of
# outputs from the hidden layers.
self.pre_logits_concat = None
# A float32 Tensor with shape [batch_size, output_dim].
self.logits = None
# A float32 Tensor with shape [batch_size, output_dim].
self.predictions = None
# A float32 Tensor with shape [batch_size]; the cross-entropy losses for the
# current batch.
self.batch_losses = None
# Scalar Tensor; the total loss for the trainer to optimize.
self.total_loss = None
def build_time_series_hidden_layers(self):
"""Builds hidden layers for the time series features.
Inputs:
self.time_series_features
Outputs:
self.time_series_hidden_layers
"""
# No hidden layers.
self.time_series_hidden_layers = self.time_series_features
def build_aux_hidden_layers(self):
"""Builds hidden layers for the auxiliary features.
Inputs:
self.aux_features
Outputs:
self.aux_hidden_layers
"""
# No hidden layers.
self.aux_hidden_layers = self.aux_features
def build_logits(self):
"""Builds the model logits.
Inputs:
self.aux_hidden_layers
self.time_series_hidden_layers
Outputs:
self.pre_logits_concat
self.logits
Raises:
ValueError: If self.time_series_hidden_layers and self.aux_hidden_layers
are both empty.
"""
# Sort the hidden layers by name because the order of dictionary items is
# nondeterministic between invocations of Python.
time_series_hidden_layers = sorted(
self.time_series_hidden_layers.items(), key=operator.itemgetter(0))
aux_hidden_layers = sorted(
self.aux_hidden_layers.items(), key=operator.itemgetter(0))
hidden_layers = time_series_hidden_layers + aux_hidden_layers
if not hidden_layers:
raise ValueError("At least one time series hidden layer or auxiliary "
"hidden layer is required.")
# Concatenate the hidden layers.
if len(hidden_layers) == 1:
pre_logits_concat = hidden_layers[0][1]
else:
pre_logits_concat = tf.concat(
[layer[1] for layer in hidden_layers],
axis=1,
name="pre_logits_concat")
net = pre_logits_concat
with tf.variable_scope("pre_logits_hidden"):
for i in range(self.hparams.num_pre_logits_hidden_layers):
net = tf.layers.dense(
inputs=net,
units=self.hparams.pre_logits_hidden_layer_size,
activation=tf.nn.relu,
name="fully_connected_%s" % (i + 1))
if self.hparams.pre_logits_dropout_rate > 0:
net = tf.layers.dropout(
net,
self.hparams.pre_logits_dropout_rate,
training=self.is_training)
# Identify the final pre-logits hidden layer as "pre_logits_hidden/final".
tf.identity(net, "final")
logits = tf.layers.dense(
inputs=net, units=self.hparams.output_dim, name="logits")
self.pre_logits_concat = pre_logits_concat
self.logits = logits
def build_predictions(self):
"""Builds the output predictions and losses.
Inputs:
self.logits
Outputs:
self.predictions
"""
# Use sigmoid activation function for binary classification, or softmax for
# multi-class classification.
prediction_fn = (
tf.sigmoid if self.hparams.output_dim == 1 else tf.nn.softmax)
predictions = prediction_fn(self.logits, name="predictions")
self.predictions = predictions
def build_losses(self):
"""Builds the training losses.
Inputs:
self.logits
self.labels
Outputs:
self.batch_losses
self.total_loss
"""
if self.hparams.output_dim == 1:
# Binary classification.
batch_losses = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.to_float(self.labels), logits=tf.squeeze(self.logits, [1]))
else:
# Multi-class classification.
batch_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.labels, logits=self.logits)
# Compute the weighted mean cross entropy loss and add it to the LOSSES
# collection.
weights = self.weights if self.weights is not None else 1.0
tf.losses.compute_weighted_loss(
losses=batch_losses,
weights=weights,
reduction=tf.losses.Reduction.MEAN)
# Compute the total loss, including any other losses added to the LOSSES
# collection (e.g. regularization losses).
total_loss = tf.losses.get_total_loss()
self.batch_losses = batch_losses
self.total_loss = total_loss
def build(self):
"""Creates all ops for training, evaluation or inference."""
self.global_step = tf.train.get_or_create_global_step()
if self.mode == tf.estimator.ModeKeys.TRAIN:
# This is implemented as a placeholder Tensor, rather than a constant, to
# allow its value to be feedable during training (e.g. to disable dropout
# when performing in-process validation set evaluation).
self.is_training = tf.placeholder_with_default(True, [], "is_training")
else:
self.is_training = False
self.build_time_series_hidden_layers()
self.build_aux_hidden_layers()
self.build_logits()
self.build_predictions()
if self.mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
self.build_losses()
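# --- Hedged illustration (not part of the original module) -----------------
# As the module docstring notes, subclasses usually override the hidden-layer
# builders. A minimal, purely illustrative variant that adds one dense layer
# per time series feature is sketched below; ``example_hidden_layer_size`` is
# a hypothetical hyperparameter, not part of the real configuration.
class _ExampleDenseAstroModel(AstroModel):
  """Toy subclass with one fully connected layer per time series feature."""

  def build_time_series_hidden_layers(self):
    hidden = {}
    for name, time_series in self.time_series_features.items():
      hidden[name] = tf.layers.dense(
          inputs=time_series,
          units=self.hparams.example_hidden_layer_size,  # hypothetical hparam
          activation=tf.nn.relu,
          name="%s_hidden" % name)
    self.time_series_hidden_layers = hidden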
|
apache-2.0
| 8,468,219,150,089,580,000
| 32.753289
| 80
| 0.645941
| false
| 4.169443
| false
| false
| false
|
pawelmhm/splash
|
splash/network_manager.py
|
1
|
17016
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import itertools
import functools
from datetime import datetime
import traceback
from PyQt5.QtCore import QByteArray, QTimer
from PyQt5.QtNetwork import (
QNetworkAccessManager,
QNetworkProxyQuery,
QNetworkRequest,
QNetworkReply
)
from twisted.python import log
from splash.qtutils import qurl2ascii, REQUEST_ERRORS, get_request_webframe
from splash.request_middleware import (
AdblockMiddleware,
AllowedDomainsMiddleware,
AllowedSchemesMiddleware,
RequestLoggingMiddleware,
AdblockRulesRegistry,
ResourceTimeoutMiddleware,
ResponseBodyTrackingMiddleware,
)
from splash.response_middleware import ContentTypeMiddleware
from splash import defaults
from splash.utils import to_bytes
from splash.cookies import SplashCookieJar
class NetworkManagerFactory(object):
def __init__(self, filters_path=None, verbosity=None, allowed_schemes=None):
verbosity = defaults.VERBOSITY if verbosity is None else verbosity
self.verbosity = verbosity
self.request_middlewares = []
self.response_middlewares = []
self.adblock_rules = None
# Initialize request and response middlewares
allowed_schemes = (defaults.ALLOWED_SCHEMES if allowed_schemes is None
else allowed_schemes.split(','))
if allowed_schemes:
self.request_middlewares.append(
AllowedSchemesMiddleware(allowed_schemes, verbosity=verbosity)
)
if self.verbosity >= 2:
self.request_middlewares.append(RequestLoggingMiddleware())
self.request_middlewares.append(AllowedDomainsMiddleware(verbosity=verbosity))
self.request_middlewares.append(ResourceTimeoutMiddleware())
self.request_middlewares.append(ResponseBodyTrackingMiddleware())
if filters_path is not None:
self.adblock_rules = AdblockRulesRegistry(filters_path, verbosity=verbosity)
self.request_middlewares.append(
AdblockMiddleware(self.adblock_rules, verbosity=verbosity)
)
self.response_middlewares.append(ContentTypeMiddleware(self.verbosity))
def __call__(self):
manager = SplashQNetworkAccessManager(
request_middlewares=self.request_middlewares,
response_middlewares=self.response_middlewares,
verbosity=self.verbosity,
)
manager.setCache(None)
return manager
class ProxiedQNetworkAccessManager(QNetworkAccessManager):
"""
QNetworkAccessManager subclass with extra features. It
* Enables "splash proxy factories" support. Qt provides similar
functionality via setProxyFactory method, but standard
QNetworkProxyFactory is not flexible enough.
* Sets up extra logging.
* Provides a way to get the "source" request (that was made to Splash
itself).
* Tracks information about requests/responses and stores it in HAR format,
including response content.
* Allows to set per-request timeouts.
"""
_REQUEST_ID = QNetworkRequest.User + 1
_SHOULD_TRACK = QNetworkRequest.User + 2
def __init__(self, verbosity):
super(ProxiedQNetworkAccessManager, self).__init__()
self.sslErrors.connect(self._on_ssl_errors)
self.finished.connect(self._on_finished)
self.verbosity = verbosity
self._reply_timeout_timers = {} # requestId => timer
self._default_proxy = self.proxy()
self.cookiejar = SplashCookieJar(self)
self.setCookieJar(self.cookiejar)
self._response_bodies = {} # requestId => response content
self._request_ids = itertools.count()
assert self.proxyFactory() is None, "Standard QNetworkProxyFactory is not supported"
def _on_ssl_errors(self, reply, errors):
reply.ignoreSslErrors()
def _on_finished(self, reply):
reply.deleteLater()
def createRequest(self, operation, request, outgoingData=None):
"""
This method is called when a new request is sent;
it must return a reply object to work with.
"""
start_time = datetime.utcnow()
# Proxies are managed per-request, so we're restoring a default
# before each request. This assumes all requests go through
# this method.
self._clear_proxy()
request, req_id = self._wrap_request(request)
self._handle_custom_headers(request)
self._handle_request_cookies(request)
self._run_webpage_callbacks(request, 'on_request',
request, operation, outgoingData)
self._handle_custom_proxies(request)
self._handle_request_response_tracking(request)
har = self._get_har(request)
if har is not None:
har.store_new_request(
req_id=req_id,
start_time=start_time,
operation=operation,
request=request,
outgoingData=outgoingData,
)
reply = super(ProxiedQNetworkAccessManager, self).createRequest(
operation, request, outgoingData
)
if hasattr(request, 'timeout'):
timeout = request.timeout * 1000
if timeout:
self._set_reply_timeout(reply, timeout)
if har is not None:
har.store_new_reply(req_id, reply)
reply.error.connect(self._on_reply_error)
reply.finished.connect(self._on_reply_finished)
if self._should_track_content(request):
self._response_bodies[req_id] = QByteArray()
reply.readyRead.connect(self._on_reply_ready_read)
reply.metaDataChanged.connect(self._on_reply_headers)
reply.downloadProgress.connect(self._on_reply_download_progress)
return reply
def _set_reply_timeout(self, reply, timeout_ms):
request_id = self._get_request_id(reply.request())
# reply is used as a parent for the timer in order to destroy
# the timer when reply is destroyed. It segfaults otherwise.
timer = QTimer(reply)
timer.setSingleShot(True)
timer_callback = functools.partial(self._on_reply_timeout,
reply=reply,
timer=timer,
request_id=request_id)
timer.timeout.connect(timer_callback)
self._reply_timeout_timers[request_id] = timer
timer.start(timeout_ms)
def _on_reply_timeout(self, reply, timer, request_id):
self._reply_timeout_timers.pop(request_id)
self.log("timed out, aborting: {url}", reply, min_level=1)
# FIXME: set proper error code
reply.abort()
def _cancel_reply_timer(self, reply):
request_id = self._get_request_id(reply.request())
timer = self._reply_timeout_timers.pop(request_id, None)
if timer and timer.isActive():
timer.stop()
def _clear_proxy(self):
""" Init default proxy """
self.setProxy(self._default_proxy)
def _wrap_request(self, request):
req = QNetworkRequest(request)
req_id = next(self._request_ids)
req.setAttribute(self._REQUEST_ID, req_id)
for attr in ['timeout', 'track_response_body']:
if hasattr(request, attr):
setattr(req, attr, getattr(request, attr))
return req, req_id
def _handle_custom_proxies(self, request):
proxy = None
# proxies set in proxy profiles or `proxy` HTTP argument
splash_proxy_factory = self._get_webpage_attribute(request, 'splash_proxy_factory')
if splash_proxy_factory:
proxy_query = QNetworkProxyQuery(request.url())
proxy = splash_proxy_factory.queryProxy(proxy_query)[0]
self.setProxy(proxy)
# proxies set in on_request
if hasattr(request, 'custom_proxy'):
proxy = request.custom_proxy
self.setProxy(proxy)
# Handle proxy auth. We're setting Proxy-Authorization header
# explicitly because Qt loves to cache proxy credentials.
if proxy is None:
return
user, password = proxy.user(), proxy.password()
if not user and not password:
return
auth = b"Basic " + base64.b64encode("{}:{}".format(user, password).encode("utf-8"))
request.setRawHeader(b"Proxy-Authorization", auth)
def _handle_custom_headers(self, request):
if self._get_webpage_attribute(request, "skip_custom_headers"):
# XXX: this hack assumes that new requests between
# BrowserTab._create_request and this function are not possible,
# i.e. we don't give control to the event loop in between.
# Unfortunately we can't store this flag on a request itself
# because a new QNetworkRequest instance is created by QWebKit.
self._set_webpage_attribute(request, "skip_custom_headers", False)
return
headers = self._get_webpage_attribute(request, "custom_headers")
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers or []:
request.setRawHeader(to_bytes(name), to_bytes(value))
def _handle_request_cookies(self, request):
self.cookiejar.update_cookie_header(request)
def _handle_reply_cookies(self, reply):
self.cookiejar.fill_from_reply(reply)
def _handle_request_response_tracking(self, request):
track = getattr(request, 'track_response_body', False)
request.setAttribute(self._SHOULD_TRACK, track)
def _should_track_content(self, request):
return request.attribute(self._SHOULD_TRACK)
def _get_request_id(self, request=None):
if request is None:
request = self.sender().request()
return request.attribute(self._REQUEST_ID)
def _get_har(self, request=None):
"""
Return HarBuilder instance.
:rtype: splash.har_builder.HarBuilder | None
"""
if request is None:
request = self.sender().request()
return self._get_webpage_attribute(request, "har")
def _get_webpage_attribute(self, request, attribute):
web_frame = get_request_webframe(request)
if web_frame:
return getattr(web_frame.page(), attribute, None)
def _set_webpage_attribute(self, request, attribute, value):
web_frame = get_request_webframe(request)
if web_frame:
return setattr(web_frame.page(), attribute, value)
def _on_reply_error(self, error_id):
self._response_bodies.pop(self._get_request_id(), None)
if error_id != QNetworkReply.OperationCanceledError:
error_msg = REQUEST_ERRORS.get(error_id, 'unknown error')
self.log('Download error %d: %s ({url})' % (error_id, error_msg),
self.sender(), min_level=2)
def _on_reply_ready_read(self):
reply = self.sender()
self._store_response_chunk(reply)
def _store_response_chunk(self, reply):
req_id = self._get_request_id(reply.request())
if req_id not in self._response_bodies:
self.log("Internal problem in _store_response_chunk: "
"request %s is not tracked" % req_id, reply, min_level=1)
return
chunk = reply.peek(reply.bytesAvailable())
self._response_bodies[req_id].append(chunk)
def _on_reply_finished(self):
reply = self.sender()
request = reply.request()
self._cancel_reply_timer(reply)
har = self._get_har()
har_entry, content = None, None
if har is not None:
req_id = self._get_request_id()
# FIXME: what if har is None? When can it be None?
# Who removes the content from self._response_bodies dict?
content = self._response_bodies.pop(req_id, None)
if content is not None:
content = bytes(content)
# FIXME: content is kept in memory at least twice,
# as raw data and as a base64-encoded copy.
har.store_reply_finished(req_id, reply, content)
har_entry = har.get_entry(req_id)
# We're passing HAR entry to the callbacks because reply object
# itself doesn't have all information.
# Content is passed in order to avoid decoding it from base64.
self._run_webpage_callbacks(request, "on_response", reply, har_entry,
content)
self.log("Finished downloading {url}", reply)
def _on_reply_headers(self):
"""Signal emitted before reading response body, after getting headers
"""
reply = self.sender()
request = reply.request()
self._handle_reply_cookies(reply)
self._run_webpage_callbacks(request, "on_response_headers", reply)
har = self._get_har()
if har is not None:
har.store_reply_headers_received(self._get_request_id(request), reply)
self.log("Headers received for {url}", reply, min_level=3)
def _on_reply_download_progress(self, received, total):
har = self._get_har()
if har is not None:
req_id = self._get_request_id()
har.store_reply_download_progress(req_id, received, total)
if total == -1:
total = '?'
self.log("Downloaded %d/%s of {url}" % (received, total),
self.sender(), min_level=4)
def _on_reply_upload_progress(self, sent, total):
# FIXME: is it used?
har = self._get_har()
if har is not None:
req_id = self._get_request_id()
har.store_request_upload_progress(req_id, sent, total)
if total == -1:
total = '?'
self.log("Uploaded %d/%s of {url}" % (sent, total),
self.sender(), min_level=4)
def _get_render_options(self, request):
return self._get_webpage_attribute(request, 'render_options')
def _run_webpage_callbacks(self, request, event_name, *args):
callbacks = self._get_webpage_attribute(request, "callbacks")
if not callbacks:
return
for cb in callbacks.get(event_name, []):
try:
cb(*args)
except:
# TODO unhandled exceptions in lua callbacks
# should we raise errors here?
# https://github.com/scrapinghub/splash/issues/161
self.log("error in %s callback" % event_name, min_level=1)
self.log(traceback.format_exc(), min_level=1, format_msg=False)
def log(self, msg, reply=None, min_level=2, format_msg=True):
if self.verbosity < min_level:
return
if not reply:
url = ''
else:
url = qurl2ascii(reply.url())
if not url:
return
if format_msg:
msg = msg.format(url=url)
log.msg(msg, system='network-manager')
class SplashQNetworkAccessManager(ProxiedQNetworkAccessManager):
"""
This QNetworkAccessManager provides:
* proxy support;
* request middleware support;
* additional logging.
"""
def __init__(self, request_middlewares, response_middlewares, verbosity):
super(SplashQNetworkAccessManager, self).__init__(verbosity=verbosity)
self.request_middlewares = request_middlewares
self.response_middlewares = response_middlewares
def run_response_middlewares(self):
reply = self.sender()
reply.metaDataChanged.disconnect(self.run_response_middlewares)
render_options = self._get_render_options(reply.request())
if render_options:
try:
for middleware in self.response_middlewares:
middleware.process(reply, render_options)
except:
self.log("internal error in response middleware", min_level=1)
self.log(traceback.format_exc(), min_level=1, format_msg=False)
def createRequest(self, operation, request, outgoingData=None):
# XXX: This method MUST return a reply, otherwise PyQT segfaults.
render_options = self._get_render_options(request)
if render_options:
try:
for middleware in self.request_middlewares:
request = middleware.process(request, render_options, operation, outgoingData)
except:
self.log("internal error in request middleware", min_level=1)
self.log(traceback.format_exc(), min_level=1, format_msg=False)
reply = super(SplashQNetworkAccessManager, self).createRequest(operation, request, outgoingData)
if render_options:
reply.metaDataChanged.connect(self.run_response_middlewares)
return reply
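# --- Hedged usage sketch (not part of the original module) -----------------
# The factory is normally created once from the Splash settings and then
# called to produce a fresh manager per rendering session; the argument
# values below are only illustrative.
#
#     factory = NetworkManagerFactory(filters_path=None, verbosity=2,
#                                     allowed_schemes="http,https")
#     network_manager = factory()   # a SplashQNetworkAccessManager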
|
bsd-3-clause
| 1,547,243,669,720,539,400
| 37.497738
| 104
| 0.622297
| false
| 4.146199
| false
| false
| false
|
mefly2012/platform
|
src/clean_validate/zyktgg.py
|
1
|
1453
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
class zyktgg():
"""开庭公告"""
need_check_ziduan = ['main',
'city',
'bbd_dotime',
'title'
]
def check_main(self, indexstr, ustr):
"""main 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.has_count_hz(ustr, 1):
ret = u'不包含中文'
else:
ret = u'为空'
return ret
def check_city(self, indexstr, ustr):
"""city 清洗验证"""
ret = None
if ustr and len(ustr):
if ustr not in public.PROVINCE:
ret = u'非法的省名'
pass
else:
ret = u'为空'
return ret
def check_bbd_dotime(self, indexstr, ustr):
"""do_time 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.bbd_dotime_date_format(ustr):
ret = u"不合法日期"
return ret
def check_title(self, indexstr, ustr):
"""title 清洗验证"""
ret = None
if ustr and len(ustr):
if all(not public.is_chinese(c) for c in ustr):
ret = u'没有中文'
elif not len(ustr) >= 5:
ret = u'不够5个字以上'
return ret
|
apache-2.0
| 8,685,627,748,616,619,000
| 23.636364
| 59
| 0.451661
| false
| 3.188235
| false
| false
| false
|
libvirt/libvirt-test-API
|
libvirttestapi/repos/domain/save.py
|
1
|
2922
|
# Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# Save domain as a statefile
import os
import libvirt
from libvirt import libvirtError
from libvirttestapi.src import sharedmod
from libvirttestapi.utils import utils
required_params = ('guestname', 'filepath',)
optional_params = {}
def get_guest_ipaddr(*args):
"""Get guest ip address"""
(guestname, logger) = args
mac = utils.get_dom_mac_addr(guestname)
logger.debug("guest mac address: %s" % mac)
ipaddr = utils.mac_to_ip(mac, 15)
logger.debug("guest ip address: %s" % ipaddr)
if utils.do_ping(ipaddr, 20) == 1:
logger.info("ping current guest successfull")
return ipaddr
else:
logger.error("Error: can't ping current guest")
return None
def check_guest_status(*args):
"""Check guest current status"""
(domobj, logger) = args
state = domobj.info()[0]
logger.debug("current guest status: %s" % state)
if state == libvirt.VIR_DOMAIN_SHUTOFF or \
state == libvirt.VIR_DOMAIN_SHUTDOWN or \
state == libvirt.VIR_DOMAIN_BLOCKED:
return False
else:
return True
def check_guest_save(*args):
"""Check save domain result, if save domain is successful,
guestname.save will exist under /tmp directory and guest
can't be ping and status is paused
"""
(guestname, domobj, logger) = args
if not check_guest_status(domobj, logger):
if not get_guest_ipaddr(guestname, logger):
return True
else:
return False
else:
return False
def save(params):
"""Save domain to a disk file"""
logger = params['logger']
guestname = params['guestname']
filepath = params['filepath']
conn = sharedmod.libvirtobj['conn']
domobj = conn.lookupByName(guestname)
# Save domain
ipaddr = get_guest_ipaddr(guestname, logger)
if not check_guest_status(domobj, logger):
logger.error("Error: current guest status is shutoff")
return 1
if not ipaddr:
logger.error("Error: can't get guest ip address")
return 1
try:
domobj.save(filepath)
if check_guest_save(guestname, domobj, logger):
logger.info("save %s domain successful" % guestname)
else:
logger.error("Error: fail to check save domain")
return 1
except libvirtError as e:
logger.error("API error message: %s, error code is %s"
% (e.get_error_message(), e.get_error_code()))
logger.error("Error: fail to save %s domain" % guestname)
return 1
return 0
def save_clean(params):
""" clean testing environment """
logger = params['logger']
filepath = params['filepath']
if os.path.exists(filepath):
logger.info("remove dump file from save %s" % filepath)
os.remove(filepath)
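# --- Hedged usage sketch (not part of the original module) -----------------
# In the test framework these entry points receive a ``params`` dict built
# from the test-case configuration; the values below are purely illustrative.
#
#     params = {
#         'logger': logger,
#         'guestname': 'rhel7-guest',
#         'filepath': '/tmp/rhel7-guest.save',
#     }
#     save(params)
#     save_clean(params)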
|
gpl-2.0
| -1,875,613,081,637,927,700
| 26.055556
| 67
| 0.629363
| false
| 3.824607
| false
| false
| false
|
noelevans/sandpit
|
fivethiryeight/riddler_casino.py
|
1
|
1180
|
"""
Suppose a casino invents a new game that you must pay $250 to play. The game
works like this: The casino draws random numbers between 0 and 1, from a
uniform distribution. It adds them together until their sum is greater than 1,
at which time it stops drawing new numbers. You get a payout of $100 each time
a new number is drawn.
For example, suppose the casino draws 0.4 and then 0.7. Since the sum is
greater than 1, it will stop after these two draws, and you receive $200. If
instead it draws 0.2, 0.3, 0.3, and then 0.6, it will stop after the fourth
draw and you will receive $400. Given the $250 entrance fee, should you play
the game?
Specifically, what is the expected value of your winnings?
From:
http://fivethirtyeight.com/features/
should-you-pay-250-to-play-this-casino-game
"""
import numpy as np
def trial():
total = 0
spins = 0
while total < 1:
total += np.random.random()
spins += 1
return spins
def main():
n = 10000000
dollar_return = (np.mean([trial() for _ in range(n)]))
return_on_stake = 100 * dollar_return
print(return_on_stake)
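# --- Analytic check (not part of the original script) -----------------------
# The expected number of uniform(0, 1) draws needed for the running sum to
# exceed 1 is e (Euler's number), so the expected payout is 100 * e, roughly
# $271.83 -- above the $250 entry fee. The Monte Carlo estimate from main()
# should converge to this value.
def analytic_return_on_stake():
    import math
    return 100 * math.e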
if __name__ == '__main__':
main()
|
mit
| -3,011,533,244,456,876,500
| 27.780488
| 78
| 0.677119
| false
| 3.323944
| false
| false
| false
|
sunlightlabs/sarahs_inbox
|
mail/views.py
|
1
|
8502
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.paginator import Paginator
from django.http import HttpResponse, HttpResponseRedirect
from urllib import unquote
from haystack.query import SearchQuerySet
from mail.models import *
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.core.cache import cache
import re
RESULTS_PER_PAGE = 50
def _search_string(request):
return request.GET.get('q', None)
def _search_tokens(request):
s = _search_string(request)
if s is None:
return []
# protection!
re_sanitize = re.compile(r'[^\w\d\s\'"\,\.\?\$]', re.I)
s = re_sanitize.sub('', s)
tokens = []
re_quotes = re.compile(r'\"([^\"]+)\"')
for m in re_quotes.findall(s):
tokens.append(m.replace('"','').strip())
s = s.replace('"%s"' % m, '')
for t in s.split(' '):
tokens.append(t.strip())
while '' in tokens:
tokens.remove('')
return tokens
def _highlight(text, tokens):
regexes = []
sorted_tokens = sorted(tokens, key=lambda x: len(x))
for t in sorted_tokens:
regexes.append(re.compile(r'(%s)' % t.replace(' ', r'\s+'), re.I))
for r in regexes:
text = r.sub('<span class="highlight">\\1</span>', text)
return text
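# --- Hedged illustration (not part of the original module) -----------------
# _search_tokens splits the ?q= string into quoted phrases and single words,
# and _highlight wraps case-insensitive matches (with spaces matched as \s+)
# in a highlight span. For example (values illustrative):
#
#     tokens = ['energy policy', 'agia']     # from q='"energy policy" agia'
#     _highlight('Energy  Policy meeting', tokens)
#     # -> '<span class="highlight">Energy  Policy</span> meeting'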
def _prepare_ids_from_cookie(request, cookie_name, method=None):
if method == 'post':
cookie = unquote(request.POST.get(cookie_name, '')).replace(',,', ',')
else:
cookie = unquote(request.COOKIES.get(cookie_name,'')).replace(',,', ',')
print cookie
if len(cookie)>1:
if cookie[0]==',':
cookie = cookie[1:]
if cookie[-1]==',':
cookie = cookie[:-1]
try:
id_list = map(lambda x: (x!='') and int(x) or 0, cookie.split(','))
except:
id_list = []
return id_list
def _annotate_emails(emails, search=[]):
r = []
for email in emails:
email.text = _highlight(email.text, search)
r.append({ 'creator_html': email.creator_html(), 'to_html': email.to_html(), 'cc_html': email.cc_html(), 'obj': email })
return r
def index(request, search=[], threads=None):
if threads is None:
palin = Person.objects.sarah_palin()
threads = Thread.objects.exclude(creator__in=palin).order_by('-date')
threads_count = threads.count()
p = Paginator(threads, RESULTS_PER_PAGE)
page_num = 1
try:
page_num = int(request.GET.get('page', 1))
except:
pass
page = p.page(page_num)
highlighted_threads = []
for thread in page.object_list:
if (threads is not None) and type(threads) is SearchQuerySet: # deal with searchqueryset objects
thread = thread.object
thread.name = _highlight(thread.name, search)
highlighted_threads.append(thread)
template_vars = {
'range': "<strong>%d</strong> - <strong>%d</strong> of <strong>%d</strong>" % (page.start_index(), page.end_index(), threads_count),
'num_pages': p.num_pages ,
'next': page_num<p.num_pages and min(p.num_pages,page_num+1) or False,
'prev': page_num>1 and max(1, page_num-1) or False,
'first': '1',
'last': p.num_pages,
'current_page': page_num,
'threads': highlighted_threads,
'search': " ".join(search),
'search_orig': (_search_string(request) is not None) and _search_string(request) or '',
'path': request.path,
}
return render_to_response('index.html', template_vars, context_instance=RequestContext(request))
def sent(request):
kagan = Person.objects.elena_kagan()
emails = Thread.objects.filter(creator=kagan).order_by('-date')
return index(request, threads=emails)
def contact_by_id(request, contact_id, suppress_redirect=False):
cache_key = 'contact_%d' % int(contact_id)
threads = cache.get(cache_key)
if threads is None:
try:
person = Person.objects.get(id=contact_id)
except Person.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.index'))
if person.merged_into is not None:
return HttpResponseRedirect('/contact/%d/' % person.merged_into.id)
threads = []
emails = Email.objects.filter(Q(to=person)|Q(cc=person))
for e in emails:
if e.email_thread is not None:
threads.append(e.email_thread.id)
threads = Thread.objects.filter(id__in=threads).order_by('-date')
cache.set(cache_key, threads)
return index(request, threads=threads)
def contact_by_name(request, contact_name):
try:
contact = Person.objects.get(slug=contact_name)
except Person.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.contacts_index'))
except Thread.MultipleObjectsReturned, e:
return HttpResponseRedirect(reverse('mail.views.contacts_index'))
return contact_by_id(request, contact.id, suppress_redirect=True)
def contacts_index(request):
return index(request)
def thread_by_id(request, thread_id, suppress_redirect=False):
try:
thread = Thread.objects.get(id=thread_id)
except Thread.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.index'))
# if merged thread, redirect
if thread.merged_into is not None:
return HttpResponseRedirect('/thread/%d/' % thread.merged_into.id)
# if access by ID, redirect to descriptive URL
if (not suppress_redirect) and (len(thread.slug.strip())>3):
return HttpResponseRedirect('/thread/%s/' % thread.slug)
search = _search_tokens(request)
thread_starred = thread.id in _prepare_ids_from_cookie(request, 'kagan_star')
emails = _annotate_emails(Email.objects.filter(email_thread=thread).order_by('creation_date_time'), search)
return render_to_response('thread.html', {'thread': thread, 'thread_starred': thread_starred, 'emails': emails }, context_instance=RequestContext(request))
def thread_by_name(request, thread_name):
try:
thread = Thread.objects.get(slug=thread_name)
except Thread.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.index'))
except Thread.MultipleObjectsReturned, e:
return HttpResponseRedirect(reverse('mail.views.index'))
return thread_by_id(request, thread.id, suppress_redirect=True)
def search(request):
tokens = _search_tokens(request)
    if not tokens:
return HttpResponseRedirect(reverse('mail.views.index'))
sqs = SearchQuerySet().models(Thread)
for t in tokens:
sqs = sqs.filter_or(text_and_recipients=t)
sqs = sqs.order_by('-date')
if sqs.count()==0:
return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request))
return index(request, search=tokens, threads=sqs)
def star_record_ajax(request, thread_id, action):
try:
thread = Thread.objects.get(id=thread_id)
except Thread.DoesNotExist, e:
return HttpResponse('{ status: \'not_found\'}');
if thread.star_count is None:
thread.star_count = 0
if action=='add':
thread.star_count += 1
elif action=='remove':
thread.star_count -= 1
thread.save()
return HttpResponse('{ status: \'success\'}')
def starred(request):
if not request.POST.get('kagan_star'):
return HttpResponseRedirect(reverse('mail.views.index'))
starred_ids = _prepare_ids_from_cookie(request, 'kagan_star', method='post')
if len(starred_ids)==0:
return HttpResponseRedirect(reverse('mail.views.index'))
starred = Thread.objects.filter(id__in=starred_ids).order_by('-date')
if starred.count()==0:
return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request))
else:
return index(request, threads=starred)
def starred_all(request):
starred = Thread.objects.filter(star_count__gt=0).order_by('-star_count','-date')
if starred.count()==0:
return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request))
else:
return index(request, threads=starred)
|
bsd-3-clause
| 8,381,273,969,921,054,000
| 32.738095
| 159
| 0.631498
| false
| 3.683709
| false
| false
| false
|
nbeck90/city-swap
|
cityswap/requests/migrations/0001_initial.py
|
1
|
1445
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-11 16:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(default=b'Type your title here')),
('description', models.TextField(default=b'Type your description here')),
('origin', models.CharField(choices=[(b'Seattle', b'Seattle'), (b'Portland', b'Portland')], default=b'Seattle', max_length=25)),
('destination', models.CharField(choices=[(b'Seattle', b'Seattle'), (b'Portland', b'Portland')], default=b'Seattle', max_length=25)),
('date_created', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('courier', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='requests', to='profiles.Profile')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_from', to='profiles.Profile')),
],
),
]
|
mit
| 4,474,503,118,401,711,600
| 44.15625
| 163
| 0.624913
| false
| 3.96978
| false
| false
| false
|
ingmarlehmann/franca-tools
|
franca_parser/franca_parser/franca_ast.py
|
1
|
15547
|
#------------------------------------------------------------------------------
# franca_parser: franca_ast.py
#
# AST node classes: AST node classes for Franca IDL (*.fidl).
# Builds an AST to be used in other tools.
#
# This code is *heavily* inspired by 'pycparser' by Eli Bendersky
# (https://github.com/eliben/pycparser/)
#
# Copyright (C) 2016, Ingmar Lehmann
# License: BSD
#------------------------------------------------------------------------------
import sys
class Node(object):
def __init__(self):
print ("node constructor")
def children(self):
pass
def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
showcoord:
Do you want the coordinates of each Node to be
displayed.
"""
lead = ' ' * offset
if nodenames and _my_node_name is not None:
buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__+ ': ')
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
buf.write(attrstr)
if showcoord:
buf.write(' (at %s)' % self.coord)
buf.write('\n')
for (child_name, child) in self.children():
child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
class ArrayTypeDeclaration(Node):
def __init__(self, typename, type, dimension):
self.typename = typename
self.type = type
self.dimension = dimension
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
if self.typename is not None: nodelist.append(("typename", self.typename))
return tuple(nodelist)
attr_names = ('dimension',)
class Attribute(Node):
def __init__(self, typename, name):
self.typename = typename
self.name = name
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.typename is not None: nodelist.append(("typename", self.typename))
return tuple(nodelist)
attr_names = ()
class BroadcastMethod(Node):
def __init__(self, name, comment, out_args, is_selective=False):
self.name = name
self.comment = comment
self.out_args = out_args
self.is_selective = is_selective
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
if self.out_args is not None: nodelist.append(("out_args", self.out_args))
return tuple(nodelist)
attr_names = ('is_selective',)
class ComplexTypeDeclarationList(Node):
def __init__(self, members):
self.members = members
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("members[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Constant(Node):
    def __init__(self, value):
self.value = value
def children(self):
return tuple()
attr_names = ('value',)
class Enum(Node):
def __init__(self, name, values, comment=None):
self.name = name
self.values = values
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.values is not None: nodelist.append(("values", self.values))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Enumerator(Node):
def __init__(self, name, value=None, comment=None):
self.name = name
self.value = value
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.value is not None: nodelist.append(("value", self.value))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class EnumeratorList(Node):
def __init__(self, enumerators):
self.enumerators = enumerators
def children(self):
nodelist = []
for i, child in enumerate(self.enumerators or []):
nodelist.append(("enumerators[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class FrancaComment(Node):
def __init__(self, comment):
self.comment = comment
def children(self):
return tuple()
attr_names = ('comment',)
class FrancaDocument(Node):
def __init__(self, package_identifier, imports, child_objects):
self.package_identifier = package_identifier
self.imports = imports
self.child_objects = child_objects
def children(self):
nodelist = []
if self.package_identifier is not None: nodelist.append(("package_identifier", self.package_identifier))
if self.imports is not None: nodelist.append(("imports", self.imports))
if self.child_objects is not None: nodelist.append(("child_objects", self.child_objects))
return tuple(nodelist)
attr_names = ()
class ID(Node):
def __init__(self, id):
self.id = id
def children(self):
return tuple()
attr_names = ('id',)
class ImportIdentifier(Node):
def __init__(self, import_identifier):
self.import_identifier = import_identifier
def children(self):
return tuple()
attr_names = ('import_identifier',)
class ImportStatement(Node):
def __init__(self, import_identifier, filename):
self.import_identifier = import_identifier
self.filename = filename
def children(self):
nodelist = []
if self.import_identifier is not None: nodelist.append(("import_identifier", self.import_identifier))
if self.filename is not None: nodelist.append(("filename", self.filename))
return tuple(nodelist)
attr_names = ()
class ImportStatementList(Node):
def __init__(self, members):
self.members = members
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("imports[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class IntegerConstant(Node):
def __init__(self, value):
self.value = value
def children(self):
return tuple()
attr_names = ('value',)
class Interface(Node):
def __init__(self, name, members, comment=None):
self.name = name
self.members = members
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.members is not None: nodelist.append(("members", self.members))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Map(Node):
def __init__(self, name, key_type, value_type, comment=None):
self.name = name
self.key_type = key_type
self.value_type = value_type
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.key_type is not None: nodelist.append(("key_type", self.key_type))
if self.value_type is not None: nodelist.append(("value_type", self.value_type))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Method(Node):
def __init__(self, name, comment, body, is_fire_and_forget=False):
self.name = name
self.comment = comment
self.body = body
self.is_fire_and_forget = is_fire_and_forget
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ('is_fire_and_forget',)
class MethodBody(Node):
def __init__(self, in_args, out_args):
self.in_args = in_args
self.out_args = out_args
def children(self):
nodelist = []
if self.in_args is not None: nodelist.append(("in_args", self.in_args))
if self.out_args is not None: nodelist.append(("out_args", self.out_args))
return tuple(nodelist)
attr_names = ()
class MethodArgument(Node):
def __init__(self, type, name, comment=None):
self.type = type
self.name = name
self.comment = comment
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class MethodArgumentList(Node):
def __init__(self, args):
self.args = args
def children(self):
nodelist = []
for i, child in enumerate(self.args or []):
nodelist.append(("args[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class MethodOutArguments(Node):
def __init__(self, args):
self.args = args
def children(self):
nodelist = []
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
attr_names = ()
class MethodInArguments(Node):
def __init__(self, args):
self.args = args
def children(self):
nodelist = []
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
attr_names = ()
class PackageStatement(Node):
def __init__(self, package_identifier):
self.package_identifier = package_identifier
def children(self):
nodelist = []
if self.package_identifier is not None: nodelist.append(("package_identifier", self.package_identifier))
return tuple(nodelist)
attr_names = ()
class PackageIdentifier(Node):
def __init__(self, package_identifier):
self.package_identifier = package_identifier
def children(self):
return tuple()
attr_names = ('package_identifier',)
class RootLevelObjectList(Node):
def __init__(self, root_level_objects):
self.members = root_level_objects
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("root_objects[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class String(Node):
def __init__(self, string):
self.string = string
def children(self):
return tuple()
attr_names = ('string',)
class Struct(Node):
def __init__(self, name, struct_members, comment=None):
self.name = name
self.struct_members = struct_members
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.struct_members is not None: nodelist.append(("struct_members", self.struct_members))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class TypeCollection(Node):
def __init__(self, name, members, comment=None):
self.name = name
self.members = members
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.members is not None: nodelist.append(("members", self.members))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Typedef(Node):
def __init__(self, existing_type, new_type):
self.existing_type = existing_type
self.new_type = new_type
def children(self):
nodelist = []
if self.existing_type is not None: nodelist.append(("existing_type", self.existing_type))
if self.new_type is not None: nodelist.append(("new_type", self.new_type))
return tuple(nodelist)
attr_names = ()
class Typename(Node):
def __init__(self, typename):
self.typename = typename
def children(self):
nodelist = []
if self.typename is not None and isinstance(self.typename, Node): nodelist.append(("typename", self.typename))
return tuple(nodelist)
attr_names = ('typename',)
class Union(Node):
def __init__(self, name, member_list, comment=None):
self.name = name
self.member_list = member_list
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.member_list is not None: nodelist.append(("member_list", self.member_list))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Variable(Node):
def __init__(self, typename, name, comment):
self.typename = typename
self.name = name
self.comment = comment
def children(self):
nodelist = []
if self.typename is not None: nodelist.append(("typename", self.typename))
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class VariableList(Node):
def __init__(self, members):
self.members = members
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("members[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Version(Node):
def __init__(self, major, minor):
self.major = major
self.minor = minor
def children(self):
nodelist = []
if self.major is not None: nodelist.append(("major", self.major))
if self.minor is not None: nodelist.append(("minor", self.minor))
return tuple(nodelist)
attr_names = ()
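# Usage sketch (added example; the enum below is made up purely to illustrate
# the node API defined above):
if __name__ == '__main__':
    example = Enum(ID('Colors'),
                   EnumeratorList([Enumerator(ID('RED'), IntegerConstant(0)),
                                   Enumerator(ID('GREEN'), IntegerConstant(1))]))
    example.show(attrnames=True, nodenames=True)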
|
mpl-2.0
| 2,878,004,356,479,588,400
| 29.247082
| 118
| 0.589631
| false
| 4.06139
| false
| false
| false
|
nuobit/odoo-addons
|
connector_oxigesti/components_custom/binder.py
|
1
|
8847
|
# -*- coding: utf-8 -*-
# Copyright 2013-2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
"""
Binders
=======
Binders are components that know how to find the external ID for an
Odoo ID, how to find the Odoo ID for an external ID and how to
create the binding between them.
"""
import psycopg2
import json
from odoo import fields, models, tools
from odoo.addons.component.core import AbstractComponent
from contextlib import contextmanager
from odoo.addons.connector.exception import (RetryableJobError, )
import odoo
class BinderComposite(AbstractComponent):
""" The same as Binder but allowing composite external keys
"""
_name = 'base.binder.composite'
_inherit = 'base.binder'
_default_binding_field = 'oxigesti_bind_ids'
_external_display_field = 'external_id_display'
_odoo_extra_fields = []
@contextmanager
def _retry_unique_violation(self):
""" Context manager: catch Unique constraint error and retry the
job later.
When we execute several jobs workers concurrently, it happens
that 2 jobs are creating the same record at the same time (binding
record created by :meth:`_export_dependency`), resulting in:
IntegrityError: duplicate key value violates unique
constraint "my_backend_product_product_odoo_uniq"
DETAIL: Key (backend_id, odoo_id)=(1, 4851) already exists.
In that case, we'll retry the import just later.
.. warning:: The unique constraint must be created on the
binding record to prevent 2 bindings to be created
for the same External record.
"""
try:
yield
except psycopg2.IntegrityError as err:
if err.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise RetryableJobError(
'A database error caused the failure of the job:\n'
'%s\n\n'
'Likely due to 2 concurrent jobs wanting to create '
'the same record. The job will be retried later.' % err)
else:
raise
def _is_binding(self, binding):
try:
binding._fields[self._odoo_field]
except KeyError:
return False
return True
def _find_binding(self, relation, binding_extra_vals={}):
if self._is_binding(relation):
raise Exception("The source object %s must not be a binding" % relation.model._name)
if not set(self._odoo_extra_fields).issubset(set(binding_extra_vals.keys())):
raise Exception("If _odoo_extra_fields are defined %s, "
"you must specify the correpsonding binding_extra_vals %s" % (
self._odoo_extra_fields, binding_extra_vals))
domain = [(self._odoo_field, '=', relation.id),
(self._backend_field, '=', self.backend_record.id)]
for f in self._odoo_extra_fields:
domain.append((f, '=', binding_extra_vals[f]))
binding = self.model.with_context(
active_test=False).search(domain)
if binding:
binding.ensure_one()
return binding
def wrap_binding(self, relation, binding_field=None, binding_extra_vals={}):
if not relation:
return
if binding_field is None:
if not self._default_binding_field:
raise Exception("_default_binding_field defined on synchronizer class is mandatory")
binding_field = self._default_binding_field
# wrap is typically True if the relation is a 'product.product'
# record but the binding model is 'oxigesti.product.product'
wrap = relation._name != self.model._name
if wrap and hasattr(relation, binding_field):
binding = self._find_binding(relation, binding_extra_vals)
if not binding:
# we are working with a unwrapped record (e.g.
# product.template) and the binding does not exist yet.
# Example: I created a product.product and its binding
# oxigesti.product.product, it is exported, but we need to
# create the binding for the template.
_bind_values = {self._odoo_field: relation.id,
self._backend_field: self.backend_record.id}
_bind_values.update(binding_extra_vals)
# If 2 jobs create it at the same time, retry
# one later. A unique constraint (backend_id,
# odoo_id) should exist on the binding model
with self._retry_unique_violation():
binding = (self.model
.with_context(connector_no_export=True)
.sudo()
.create(_bind_values))
# Eager commit to avoid having 2 jobs
# exporting at the same time. The constraint
# will pop if an other job already created
# the same binding. It will be caught and
# raise a RetryableJobError.
if not odoo.tools.config['test_enable']:
self.env.cr.commit() # nowait
else:
            # If oxigesti_bind_ids does not exist we are typically in a
# "direct" binding (the binding record is the same record).
# If wrap is True, relation is already a binding record.
binding = relation
if not self._is_binding(binding):
raise Exception(
"Expected binding '%s' and found regular model '%s'" % (self.model._name, relation._name))
return binding
def to_internal(self, external_id, unwrap=False):
""" Give the Odoo recordset for an external ID
:param external_id: external ID for which we want
the Odoo ID
:param unwrap: if True, returns the normal record
else return the binding record
:return: a recordset, depending on the value of unwrap,
or an empty recordset if the external_id is not mapped
:rtype: recordset
"""
domain = [(self._backend_field, '=', self.backend_record.id),
(self._external_display_field, '=', json.dumps(external_id))]
bindings = self.model.with_context(active_test=False).search(
domain
)
if not bindings:
if unwrap:
return self.model.browse()[self._odoo_field]
return self.model.browse()
bindings.ensure_one()
if unwrap:
bindings = bindings[self._odoo_field]
return bindings
def to_external(self, binding, wrap=False, wrapped_model=None, binding_extra_vals={}):
""" Give the external ID for an Odoo binding ID
:param binding: Odoo binding for which we want the external id
:param wrap: if True, binding is a normal record, the
method will search the corresponding binding and return
the external id of the binding
:return: external ID of the record
"""
if isinstance(binding, models.BaseModel):
binding.ensure_one()
else:
if wrap:
if not wrapped_model:
raise Exception("The wrapped model is mandatory if binding is not an object")
binding = self.env[wrapped_model].browse(binding)
else:
binding = self.model.browse(binding)
if wrap:
binding = self._find_binding(binding, binding_extra_vals)
if not binding:
return None
return binding[self._external_field] or None
def bind(self, external_id, binding):
""" Create the link between an external ID and an Odoo ID
:param external_id: external id to bind
:param binding: Odoo record to bind
:type binding: int
"""
# Prevent False, None, or "", but not 0
        assert (external_id or external_id == 0) and binding, (
"external_id or binding missing, "
"got: %s, %s" % (external_id, binding)
)
# avoid to trigger the export when we modify the `external_id`
now_fmt = fields.Datetime.now()
if isinstance(binding, models.BaseModel):
binding.ensure_one()
else:
binding = self.model.browse(binding)
binding.with_context(connector_no_export=True).write({
self._external_field: external_id,
self._sync_date_field: now_fmt,
})
def _get_external_id(self, binding):
return None
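# Usage sketch (added comment; the concrete binder component and ids come from
# the surrounding connector environment and are assumed here):
#   binding = binder.to_internal(external_id)               # external id -> binding record
#   record = binder.to_internal(external_id, unwrap=True)   # external id -> plain odoo record
#   external_id = binder.to_external(binding)               # binding record -> external id
#   binder.bind(external_id, binding)                       # persist the link + sync date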
|
agpl-3.0
| 2,360,778,247,693,015,600
| 38.851351
| 106
| 0.58144
| false
| 4.523006
| false
| false
| false
|
helfertool/helfertool
|
src/registration/models/shift.py
|
1
|
7529
|
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.template.defaultfilters import date as date_f
from django.utils.timezone import localtime
from django.utils.translation import ugettext_lazy as _
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
import math
class Shift(models.Model):
""" A shift of one job.
Columns:
:job: job of this shift
:begin: begin of the shift
:end: end of the shift
:number: number of people
:blocked: shift is blocked, if the job is public
:hidden: shift is not displayed publicly
:name: name of the shift (optional)
"""
class Meta:
ordering = ['job', 'begin', 'end']
job = models.ForeignKey(
'Job',
on_delete=models.CASCADE,
)
name = models.CharField(
max_length=200,
verbose_name=_("Name (optional)"),
default="",
blank=True,
)
begin = models.DateTimeField(
verbose_name=_("Begin"),
)
end = models.DateTimeField(
verbose_name=_("End"),
)
number = models.IntegerField(
default=0,
verbose_name=_("Number of helpers"),
validators=[MinValueValidator(0)],
)
blocked = models.BooleanField(
default=False,
verbose_name=_("The shift is blocked and displayed as full."),
)
hidden = models.BooleanField(
default=False,
verbose_name=_("The shift is not visible."),
)
gifts = models.ManyToManyField(
'gifts.GiftSet',
verbose_name=_("Gifts"),
blank=True,
)
archived_number = models.IntegerField(
default=0,
verbose_name=_("Number of registered helpers for archived event"),
)
def __str__(self):
if self.name:
return "%s, %s, %s" % (self.job.name, self.name,
self.time_with_day())
else:
return "%s, %s" % (self.job.name, self.time_with_day())
def time(self):
""" Returns a string representation of the begin and end time.
The begin contains the date and time, the end only the time.
"""
return "%s, %s - %s" % (date_f(localtime(self.begin), 'DATE_FORMAT'),
date_f(localtime(self.begin), 'TIME_FORMAT'),
date_f(localtime(self.end), 'TIME_FORMAT'))
def time_hours(self):
""" Returns a string representation of the begin and end time.
Only the time is used, the date is not shown.
"""
return "%s - %s" % (date_f(localtime(self.begin), 'TIME_FORMAT'),
date_f(localtime(self.end), 'TIME_FORMAT'))
def time_with_day(self):
""" Returns a string representation of the day.
If the shift is on two days only the name of the first day is returned.
"""
day = date_f(localtime(self.begin), "l")
return "{}, {}".format(day, self.time())
def date(self):
""" Returns the day on which the shifts begins. """
return localtime(self.begin).date()
def num_helpers(self):
"""
        Returns the current number of registered helpers.
"""
return self.helper_set.count()
def num_helpers_archived(self):
""" Returns the current number of helpers- """
if self.job.event.archived:
return self.archived_number
else:
return self.helper_set.count()
def is_full(self):
""" Check if the shift is full and return a boolean. """
return self.num_helpers() >= self.number
def helpers_percent(self):
""" Calculate the percentage of registered helpers and returns an int.
If the maximal number of helpers for a shift is 0, 0 is returned.
"""
if self.number == 0:
return 0
num = self.num_helpers_archived()
return int(round(float(num) / self.number * 100.0, 0))
def helpers_percent_5percent(self):
"""
Returns the percentage of registered helpers in 5% steps.
So the returned value is between 0 and 20 (including both values).
This is used to generate the CSS class names defined in style.css.
Therefore, inline CSS can be avoided.
"""
percent = self.helpers_percent()
return math.ceil(percent / 5)
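    # Worked example (added comment): with 40 of 63 places filled,
    # helpers_percent() == 63 and ceil(63 / 5) == 13, i.e. the 65% bucket.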
def helpers_percent_vacant_5percent(self):
"""
Same as `helpers_percent_5percent`, but for the missing helpers.
"""
return 20 - self.helpers_percent_5percent()
@property
def shirt_sizes(self):
# data structure
shirts = OrderedDict()
for size, name in self.job.event.get_shirt_choices():
shirts.update({name: 0})
# collect all sizes, this must be the first shift of the helper
for helper in self.helper_set.all():
if helper.first_shift == self:
tmp = shirts[helper.get_shirt_display()]
shirts.update({helper.get_shirt_display(): tmp+1})
return shirts
def duplicate(self, new_date=None, new_job=None, gift_set_mapping=None):
""" Duplicate a shift. There are multiple possibilities:
* Shift is copied to new day in same job: set new_date
* Shift is copied to new job in same event: set new_job
* Shift is copied to new event: set new_job and gift_set_mapping
"""
new_shift = deepcopy(self)
new_shift.pk = None
new_shift.archived_number = 0
# maybe shift is copied to new job
if new_job:
new_shift.job = new_job
# if shift is copied to new event, move begin and end time according to diff in event dates
if self.job.event != new_job.event:
diff = new_job.event.date - self.job.event.date
new_shift.begin += diff
new_shift.end += diff
# maybe just the date is changed
if new_date:
new_shift.move_date(new_date)
# now save that
new_shift.save()
# and finally set the gifts again
for gift in self.gifts.all():
if gift_set_mapping:
new_shift.gifts.add(gift_set_mapping[gift])
else:
new_shift.gifts.add(gift)
return new_shift
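    # Usage sketch (added comment; `other_job`, `other_event_job` and `mapping`
    # are assumed objects): the three supported copy modes are
    #   shift.duplicate(new_date=new_day)                      # same job, new day
    #   shift.duplicate(new_job=other_job)                     # other job, same event
    #   shift.duplicate(new_job=other_event_job, gift_set_mapping=mapping)  # other event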
def move_date(self, new_date):
# current begin and end in local time
old_begin_localtime = localtime(self.begin)
old_end_localtime = localtime(self.end)
        # move the date alone without changing the time
diff_days = new_date - old_begin_localtime.date()
new_begin_date = old_begin_localtime.date() + diff_days
new_end_date = old_end_localtime.date() + diff_days
        # set the time separately (10 am should stay 10 am, even when a daylight saving change lies between the old and new date)
begin_time = old_begin_localtime.time()
end_time = old_end_localtime.time()
self.begin = datetime.combine(new_begin_date, begin_time)
self.end = datetime.combine(new_end_date, end_time)
@receiver(pre_delete, sender=Shift)
def shift_deleted(sender, instance, using, **kwargs):
    # m2m_changed does not trigger here, so remove the helpers before the shift is deleted
for helper in instance.helper_set.all():
helper.shifts.remove(instance)
|
agpl-3.0
| -1,135,561,369,318,406,400
| 31.175214
| 113
| 0.594501
| false
| 4.045674
| false
| false
| false
|
mpapierski/hb_balancer
|
protocol.py
|
1
|
6557
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hb_balancer
# High performance load balancer between Helbreath World Servers.
#
# Copyright (C) 2012 Michał Papierski <michal@papierski.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import struct
import random
import logging
from twisted.internet import reactor
from twisted.protocols.stateful import StatefulProtocol
from twisted.python import log
from packets import Packets
class BaseHelbreathProtocol(StatefulProtocol):
''' Basic Helbreath Protocol '''
def getInitialState(self):
'''
Protocol overview:
[Key unsigned byte] [Size unsigned short] [Data Size-bytes]
'''
return (self.get_key, 1)
def get_key(self, data):
''' Get key '''
self.key, = struct.unpack('<B', data)
return (self.get_data_size, 2)
def get_data_size(self, data):
''' Read data size '''
self.data_size, = struct.unpack('<H', data)
return (self.get_data, self.data_size - 3)
def get_data(self, data):
''' Read encoded data and decode it '''
if self.key > 0:
# Decode
data = list(data)
for i in range(len(data)):
data[i] = chr(((ord(data[i]) ^ (self.key ^ (self.data_size - 3 - i))) - (i ^ self.key)) % 256)
data = ''.join(data)
# Pass decoded data
self.raw_data(data)
return (self.get_key, 1)
def send_message(self, data):
''' Send a Helbreath Packet data '''
key = random.randint(0, 255)
if key > 0:
# Encode
data = list(data)
for i in range(len(data)):
data[i] = chr(((ord(data[i]) + (i ^ key)) ^ (key ^ (len(data) - i))) % 256)
data = ''.join(data)
self.transport.write(struct.pack('<BH', key, len(data) + 3) + data)
def raw_data(self, data):
''' Got packet '''
pass
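# Framing example (added comment; the byte values are illustrative): a packet
# goes out as struct.pack('<BH', key, len(payload) + 3) + obfuscated_payload,
# i.e. one random key byte, a little-endian total length (payload + 3 header
# bytes), then the payload obfuscated byte-by-byte with the key; get_key /
# get_data_size / get_data undo the same transformation on receive.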
class ProxyHelbreathProtocol(BaseHelbreathProtocol):
''' Proxy Helbreath protocol used for proxying packets '''
def connectionMade(self):
self.factory.success(self)
def login(self, account_name, account_password, world_name):
''' Request a login '''
        # Cast to str to be safe;
        # world_name could be either str or unicode.
self.send_message(struct.pack('<IH10s10s30s',
Packets.MSGID_REQUEST_LOGIN, # MsgID
0, # MsgType
str(account_name),
str(account_password),
str(world_name)))
def raw_data(self, data):
self.factory.receiver(data)
self.transport.loseConnection()
class HelbreathProtocol(BaseHelbreathProtocol):
def raw_data(self, data):
# Header
msg_id, msg_type = struct.unpack('<IH', data[:6])
# Process packet data
if msg_id == Packets.MSGID_REQUEST_LOGIN:
# Client is requesting login
packet_format = '<10s10s30s'
account_name, account_password, world_name = struct.unpack(
packet_format,
data[6:]
)
self.request_login(
account_name.rstrip('\x00'),
account_password.rstrip('\x00'),
world_name.rstrip('\x00')
)
elif msg_id == Packets.MSGID_REQUEST_ENTERGAME:
# Client is trying to enter game
packet_format = '<10s10s10s10si30s120s'
player_name, map_name, account_name, account_password, \
level, world_name, cmd_line = struct.unpack(
packet_format,
data[6:])
self.request_entergame(
msg_type,
player_name.rstrip('\x00'),
map_name.rstrip('\x00'),
account_name.rstrip('\x00'),
account_password.rstrip('\x00'),
level,
world_name.rstrip('\x00'),
cmd_line.rstrip('\x00'))
else:
# Abort if a packet is not (yet) known
self.transport.loseConnection()
def request_login(self, account_name, account_password, world_name):
''' Request client login
account_name -- Account name
account_password -- Account password
world_name -- World server name
'''
def world_is_down(failure = None):
''' The requested world is offline '''
self.send_message(struct.pack('<IH',
Packets.MSGID_RESPONSE_LOG,
Packets.DEF_LOGRESMSGTYPE_NOTEXISTINGWORLDSERVER))
reactor.callLater(10, self.transport.loseConnection)
def handle_response(data):
''' Pass data and close the connection nicely '''
self.send_message(data)
reactor.callLater(10, self.transport.loseConnection)
def connection_made(remote):
''' Connection is made. Request a login. '''
log.msg('Remote connection made!')
remote.login(
account_name,
account_password,
remote.factory.world_name
)
# Request connection to a world by its name, pass some callbacks
self.factory.connect_to_world(
world_name = world_name,
receiver = handle_response,
success = connection_made,
failure = world_is_down)
log.msg('Request world %s' % (world_name, ))
def request_entergame(self, msg_type, player_name, map_name, account_name,
account_password, level, world_name, cmd_line):
''' Client wants to enter game. '''
log.msg('Request entergame player(%s) map(%s) account(%s) world(%s)' % (
player_name, map_name, account_name, world_name))
def connection_made(remote):
            ''' Request enter game, construct exactly the same data.
TODO: Parse the msg_type. '''
log.msg('Requesting enter game...')
remote.send_message(struct.pack('<IH10s10s10s10si30s120s',
Packets.MSGID_REQUEST_ENTERGAME,
msg_type,
player_name,
map_name,
account_name,
account_password,
level,
str(remote.factory.world_name),
cmd_line))
def error_handler(failure = None):
''' Unable to connect to destination world '''
log.err('Enter game error for account(%s) at world(%s)' % (
account_name,
world_name))
self.send_message(struct.pack('<IHB',
Packets.MSGID_RESPONSE_ENTERGAME,
Packets.DEF_ENTERGAMERESTYPE_REJECT,
Packets.DEF_REJECTTYPE_DATADIFFERENCE))
reactor.callLater(10, self.transport.loseConnection)
def response_handler(data):
''' Pass the (modified) data '''
self.send_message(data)
self.factory.connect_to_world(
world_name = world_name,
receiver = response_handler,
success = connection_made,
failure = error_handler
)
|
agpl-3.0
| -1,380,034,530,598,753,500
| 28.399103
| 98
| 0.675412
| false
| 3.133843
| false
| false
| false
|
sameersingh/bibere
|
scripts/first_pages.py
|
1
|
1750
|
#!/usr/bin/python3
import argparse
from read_json import *
import tempfile
import shutil
import pypdftk
import os
def get_pdf(source, dest):
shutil.copy(source, dest)
def run(idir, bdir, ofile):
authors, venues, papers = read_all_info(idir)
fpdf_names = []
tmpdirname = tempfile.mkdtemp()
for p in papers:
if p['pubTypeSlot'] == 'Conference' or p['pubTypeSlot'] == 'Journal':
if 'pdfLink' not in p:
print("pdfLink missing:", p['id'])
elif p['pdfLink'].startswith("http"):
print("local link missing:", p['id'])
else:
source = bdir + "/" + p['pdfLink']
i = len(fpdf_names)
dest = "%s/%d.pdf" % (tmpdirname, i)
print("getting %s, putting it %s" % (source, dest))
get_pdf(source, dest)
tdir = "%s/%d/" % (tmpdirname, i)
os.mkdir(tdir)
fpdf_names.append(tdir + "page_01.pdf")
pypdftk.split(dest, tdir)
pypdftk.concat(fpdf_names, out_file=ofile)
shutil.rmtree(tmpdirname)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="directory containing the json files for authors/papers", required=True)
parser.add_argument("-b", "--basedir", help="the base directory of where the full PDFs reside.", required=True)
parser.add_argument("-o", "--output", help="output pdf file for the first pages", required=True)
args = parser.parse_args()
print("input: ", args.input)
print("basedir: ", args.basedir)
print("output: ", args.output)
run(args.input, args.basedir, args.output)
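# Example invocation (added comment; the paths are placeholders):
#   ./first_pages.py -i papers/json -b papers/pdfs -o first_pages.pdf
# builds one PDF containing page 1 of every conference/journal paper that has
# a local pdfLink.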
|
bsd-2-clause
| -2,552,111,543,381,166,600
| 36.888889
| 118
| 0.564
| false
| 3.535354
| false
| false
| false
|
openstack/yaql
|
yaql/language/contexts.py
|
1
|
9928
|
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from yaql.language import exceptions
from yaql.language import runner
from yaql.language import specs
from yaql.language import utils
class ContextBase(metaclass=abc.ABCMeta):
def __init__(self, parent_context=None, convention=None):
self._parent_context = parent_context
self._convention = convention
if convention is None and parent_context:
self._convention = parent_context.convention
@property
def parent(self):
return self._parent_context
@abc.abstractmethod
def register_function(self, spec, *args, **kwargs):
pass
@abc.abstractmethod
def get_data(self, name, default=None, ask_parent=True):
return default
def __getitem__(self, name):
return self.get_data(name)
@abc.abstractmethod
def __setitem__(self, name, value):
pass
@abc.abstractmethod
def __delitem__(self, name):
pass
@abc.abstractmethod
def __contains__(self, item):
return False
def __call__(self, name, engine, receiver=utils.NO_VALUE,
data_context=None, use_convention=False,
function_filter=None):
return lambda *args, **kwargs: runner.call(
name, self, args, kwargs, engine, receiver,
data_context, use_convention, function_filter)
@abc.abstractmethod
def get_functions(self, name, predicate=None, use_convention=False):
return [], False
@abc.abstractmethod
def delete_function(self, spec):
pass
def collect_functions(self, name, predicate=None, use_convention=False):
overloads = []
p = self
while p is not None:
context_predicate = None
if predicate:
context_predicate = lambda fd: predicate(fd, p) # noqa: E731
layer_overloads, is_exclusive = p.get_functions(
name, context_predicate, use_convention)
p = None if is_exclusive else p.parent
if layer_overloads:
overloads.append(layer_overloads)
return overloads
def create_child_context(self):
return type(self)(self)
@property
def convention(self):
return self._convention
@abc.abstractmethod
def keys(self):
return {}.keys()
class Context(ContextBase):
def __init__(self, parent_context=None, data=utils.NO_VALUE,
convention=None):
super(Context, self).__init__(parent_context, convention)
self._functions = {}
self._data = {}
self._exclusive_funcs = set()
if data is not utils.NO_VALUE:
self['$'] = data
@staticmethod
def _import_function_definition(fd):
return fd
def register_function(self, spec, *args, **kwargs):
exclusive = kwargs.pop('exclusive', False)
if not isinstance(spec, specs.FunctionDefinition) and callable(spec):
spec = specs.get_function_definition(
spec, *args, convention=self._convention, **kwargs)
spec = self._import_function_definition(spec)
if spec.is_method:
if not spec.is_valid_method():
raise exceptions.InvalidMethodException(spec.name)
self._functions.setdefault(spec.name, set()).add(spec)
if exclusive:
self._exclusive_funcs.add(spec.name)
def delete_function(self, spec):
self._functions.get(spec.name, set()).discard(spec)
self._exclusive_funcs.discard(spec.name)
def get_functions(self, name, predicate=None, use_convention=False):
name = name.rstrip('_')
if use_convention and self._convention is not None:
name = self._convention.convert_function_name(name)
if predicate is None:
predicate = lambda x: True # noqa: E731
return (
set(filter(predicate, self._functions.get(name, set()))),
name in self._exclusive_funcs
)
@staticmethod
def _normalize_name(name):
if not name.startswith('$'):
name = ('$' + name)
if name == '$':
name = '$1'
return name
def __setitem__(self, name, value):
self._data[self._normalize_name(name)] = value
def get_data(self, name, default=None, ask_parent=True):
name = self._normalize_name(name)
if name in self._data:
return self._data[name]
ctx = self.parent
while ask_parent and ctx:
result = ctx.get_data(name, utils.NO_VALUE, False)
if result is utils.NO_VALUE:
ctx = ctx.parent
else:
return result
return default
def __delitem__(self, name):
self._data.pop(self._normalize_name(name))
def __contains__(self, item):
if isinstance(item, specs.FunctionDefinition):
return item in self._functions.get(item.name, [])
if isinstance(item, str):
return self._normalize_name(item) in self._data
return False
def keys(self):
return self._data.keys()
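# Usage sketch (added comment): data lookups fall back to the parent context,
# e.g.
#   ctx = Context()
#   ctx['foo'] = 42
#   child = ctx.create_child_context()
#   child.get_data('foo')   # -> 42, resolved through the parent chain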
class MultiContext(ContextBase):
def __init__(self, context_list, convention=None):
self._context_list = context_list
if convention is None:
convention = context_list[0].convention
parents = tuple(
filter(lambda t: t, map(lambda t: t.parent, context_list))
)
if not parents:
super(MultiContext, self).__init__(None, convention)
elif len(parents) == 1:
super(MultiContext, self).__init__(parents[0], convention)
else:
super(MultiContext, self).__init__(MultiContext(parents),
convention)
def register_function(self, spec, *args, **kwargs):
self._context_list[0].register_function(spec, *args, **kwargs)
def get_data(self, name, default=None, ask_parent=True):
for context in self._context_list:
result = context.get_data(name, utils.NO_VALUE, False)
if result is not utils.NO_VALUE:
return result
ctx = self.parent
while ask_parent and ctx:
result = ctx.get_data(name, utils.NO_VALUE, False)
if result is utils.NO_VALUE:
ctx = ctx.parent
else:
return result
return default
def __setitem__(self, name, value):
self._context_list[0][name] = value
def __delitem__(self, name):
for context in self._context_list:
del context[name]
def create_child_context(self):
return Context(self)
def keys(self):
prev_keys = set()
for context in self._context_list:
for key in context.keys():
if key not in prev_keys:
prev_keys.add(key)
yield key
def delete_function(self, spec):
for context in self._context_list:
context.delete_function(spec)
def __contains__(self, item):
for context in self._context_list:
if item in context:
return True
return False
def get_functions(self, name, predicate=None, use_convention=False):
result = set()
is_exclusive = False
for context in self._context_list:
funcs, exclusive = context.get_functions(
name, predicate, use_convention)
result.update(funcs)
if exclusive:
is_exclusive = True
return result, is_exclusive
class LinkedContext(ContextBase):
"""Context that is as a proxy to another context but has its own parent."""
def __init__(self, parent_context, linked_context, convention=None):
self.linked_context = linked_context
if linked_context.parent:
super(LinkedContext, self).__init__(
LinkedContext(parent_context, linked_context.parent,
convention), convention)
else:
super(LinkedContext, self).__init__(parent_context, convention)
def register_function(self, spec, *args, **kwargs):
return self.linked_context.register_function(spec, *args, **kwargs)
def keys(self):
return self.linked_context.keys()
def get_data(self, name, default=None, ask_parent=True):
result = self.linked_context.get_data(
name, default=utils.NO_VALUE, ask_parent=False)
if result is utils.NO_VALUE:
if not ask_parent or not self.parent:
return default
return self.parent.get_data(name, default=default, ask_parent=True)
return result
def get_functions(self, name, predicate=None, use_convention=False):
return self.linked_context.get_functions(
name, predicate=predicate, use_convention=use_convention)
def delete_function(self, spec):
return self.linked_context.delete_function(spec)
def __contains__(self, item):
return item in self.linked_context
def __delitem__(self, name):
del self.linked_context[name]
def __setitem__(self, name, value):
self.linked_context[name] = value
def create_child_context(self):
return type(self.linked_context)(self)
|
apache-2.0
| 7,695,287,142,699,506,000
| 32.427609
| 79
| 0.599919
| false
| 4.192568
| false
| false
| false
|
ActiveState/code
|
recipes/Python/59867_crossplatform_import_hook_endofline/recipe-59867.py
|
1
|
1504
|
# Import hook for end-of-line conversion,
# by David Goodger (dgoodger@bigfoot.com).
# Put in your sitecustomize.py, anywhere on sys.path, and you'll be able to
# import Python modules with any of Unix, Mac, or Windows line endings.
import ihooks, imp, py_compile
class MyHooks(ihooks.Hooks):
def load_source(self, name, filename, file=None):
"""Compile source files with any line ending."""
if file:
file.close()
py_compile.compile(filename) # line ending conversion is in here
cfile = open(filename + (__debug__ and 'c' or 'o'), 'rb')
try:
return self.load_compiled(name, filename, cfile)
finally:
cfile.close()
class MyModuleLoader(ihooks.ModuleLoader):
def load_module(self, name, stuff):
"""Special-case package directory imports."""
file, filename, (suff, mode, type) = stuff
path = None
if type == imp.PKG_DIRECTORY:
stuff = self.find_module_in_dir("__init__", filename, 0)
file = stuff[0] # package/__init__.py
path = [filename]
try: # let superclass handle the rest
module = ihooks.ModuleLoader.load_module(self, name, stuff)
finally:
if file:
file.close()
if path:
module.__path__ = path # necessary for pkg.module imports
return module
ihooks.ModuleImporter(MyModuleLoader(MyHooks())).install()
|
mit
| 6,434,838,695,669,646,000
| 35.682927
| 75
| 0.588431
| false
| 3.978836
| false
| false
| false
|
RTHMaK/RPGOne
|
deep_qa-master/deep_qa/layers/recurrence_modes.py
|
1
|
1184
|
from typing import Any, Dict
from collections import OrderedDict
from keras import backend as K
class FixedRecurrence:
'''
This recurrence class simply performs a fixed number of memory network steps and
returns the memory representation and representation of the background knowledge
generated by the knowledge_selector and knowledge_combiner layers (the simplest
case being a weighted sum).
'''
def __init__(self, memory_network, params: Dict[str, Any]):
self.num_memory_layers = params.pop("num_memory_layers", 1)
self.memory_network = memory_network
def __call__(self, encoded_question, current_memory, encoded_background):
for _ in range(self.num_memory_layers):
current_memory, attended_knowledge = \
self.memory_network.memory_step(encoded_question, current_memory, encoded_background)
return current_memory, attended_knowledge
recurrence_modes = OrderedDict() # pylint: disable=invalid-name
recurrence_modes["fixed"] = FixedRecurrence
if K.backend() == 'tensorflow':
from .adaptive_recurrence import AdaptiveRecurrence
recurrence_modes["adaptive"] = AdaptiveRecurrence
|
apache-2.0
| 6,218,812,813,095,521,000
| 39.827586
| 101
| 0.723818
| false
| 4.274368
| false
| false
| false
|
disler/Kontact
|
App/Server.py
|
1
|
3547
|
from flask import Flask, render_template, current_app, Response, request
from server.DBInterface import DBInterface
from server.Validator import Validator
from server.WebUtil import WebUtil
import json
import ast
app = Flask(__name__)
#load database interface
db = DBInterface()
#load validator
validator = Validator.Kontact()
@app.route('/')
def Home():
"""
Landing page for application
"""
return current_app.send_static_file("index.html")
@app.route('/kontacts')
def Get():
"""
Get the list of kontacts
"""
return WebUtil.AsJson(db.Get("tblKontact"))
@app.route('/kontacts/<int:id>')
def GetByID(id):
"""
Get single record by id
"""
#get record by id from the kontact table
oRecord = db.GetByID("tblKontact", id)
#if the record returned is nothing return an empty object
if(oRecord is None):
oRecord = dict({})
return WebUtil.AsJson(oRecord)
@app.route('/kontacts', methods=["POST"])
def Create():
"""
Create a new kontact record
"""
#convert request data to json to be rendered as a python dict
oKontact = WebUtil.ToObject(request.data)
#if our processed data is a dict
if type(oKontact) is dict:
#validate to proper data structure
bValid = validator.Validate(oKontact)
#if valid kontact object is valid add to db
if bValid:
#create kontact obj
db.Create("tblKontact", oKontact)
#return success response
return WebUtil.SuccessResponse()
#kontact object is not valid return failure response
        else:
            return WebUtil.FailureResponse()
    #request body was not a valid dict, return failure response
    else:
        return WebUtil.FailureResponse()
@app.route("/kontacts/<int:id>", methods=["PUT"])
def Update(id):
"""
Update a currently existing kontact record
"""
#Convert request to python structure
oNewKontact = WebUtil.ToObject(request.data)
#get current kontact we're going to update
oPreviousKontact = db.GetByID("tblKontact", id)
#if the kontact we're trying to update exists
if(oPreviousKontact is not None):
#combine the old kontact with the new - new having priority
oMergedKontact = WebUtil.MergeDict(oPreviousKontact, oNewKontact)
#validate the newly merged kontact object
bValid = validator.Validate(oMergedKontact)
#if the kontact object is valid
if bValid:
#update the kontact object
db.Update("tblKontact", id, oMergedKontact)
#return failure response
return WebUtil.SuccessResponse()
#kontact object is not valid
else:
#return failure response
return WebUtil.FailureResponse()
#the kontact we're trying to update does not exists return failure response
else:
return WebUtil.FailureResponse()
@app.route("/kontacts/<int:id>", methods=["DELETE"])
def Delete(id):
"""
    Delete a kontact based on its id
"""
#get current kontact we're going to delete
oPreviousKontact = db.GetByID("tblKontact", id)
#if the kontact we're trying to delete exists
if(oPreviousKontact is not None):
#delete the kontact
db.Delete("tblKontact", id)
#return success response
return WebUtil.SuccessResponse()
#kontact does not exists return failure response
else:
return WebUtil.FailureResponse()
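# REST summary (added comment; the POST payload is illustrative and depends on
# the Validator.Kontact schema):
#   GET    /kontacts          -> list all kontacts
#   GET    /kontacts/<id>     -> single kontact, or {} if missing
#   POST   /kontacts          -> create, e.g. {"name": "Ada", "email": "ada@example.com"}
#   PUT    /kontacts/<id>     -> merge-update an existing kontact
#   DELETE /kontacts/<id>     -> delete a kontact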
#launch flask app
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=True, threaded=True)
|
mit
| -8,879,114,534,368,940,000
| 24.702899
| 79
| 0.643079
| false
| 3.809882
| false
| false
| false
|
arenadata/ambari
|
ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
|
1
|
7000
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestMahoutClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "MAHOUT/1.0.0.2.3/package"
STACK_VERSION = "2.3"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
classname = "MahoutServiceCheck",
command = "service_check",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/tmp/sample-mahout-test.txt',
content = 'Test text which will be converted to sequence file.',
mode = 0755,
)
self.maxDiff=None
self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
mode = 0770,
owner = 'ambari-qa',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'directory',
)
self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeoutput',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'directory',
)
self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'ambari-qa',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput/sample-mahout-test.txt',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
source = '/tmp/sample-mahout-test.txt',
user = 'hdfs',
dfs_type = '',
owner = 'ambari-qa',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'file',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
)
self.assertResourceCalled('Execute', 'mahout seqdirectory --input /user/ambari-qa/mahoutsmokeinput/'
'sample-mahout-test.txt --output /user/ambari-qa/mahoutsmokeoutput/ '
'--charset utf-8',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
'MAHOUT_HOME': '/usr/hdp/current/mahout-client'},
path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /user/ambari-qa/mahoutsmokeoutput/_SUCCESS',
try_sleep = 6,
tries = 10,
bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
user = 'ambari-qa',
conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
)
self.assertNoMoreResources()
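# --- Hedged usage sketch (not part of the original test) ---
# RMFTestCase-based checks behave like ordinary unittest test cases, so
# this service check test can be run on its own once the Ambari stack
# test harness is on the Python path.
if __name__ == '__main__':
    import unittest
    unittest.main()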
|
apache-2.0
| 1,205,591,747,481,414,000
| 53.6875
| 291
| 0.59
| false
| 3.721425
| true
| false
| false
|
frew/simpleproto
|
scons-local-1.1.0/SCons/Scanner/C.py
|
1
|
4739
|
"""SCons.Scanner.C
This module implements the dependency scanner for C/C++ code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/C.py 3603 2008/10/10 05:46:45 scons"
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.cpp
class SConsCPPScanner(SCons.cpp.PreProcessor):
"""
SCons-specific subclass of the cpp.py module's processing.
We subclass this so that: 1) we can deal with files represented
by Nodes, not strings; 2) we can keep track of the files that are
missing.
"""
def __init__(self, *args, **kw):
apply(SCons.cpp.PreProcessor.__init__, (self,)+args, kw)
self.missing = []
def initialize_result(self, fname):
self.result = SCons.Util.UniqueList([fname])
def finalize_result(self, fname):
return self.result[1:]
def find_include_file(self, t):
keyword, quote, fname = t
result = SCons.Node.FS.find_file(fname, self.searchpath[quote])
if not result:
self.missing.append((fname, self.current_file))
return result
def read_file(self, file):
try:
fp = open(str(file.rfile()))
except EnvironmentError, e:
self.missing.append((file, self.current_file))
return ''
else:
return fp.read()
def dictify_CPPDEFINES(env):
cppdefines = env.get('CPPDEFINES', {})
if cppdefines is None:
return {}
if SCons.Util.is_Sequence(cppdefines):
result = {}
for c in cppdefines:
if SCons.Util.is_Sequence(c):
result[c[0]] = c[1]
else:
result[c] = None
return result
if not SCons.Util.is_Dict(cppdefines):
return {cppdefines : None}
return cppdefines
class SConsCPPScannerWrapper:
"""
The SCons wrapper around a cpp.py scanner.
This is the actual glue between the calling conventions of generic
SCons scanners, and the (subclass of) cpp.py class that knows how
to look for #include lines with reasonably real C-preprocessor-like
evaluation of #if/#ifdef/#else/#elif lines.
"""
def __init__(self, name, variable):
self.name = name
self.path = SCons.Scanner.FindPathDirs(variable)
def __call__(self, node, env, path = ()):
cpp = SConsCPPScanner(current = node.get_dir(),
cpppath = path,
dict = dictify_CPPDEFINES(env))
result = cpp(node)
for included, includer in cpp.missing:
fmt = "No dependency generated for file: %s (included from: %s) -- file not found"
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
fmt % (included, includer))
return result
def recurse_nodes(self, nodes):
return nodes
def select(self, node):
return self
def CScanner():
"""Return a prototype Scanner instance for scanning source files
that use the C pre-processor"""
# Here's how we would (or might) use the CPP scanner code above that
# knows how to evaluate #if/#ifdef/#else/#elif lines when searching
# for #includes. This is commented out for now until we add the
# right configurability to let users pick between the scanners.
#return SConsCPPScannerWrapper("CScanner", "CPPPATH")
cs = SCons.Scanner.ClassicCPP("CScanner",
"$CPPSUFFIXES",
"CPPPATH",
'^[ \t]*#[ \t]*(?:include|import)[ \t]*(<|")([^>"]+)(>|")')
return cs
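# --- Hedged usage sketch (not part of the original file) ---
# dictify_CPPDEFINES normalizes the several shapes CPPDEFINES may take
# (a sequence of names, a sequence of (name, value) pairs, a dict, or a
# single string) into one dict. A plain dict stands in for a real SCons
# Environment here, since only its .get() method is used.
def _example_dictify_cppdefines():
    env = {'CPPDEFINES': [('DEBUG', 1), 'UNICODE']}
    print(dictify_CPPDEFINES(env))  # expected: {'DEBUG': 1, 'UNICODE': None}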
|
bsd-2-clause
| -3,936,452,968,050,813,000
| 36.611111
| 94
| 0.638109
| false
| 4.053892
| false
| false
| false
|
uclouvain/osis_louvain
|
base/models/learning_unit_year.py
|
1
|
24536
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import re
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator
from django.db import models
from django.db.models import Q
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _, ngettext
from base.models import entity_container_year as mdl_entity_container_year
from base.models.academic_year import compute_max_academic_year_adjournment, AcademicYear, \
MAX_ACADEMIC_YEAR_FACULTY, starting_academic_year
from base.models.enums import active_status, learning_container_year_types
from base.models.enums import learning_unit_year_subtypes, internship_subtypes, \
learning_unit_year_session, entity_container_year_link_type, quadrimesters, attribution_procedure
from base.models.enums.learning_container_year_types import COURSE, INTERNSHIP
from base.models.enums.learning_unit_year_periodicity import PERIODICITY_TYPES, ANNUAL, BIENNIAL_EVEN, BIENNIAL_ODD
from base.models.learning_unit import LEARNING_UNIT_ACRONYM_REGEX_ALL, REGEX_BY_SUBTYPE
from osis_common.models.serializable_model import SerializableModel, SerializableModelAdmin
AUTHORIZED_REGEX_CHARS = "$*+.^"
REGEX_ACRONYM_CHARSET = "[A-Z0-9" + AUTHORIZED_REGEX_CHARS + "]+"
MINIMUM_CREDITS = 0.0
MAXIMUM_CREDITS = 500
def academic_year_validator(value):
academic = AcademicYear.objects.get(pk=value)
academic_year_max = compute_max_academic_year_adjournment()
if academic.year > academic_year_max:
raise ValidationError(_('learning_unit_creation_academic_year_max_error').format(academic_year_max))
class LearningUnitYearAdmin(SerializableModelAdmin):
list_display = ('external_id', 'acronym', 'specific_title', 'academic_year', 'credits', 'changed', 'structure',
'status')
list_filter = ('academic_year', 'decimal_scores', 'summary_locked')
search_fields = ['acronym', 'structure__acronym', 'external_id']
actions = [
'resend_messages_to_queue',
'apply_learning_unit_year_postponement'
]
def apply_learning_unit_year_postponement(self, request, queryset):
# Potential circular imports
from base.business.learning_units.automatic_postponement import LearningUnitAutomaticPostponement
from base.views.common import display_success_messages, display_error_messages
result, errors = LearningUnitAutomaticPostponement(queryset.filter(learning_container_year__isnull=False))
count = len(result)
display_success_messages(
request, ngettext(
'%(count)d learning unit has been postponed with success',
'%(count)d learning units have been postponed with success', count
) % {'count': count}
)
if errors:
display_error_messages(request, "{} : {}".format(
_("The following learning units ended with error"),
", ".join([str(error) for error in errors])
))
apply_learning_unit_year_postponement.short_description = _("Apply postponement on learning unit year")
class LearningUnitYearWithContainerManager(models.Manager):
def get_queryset(self):
        # FIXME For the moment, learning_unit_year records without a container must be hidden!
return super().get_queryset().filter(learning_container_year__isnull=False)
class ExtraManagerLearningUnitYear(models.Model):
    # This class ensures that the default manager (from serializable model) is not overridden by this manager
objects_with_container = LearningUnitYearWithContainerManager()
class Meta:
abstract = True
class LearningUnitYear(SerializableModel, ExtraManagerLearningUnitYear):
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
academic_year = models.ForeignKey(AcademicYear, verbose_name=_('academic_year'),
validators=[academic_year_validator])
learning_unit = models.ForeignKey('LearningUnit')
learning_container_year = models.ForeignKey('LearningContainerYear', null=True)
changed = models.DateTimeField(null=True, auto_now=True)
acronym = models.CharField(max_length=15, db_index=True, verbose_name=_('code'),
validators=[RegexValidator(LEARNING_UNIT_ACRONYM_REGEX_ALL)])
specific_title = models.CharField(max_length=255, blank=True, null=True,
verbose_name=_('title_proper_to_UE'))
specific_title_english = models.CharField(max_length=250, blank=True, null=True,
verbose_name=_('english_title_proper_to_UE'))
subtype = models.CharField(max_length=50, choices=learning_unit_year_subtypes.LEARNING_UNIT_YEAR_SUBTYPES,
default=learning_unit_year_subtypes.FULL)
credits = models.DecimalField(null=True, max_digits=5, decimal_places=2,
validators=[MinValueValidator(MINIMUM_CREDITS), MaxValueValidator(MAXIMUM_CREDITS)],
verbose_name=_('credits'))
decimal_scores = models.BooleanField(default=False)
structure = models.ForeignKey('Structure', blank=True, null=True)
internship_subtype = models.CharField(max_length=250, blank=True, null=True,
verbose_name=_('internship_subtype'),
choices=internship_subtypes.INTERNSHIP_SUBTYPES)
status = models.BooleanField(default=False, verbose_name=_('active_title'))
session = models.CharField(max_length=50, blank=True, null=True,
choices=learning_unit_year_session.LEARNING_UNIT_YEAR_SESSION,
verbose_name=_('session_title'))
quadrimester = models.CharField(max_length=9, blank=True, null=True, verbose_name=_('quadrimester'),
choices=quadrimesters.LEARNING_UNIT_YEAR_QUADRIMESTERS)
attribution_procedure = models.CharField(max_length=20, blank=True, null=True, verbose_name=_('procedure'),
choices=attribution_procedure.ATTRIBUTION_PROCEDURES)
summary_locked = models.BooleanField(default=False, verbose_name=_("summary_locked"))
professional_integration = models.BooleanField(default=False, verbose_name=_('professional_integration'))
campus = models.ForeignKey('Campus', null=True, verbose_name=_("learning_location"))
language = models.ForeignKey('reference.Language', null=True, verbose_name=_('language'))
periodicity = models.CharField(max_length=20, choices=PERIODICITY_TYPES, default=ANNUAL,
verbose_name=_('periodicity'))
_warnings = None
class Meta:
unique_together = (('learning_unit', 'academic_year'), ('acronym', 'academic_year'))
permissions = (
("can_receive_emails_about_automatic_postponement", "Can receive emails about automatic postponement"),
)
def __str__(self):
return u"%s - %s" % (self.academic_year, self.acronym)
@property
def subdivision(self):
if self.acronym and self.learning_container_year:
return self.acronym.replace(self.learning_container_year.acronym, "")
return None
@property
def parent(self):
if self.subdivision and self.is_partim():
return LearningUnitYear.objects.filter(
subtype=learning_unit_year_subtypes.FULL,
learning_container_year=self.learning_container_year,
).get()
return None
@property
def same_container_learning_unit_years(self):
return LearningUnitYear.objects.filter(
learning_container_year=self.learning_container_year
).order_by('acronym')
@cached_property
def allocation_entity(self):
return self.get_entity(entity_container_year_link_type.ALLOCATION_ENTITY)
@cached_property
def requirement_entity(self):
return self.get_entity(entity_container_year_link_type.REQUIREMENT_ENTITY)
@property
def complete_title(self):
complete_title = self.specific_title
if self.learning_container_year:
complete_title = ' - '.join(filter(None, [self.learning_container_year.common_title, self.specific_title]))
return complete_title
@property
def complete_title_english(self):
complete_title_english = self.specific_title_english
if self.learning_container_year:
complete_title_english = ' - '.join(filter(None, [
self.learning_container_year.common_title_english,
self.specific_title_english,
]))
return complete_title_english
@property
def container_common_title(self):
if self.learning_container_year:
return self.learning_container_year.common_title
return ''
def get_partims_related(self):
if self.is_full() and self.learning_container_year:
return self.learning_container_year.get_partims_related()
return LearningUnitYear.objects.none()
def find_list_group_element_year(self):
return self.child_leaf.filter(child_leaf=self).select_related('parent')
def get_learning_unit_next_year(self):
try:
return self.learning_unit.learningunityear_set.get(academic_year__year=(self.academic_year.year + 1))
except LearningUnitYear.DoesNotExist:
return None
@property
def in_charge(self):
return self.learning_container_year and self.learning_container_year.in_charge
@property
def container_type_verbose(self):
container_type = ''
if self.learning_container_year:
container_type = _(self.learning_container_year.container_type)
if self.learning_container_year.container_type in (COURSE, INTERNSHIP):
container_type += " ({subtype})".format(subtype=_(self.subtype))
return container_type
@property
def status_verbose(self):
return _("active") if self.status else _("inactive")
@property
def internship_subtype_verbose(self):
return _('to_complete') if self.learning_container_year and \
self.learning_container_year.container_type == INTERNSHIP and \
not self.internship_subtype else self.internship_subtype
@property
def get_previous_acronym(self):
return find_lt_learning_unit_year_with_different_acronym(self)
@property
def periodicity_verbose(self):
if self.periodicity:
return _(self.periodicity)
return None
def find_gte_learning_units_year(self):
return LearningUnitYear.objects.filter(learning_unit=self.learning_unit,
academic_year__year__gte=self.academic_year.year) \
.order_by('academic_year__year')
def find_gt_learning_units_year(self):
return LearningUnitYear.objects.filter(learning_unit=self.learning_unit,
academic_year__year__gt=self.academic_year.year) \
.order_by('academic_year__year')
def is_past(self):
return self.academic_year.is_past()
# FIXME move this method to business/perm file
def can_update_by_faculty_manager(self):
if not self.learning_container_year:
return False
starting_year = starting_academic_year().year
year = self.academic_year.year
return starting_year <= year <= starting_year + MAX_ACADEMIC_YEAR_FACULTY
def is_full(self):
return self.subtype == learning_unit_year_subtypes.FULL
def is_partim(self):
return self.subtype == learning_unit_year_subtypes.PARTIM
def get_entity(self, entity_type):
entity = None
        # @TODO: Remove this condition when classes are removed from learning unit year
if self.learning_container_year:
entity_container_yr = mdl_entity_container_year.search(
link_type=entity_type,
learning_container_year=self.learning_container_year,
).get()
entity = entity_container_yr.entity if entity_container_yr else None
return entity
def clean(self):
learning_unit_years = find_gte_year_acronym(self.academic_year, self.acronym)
if getattr(self, 'learning_unit', None):
learning_unit_years = learning_unit_years.exclude(learning_unit=self.learning_unit)
self.clean_acronym(learning_unit_years)
def clean_acronym(self, learning_unit_years):
if self.acronym in learning_unit_years.values_list('acronym', flat=True):
raise ValidationError({'acronym': _('already_existing_acronym')})
if not re.match(REGEX_BY_SUBTYPE[self.subtype], self.acronym):
raise ValidationError({'acronym': _('invalid_acronym')})
@property
def warnings(self):
if self._warnings is None:
self._warnings = []
self._warnings.extend(self._check_credits_is_integer())
self._warnings.extend(self._check_partim_parent_credits())
self._warnings.extend(self._check_internship_subtype())
self._warnings.extend(self._check_partim_parent_status())
self._warnings.extend(self._check_partim_parent_periodicity())
self._warnings.extend(self._check_learning_component_year_warnings())
self._warnings.extend(self._check_learning_container_year_warnings())
self._warnings.extend(self._check_entity_container_year_warnings())
return self._warnings
    # TODO: Currently, we should warn the user when the credits value is not an integer
def _check_credits_is_integer(self):
warnings = []
if self.credits and self.credits % 1 != 0:
warnings.append(_('The credits value should be an integer'))
return warnings
def _check_partim_parent_credits(self):
children = self.get_partims_related()
return [_('The credits value of the partim %(acronym)s is greater or equal than the credits value of the '
'parent learning unit.') % {'acronym': child.acronym}
for child in children if child.credits and child.credits >= self.credits]
def _check_internship_subtype(self):
warnings = []
if getattr(self, 'learning_container_year', None):
if (self.learning_container_year.container_type == learning_container_year_types.INTERNSHIP and
not self.internship_subtype):
warnings.append(_('missing_internship_subtype'))
return warnings
def _check_partim_parent_status(self):
warnings = []
if self.parent:
if not self.parent.status and self.status:
warnings.append(_('This partim is active and the parent is inactive'))
else:
if self.status is False and find_partims_with_active_status(self).exists():
warnings.append(_("The parent is inactive and there is at least one partim active"))
return warnings
def _check_partim_parent_periodicity(self):
warnings = []
if self.parent:
if self.parent.periodicity in [BIENNIAL_EVEN, BIENNIAL_ODD] and self.periodicity != self.parent.periodicity:
warnings.append(_("This partim is %(partim_periodicity)s and the parent is %(parent_periodicty)s")
% {'partim_periodicity': self.periodicity_verbose,
'parent_periodicty': self.parent.periodicity_verbose})
else:
if self.periodicity in [BIENNIAL_EVEN, BIENNIAL_ODD] and \
find_partims_with_different_periodicity(self).exists():
warnings.append(_("The parent is %(parent_periodicty)s and there is at least one partim which is not "
"%(parent_periodicty)s") % {'parent_periodicty': self.periodicity_verbose})
return warnings
def _check_learning_component_year_warnings(self):
_warnings = []
components_queryset = self.learning_container_year.learningcomponentyear_set
all_components = components_queryset.all().order_by('learningunitcomponent__learning_unit_year__acronym')
for learning_component_year in all_components:
_warnings.extend(learning_component_year.warnings)
return _warnings
def _check_learning_container_year_warnings(self):
return self.learning_container_year.warnings
def _check_entity_container_year_warnings(self):
_warnings = []
entity_container_years = mdl_entity_container_year.find_by_learning_container_year(self.learning_container_year)
for entity_container_year in entity_container_years:
_warnings.extend(entity_container_year.warnings)
return _warnings
def is_external(self):
return hasattr(self, "externallearningunityear")
def get_by_id(learning_unit_year_id):
return LearningUnitYear.objects.select_related('learning_container_year__learning_container') \
.get(pk=learning_unit_year_id)
def find_by_acronym(acronym):
return LearningUnitYear.objects.filter(acronym=acronym).select_related('learning_container_year')
def _is_regex(acronym):
return set(AUTHORIZED_REGEX_CHARS).intersection(set(acronym))
def search(academic_year_id=None, acronym=None, learning_container_year_id=None, learning_unit=None,
title=None, subtype=None, status=None, container_type=None, tutor=None,
summary_responsible=None, requirement_entities=None, learning_unit_year_id=None, *args, **kwargs):
queryset = LearningUnitYear.objects_with_container
if learning_unit_year_id:
queryset = queryset.filter(id=learning_unit_year_id)
if academic_year_id:
queryset = queryset.filter(academic_year=academic_year_id)
if acronym:
if _is_regex(acronym):
queryset = queryset.filter(acronym__iregex=r"(" + acronym + ")")
else:
queryset = queryset.filter(acronym__icontains=acronym)
if learning_container_year_id is not None:
if isinstance(learning_container_year_id, list):
queryset = queryset.filter(learning_container_year__in=learning_container_year_id)
elif learning_container_year_id:
queryset = queryset.filter(learning_container_year=learning_container_year_id)
if requirement_entities:
queryset = queryset.filter(
learning_container_year__entitycontaineryear__entity__entityversion__in=requirement_entities,
learning_container_year__entitycontaineryear__type=entity_container_year_link_type.REQUIREMENT_ENTITY)
if learning_unit:
queryset = queryset.filter(learning_unit=learning_unit)
if title:
queryset = queryset. \
filter(Q(specific_title__iregex=title) | Q(learning_container_year__common_title__iregex=title))
if subtype:
queryset = queryset.filter(subtype=subtype)
if status:
queryset = queryset.filter(status=convert_status_bool(status))
if container_type:
queryset = queryset.filter(learning_container_year__container_type=container_type)
if tutor:
for name in tutor.split():
filter_by_first_name = {_build_tutor_filter(name_type='first_name'): name}
filter_by_last_name = {_build_tutor_filter(name_type='last_name'): name}
queryset = queryset.filter(Q(**filter_by_first_name) | Q(**filter_by_last_name)).distinct()
if summary_responsible:
queryset = find_summary_responsible_by_name(queryset, summary_responsible)
return queryset.select_related('learning_container_year', 'academic_year')
def find_summary_responsible_by_name(queryset, name):
for term in name.split():
queryset = queryset.filter(
Q(attribution__tutor__person__first_name__icontains=term) |
Q(attribution__tutor__person__last_name__icontains=term)
)
return queryset.filter(attribution__summary_responsible=True).distinct()
def _build_tutor_filter(name_type):
return '__'.join(['learningunitcomponent', 'learning_component_year', 'attributionchargenew', 'attribution',
'tutor', 'person', name_type, 'iregex'])
def convert_status_bool(status):
if status in (active_status.ACTIVE, active_status.INACTIVE):
boolean = status == active_status.ACTIVE
else:
boolean = status
return boolean
def find_gte_year_acronym(academic_yr, acronym):
return LearningUnitYear.objects.filter(academic_year__year__gte=academic_yr.year,
acronym__iexact=acronym)
def find_lt_year_acronym(academic_yr, acronym):
return LearningUnitYear.objects.filter(academic_year__year__lt=academic_yr.year,
acronym__iexact=acronym).order_by('academic_year')
def check_if_acronym_regex_is_valid(acronym):
return isinstance(acronym, str) and \
not acronym.startswith('*') and \
re.fullmatch(REGEX_ACRONYM_CHARSET, acronym.upper()) is not None
def find_max_credits_of_related_partims(a_learning_unit_year):
return a_learning_unit_year.get_partims_related().aggregate(max_credits=models.Max("credits"))["max_credits"]
def find_partims_with_active_status(a_learning_unit_year):
return a_learning_unit_year.get_partims_related().filter(status=True)
def find_partims_with_different_periodicity(a_learning_unit_year):
return a_learning_unit_year.get_partims_related().exclude(periodicity=a_learning_unit_year.periodicity)
def find_by_learning_unit(a_learning_unit):
return search(learning_unit=a_learning_unit)
def find_by_entities(entities):
return LearningUnitYear.objects.filter(learning_container_year__entitycontaineryear__entity__in=entities)
def find_latest_by_learning_unit(a_learning_unit):
return search(learning_unit=a_learning_unit).order_by('academic_year').last()
def find_lt_learning_unit_year_with_different_acronym(a_learning_unit_yr):
return LearningUnitYear.objects.filter(learning_unit__id=a_learning_unit_yr.learning_unit.id,
academic_year__year__lt=a_learning_unit_yr.academic_year.year,
proposallearningunit__isnull=True) \
.order_by('-academic_year') \
.exclude(acronym__iexact=a_learning_unit_yr.acronym).first()
def find_learning_unit_years_by_academic_year_tutor_attributions(academic_year, tutor):
""" In this function, only learning unit year with containers is visible! [no classes] """
qs = LearningUnitYear.objects_with_container.filter(
academic_year=academic_year,
attribution__tutor=tutor,
).distinct().order_by('academic_year__year', 'acronym')
return qs
def toggle_summary_locked(learning_unit_year_id):
luy = LearningUnitYear.objects.get(pk=learning_unit_year_id)
luy.summary_locked = not luy.summary_locked
luy.save()
return luy
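# --- Hedged usage sketch (not part of the original file) ---
# _is_regex decides whether an acronym search string is handled as a
# regular expression (iregex lookup) or a plain substring (icontains) in
# search(), while check_if_acronym_regex_is_valid applies the stricter
# charset rule. The acronyms below are illustrative values only.
def _example_acronym_helpers():
    print(bool(_is_regex("LDROI1001")))                 # False -> icontains
    print(bool(_is_regex("LDROI1.*")))                  # True  -> iregex
    print(check_if_acronym_regex_is_valid("LDROI1*"))   # True
    print(check_if_acronym_regex_is_valid("*LDROI1"))   # False (leading '*')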
|
agpl-3.0
| -1,512,369,831,425,935,600
| 43.853748
| 120
| 0.663746
| false
| 3.836591
| false
| false
| false
|
openworm/Blender2NeuroML
|
src/Entity/Entity.py
|
1
|
21651
|
'''
Created on 03.06.2011
@author: Sergey Khayrulin
'''
from __future__ import absolute_import
from Entity.Vertex import Vertex
from Entity.Face import Face
from Entity.Slice import Slice, AlternateSlice
from Entity.Helper import *
import pprint
import math
class Entity(object):
'''
    Main class that processes data from a Blender file or a WRL (formatted) file.
'''
def __init__(self):
'''
Constructor
'''
self.vertices = []
self.faces = Faces()
self.resulting_points = []
self.checked_points = []
self.neuronInfo = ''
def clean_all(self):
self.faces.clean_all()
def add_vertex(self, coordinates):
'''
        Add a vertex to the vertex collection. Takes a sequence of
        three coordinates, creates a Vertex from them and appends it
        to the collection.
'''
try:
if len(coordinates) != 3:
raise ParserException('Error')
point = Vertex(float(coordinates[0]),float(coordinates[1]),float(coordinates[2]))
self.vertices.append(point)
except ParserException as ex:
            print('The vertex data appears to be incorrect')
raise ex
def add_face(self, points_arr):
'''
        Add a face to the faces collection. Takes a sequence of
        indices referring to positions in the vertex collection.
'''
try:
if len(points_arr) < 4:
                raise ParserException('Face contains fewer than 4 points')
face = Face(self.vertices[int(points_arr[0])],self.vertices[int(points_arr[1])],self.vertices[int(points_arr[2])],self.vertices[int(points_arr[3])])
face.order = [int(points_arr[0]),int(points_arr[1]),int(points_arr[2]),int(points_arr[3])]
self.faces[face.order] = face
#print("add_face %s" % face.order)
#self.faces.append(face)
except ParserException as ex:
print('Error:%s'%ex)
print(points_arr)
raise ex
def findCenterOfSoma(self, use_method2 = False):
'''
        Find the starting point for the main algorithm. The first
        point should lie in the soma, which is the biggest segment
        of the cell.
'''
iter = 0
temp_points = []
slices = []
for p in range(len(self.vertices)):
temp_points.append(HelpPoint(p,0))
if use_method2:
startSlice = Slice(temp_points,self.faces, use_method2 = True, vertices = self.vertices)
point_on_perimeter = self.vertices[startSlice[0].point]
self.checked_points += startSlice.extra_dict['points_in_soma']
self.start_center_point = startSlice.extra_dict['center_pt']
self.start_center_point.diametr = 2 * self.start_center_point.len_between_point(point_on_perimeter)
self.starting_slice = startSlice
return
slice = Slice(temp_points,self.faces)
slices.append(slice)
while len(slice) != 0:
temp_points = list(filter(lambda p: not slice.__contains__(p), temp_points))
slice = None
slice = Slice(temp_points,self.faces)
if len(slice) != 0:
slices.append(slice)
#if not (iter % 10):
# print('slice %d iter %d' % (len(temp_points), iter))
#slice.printSlice()
#print slice.getPerimetr(self.vertices)
iter += 1
# find slice with longest line segments
perimiter_coll = sorted(slices,key=lambda slice:slice.getPerimetr(self.vertices), reverse=True)
startSlice = Slice(perimiter_coll[0],self.faces)
#print("findCenterOfSoma while loop done %d %d" % (iter, len(temp_points)))
try:
self.start_center_point = self.__getCenterPoint(startSlice, minimal = True)
except IndexError:
print("no center point startSlice %d perimiter_coll %d"
% (len(startSlice), len(perimiter_coll[0])))
for face in self.faces.keys():
print("face order %s" % face)
# the coordinates aren't organized in a pattern that the normal
# code in Slice can understand, so we use an alternate method
return self.findCenterOfSoma(use_method2 = True)
if not use_method2:
point_on_perimeter = self.vertices[perimiter_coll[0][0].point]
self.start_center_point.diametr = 2 * self.start_center_point.len_between_point(point_on_perimeter)
def getAllBrunches(self):
'''
        Return a dictionary of key => value pairs where the key is the
        name of a neurite and the value is a sorted sequence of indices
        into the resulting_points collection, for instance 'axon' => [1,2,4].
'''
brunches_temp = {}
result_coll = {}
i = 0
roots = [self.resulting_points.index(p) for p in self.resulting_points \
if p.parentPoint == 0 and self.resulting_points.index(p) != 0]
for root in roots:
brunches_temp[root] = []
for p in self.resulting_points:
parent = p.getRoot(self.resulting_points)
if parent == root:
brunches_temp[root].append(self.resulting_points.index(p))
# the first of these two lines works with python3, the second with python2:
#for k1, value in sorted(brunches_temp.iteritems(),key=lambda k,v:(len(v),k),reverse=True): # we try to determine
for k1, value in sorted(brunches_temp.iteritems(),key=lambda (k,v):(len(v),k),reverse=True): # we try to determine
if i == 0:
for j in value:
self.resulting_points[j].isAxon = True
result_coll['axon'] = value
else:
for j in value:
if self.resulting_points[j].cable != 2:
self.resulting_points[j].isDendrite = True
self.resulting_points[j].cable = 3
result_coll['dendrite' + str(i)] = value
i += 1
return result_coll
def use_alt_slice(self):
return hasattr(self, 'starting_slice')
def create_slice(self, coll, allow_checked = False):
if self.use_alt_slice():
if not allow_checked:
coll = filter(lambda p: not self.checked_points.__contains__(p.point), coll)
slice = AlternateSlice(coll,self.faces, self.vertices, self.checked_points, self.vertices[self.starting_slice[0].point], None, allow_checked)
else:
slice = Slice(coll,self.faces)
return slice
def branching(self, slice):
if not self.use_alt_slice():
return False
for p in range(len(slice)):
if len(self.starting_slice.extra_dict['adjacentPoints'][slice[p].point]) == 5:
return True
return False
def find_point(self,center_point=Vertex(),iteration=0,
parentPoint=0, isNeurite=False,
isBrunchStart=False, _slice=None):
'''
        Main function: finds the axon, dendrites and neurites.
'''
vector_len = []
print("enter find_point iteration %d isBrunchStart %d" % (iteration, isBrunchStart))
if iteration == 0: center_point = self.start_center_point
if isNeurite:
res_point = Result_Point(center_point,parentPoint,2,isBrunchStart)
res_point.isNeurite = True
self.resulting_points.append(res_point)
elif iteration != 0:
self.resulting_points.append(Result_Point(center_point,parentPoint,1,isBrunchStart))
elif iteration == 0:
self.resulting_points.append(Result_Point(center_point,parentPoint,0,isBrunchStart))
current_point = len(self.resulting_points) - 1
for p in range(len(self.vertices)):
vector_len.append(HelpPoint(p,self.vertices[p].len_between_point(center_point)))
vector_len = sorted(vector_len,key=lambda p:p.lenght)
tmp_list = []
if iteration != 0:
'''
            If iteration != 0 we should find the next 4 or more vertices (6 or 8 if we are at a branching point)
'''
if _slice is not None:
slice = _slice
else:
slice = self.create_slice(vector_len)
adjacentPoints = []
use_v5 = iteration >= 3 and self.branching(slice) # with 5 adjacent points
for p in range(4):
if use_v5 and not isBrunchStart:
c = slice[p].point
tmp_list.append(c)
adjacentPoints.append(HelpPoint(c, self.vertices[c].len_between_point(center_point)))
if use_v5 and isBrunchStart:
#print("use_v5 br %d p %d" % (len(slice), p))
coll = self.__find_adjacent_vertices5(slice[p].point)
elif p != 3:
coll = self.__find_adjacent_vertices(slice[p].point, slice[p+1].point)
else:
coll = self.__find_adjacent_vertices(slice[p].point, slice[0].point)
#print("%d-%d has %d adj v" % (slice[p].point, slice[(p+1)%4].point, len(coll)))
for c in coll:
helpPoint = HelpPoint(c,self.vertices[c].len_between_point(center_point))
#print("%3d %3d is checked? %d" % (p, c, self.checked_points.__contains__(c)))
if not adjacentPoints.__contains__(helpPoint):
if not self.checked_points.__contains__(c):
adjacentPoints.append(helpPoint)
tmp_list.append(c)
print("got %d adjacentPoints %s" % (len(adjacentPoints), tmp_list))
if len(adjacentPoints) == 0: return
'''
            Finding 8 adjacent vertices means we are inside a branching segment
'''
if len(adjacentPoints) > 4 and not (use_v5 and isBrunchStart):
if self.__more4AdjacentPointCase(adjacentPoints, slice, isBrunchStart,iteration, current_point, center_point):
return
del vector_len[:]
vector_len = [HelpPoint(p.point,self.vertices[p.point].len_between_point(center_point))
for p in adjacentPoints if not self.checked_points.__contains__(p.point)]
vector_len = sorted(vector_len,key=lambda p:p.lenght)
if self.use_alt_slice():
vector_len = filter(lambda p: not self.checked_points.__contains__(p.point), vector_len)
if iteration == 0:
adj_dict = self.starting_slice.extra_dict['adjacentPoints']
else:
adj_dict = None
slice = AlternateSlice(vector_len,self.faces, self.vertices, self.checked_points, self.vertices[self.starting_slice[0].point], adj_dict)
else:
slice = Slice(vector_len,self.faces)
lenOfSlice = len(slice)
print("lenOfSlice %d iter %d %d" % (lenOfSlice, iteration, len(vector_len)))
if lenOfSlice == 0:
slice = vector_len
if len(slice) < 4:
return
new_center_point = self.__getCenterPoint(slice)
iteration += 1
if lenOfSlice != 0:
self.find_point(new_center_point,iteration,parentPoint=current_point,isNeurite=isNeurite,isBrunchStart=False, _slice=slice)
else:
if isNeurite:
res_point = Result_Point(new_center_point,current_point,2,False)
res_point.isNeurite = True
self.resulting_points.append(res_point)
elif iteration != 0:
self.resulting_points.append(Result_Point(new_center_point,current_point,1,False))
if iteration == 1:
self.__checkDendrite(slice, center_point, vector_len,current_point)
def __getCenterPoint(self, slice, minimal = False):
'''
        Get the center point as the center of mass of the input slice collection (usually 4 points)
'''
x=y=z=0
n_points = 4
if len(slice) < 4:
print("Bad slice len %d" % len(slice))
if minimal and len(slice) > 0:
n_points = len(slice)
else:
raise IndexError
for p in range(n_points):
x += self.vertices[slice[p].point].x
y += self.vertices[slice[p].point].y
z += self.vertices[slice[p].point].z
if not self.checked_points.__contains__(slice[p].point):
self.checked_points.append(slice[p].point)
center_point = Vertex(x/n_points,y/n_points,z/n_points)
center_point.diametr = 2 * center_point.len_between_point(self.vertices[slice[0].point])
if isinstance(slice, Slice):
slice.printSlice()
else:
print(slice)
return center_point
def __find_adjacent_vertices(self, num_p1,num_p2):
'''
        Find the vertices adjacent to the two given points
'''
adjacentVertices = []
for key,f in self.faces.items():
if f.order.__contains__(num_p1) and f.order.__contains__(num_p2):
for p in f.order:
if p != num_p1 and p != num_p2:
adjacentVertices.append(p)
return adjacentVertices
def __find_adjacent_vertices5(self, num_p1):
'''
        Find the vertices adjacent to the given point
'''
adjacentVertices = []
for key,f in self.faces.items():
if f.order.__contains__(num_p1):
for p in f.order:
if p != num_p1 and not (p in adjacentVertices):
near_old_point = False
for r_pt in self.resulting_points:
dist = r_pt.point.len_between_point(self.vertices[p])
if dist < r_pt.point.diametr:
near_old_point = True
break
if not near_old_point:
adjacentVertices.append(p)
return adjacentVertices
def __fillUpBrachesCollection(self, adjacentPoints, slice):
'''
Fill branches collection
'''
branchesCollection = []
for i in range(4):
for p1 in adjacentPoints:
for p2 in adjacentPoints:
if p1 == p2:
continue
s = self.create_slice([slice[i], slice[(i + 1) % 4], p1, p2],
allow_checked = True)
if (len(s) == 4):
if not branchesCollection.__contains__(s):
branchesCollection.append(s)
if len(self.create_slice(adjacentPoints)) != 0:
branchesCollection.append(self.create_slice(adjacentPoints))
return branchesCollection
def __more4AdjacentPointCase(self, adjacentPoints, slice, isBrunch,iteration, current_point, center_point):
'''
        Handle the case where the algorithm finds more than 4 adjacent points
'''
branchesCollection = self.__fillUpBrachesCollection(adjacentPoints, slice)
if len(branchesCollection) >= 2 :
center_points = {}
thirdBrunchCollection = []
for branch in branchesCollection:
branch_center_point = self.__getCenterPoint(branch)
center_points[branch_center_point] = branch
print("%d center_points" % (len(center_points.keys())))
for branch_center_point,branch in center_points.items():
old_num_r_points = len(self.resulting_points)
print("start branch %d %d %d %d size %d %3d resulting_points"
% (branch[0].point, branch[1].point, branch[2].point, branch[3].point, len(branch), len(self.resulting_points)))
self.find_point(branch_center_point,iteration,current_point,True,True, _slice=branch)
print("finish branch %d %3d resulting_points" % (branch[0].point, len(self.resulting_points)))
if self.use_alt_slice() and len(self.resulting_points) == old_num_r_points + 1:
del self.resulting_points[-1]
print("undo branches of length 1")
if len(adjacentPoints) > 6:
thirdBrunchCollection.extend(branch)
thirdBrunchPoints = [HelpPoint(p.point,self.vertices[p.point].len_between_point(center_point)) \
for p in thirdBrunchCollection if not slice.__contains__(p)]
slice_t = self.create_slice(thirdBrunchPoints)
if len(slice_t) == 4:
third_brunch_center_point = self.__getCenterPoint(slice_t)
self.find_point(third_brunch_center_point,iteration, current_point,True,True, _slice=slice_t)
return True
elif len(branchesCollection) == 0 or (len(branchesCollection) == 1 and not isBrunch):
sortedadjacentPoints = sorted(adjacentPoints,key=lambda p:p.lenght)
first_slice = self.create_slice(sortedadjacentPoints)
second_slice = self.create_slice(filter(lambda p: first_slice.__contains__(p) == False, sortedadjacentPoints))
perimeter_1 = first_slice.getPerimetr(self.vertices)
perimeter_2 = second_slice.getPerimetr(self.vertices)
if perimeter_1 > perimeter_2 and perimeter_2 != 0:
new_center_point = self.__getCenterPoint(second_slice)
self.find_point(new_center_point,iteration, current_point,False,False, _slice=second_slice)
return True
elif perimeter_1 < perimeter_2 or perimeter_2 == 0:
if perimeter_1 == 0:
if len(branchesCollection) == 1:
first_slice = branchesCollection[0]
else:
first_slice.getFaceFromColl(adjacentPoints,self.faces)
new_center_point = self.__getCenterPoint(first_slice)
self.find_point(new_center_point,iteration, current_point,isBrunch,False, _slice=first_slice)
else:
new_center_point = self.__getCenterPoint(first_slice)
self.find_point(new_center_point,iteration, current_point,False,False, _slice=first_slice)
return True
elif len(branchesCollection) == 1 and isBrunch:
slice = branchesCollection[0]
if len(slice) == 0:
slice = slice.getFaceFromColl(adjacentPoints,self.faces)
try:
new_center_point = self.__getCenterPoint(slice)
except IndexError:
print("Warning: __getCenterPoint failed, slice len %d, %d adjacentPoints"
% (len(slice), len(adjacentPoints)))
slice.printSlice()
return False
self.find_point(new_center_point,iteration, parentPoint=current_point,isNeurite=True,isBrunchStart=False, _slice=slice)
return True
return False
def __checkDendrite(self, slice, center_point, vector_len, current_point):
'''
        Private method.
        Check whether the soma has other output processes;
        if it does, run find_point for each of them.
'''
iteration = 1
vector_len = filter(lambda p: slice.__contains__(p) == False
and self.checked_points.__contains__(p.point) == False, vector_len)
vector_len = sorted(vector_len,key=lambda p:p.lenght)
for i in range(5):
slice2 = self.create_slice(vector_len)
if (len(slice2) == 4 and
int(slice.getPerimetr(self.vertices) / slice2.getPerimetr(self.vertices)) <= 1 and
int(slice2.getPerimetr(self.vertices) / slice.getPerimetr(self.vertices)) <= 1):
new_center_point = self.__getCenterPoint(slice2)
iteration += 1
self.find_point(new_center_point,iteration,parentPoint=current_point,isNeurite=False,isBrunchStart=False, _slice=slice2)
vector_len = filter(lambda p: slice2.__contains__(p) == False
and self.checked_points.__contains__(p.point) == False, vector_len)
vector_len = sorted(vector_len, key=lambda p:p.lenght)
#
# check_unused_coordinates might be of some use in checking for
# sections of a neuron that were omitted due to flaws in the code
#
def check_unused_coordinates(self):
for key,f in self.faces.items():
unused = True
for p in f.order:
if p in self.checked_points:
unused = False
break
if unused:
print("unused face %s" % f.order)
|
mit
| -6,809,170,652,540,044,000
| 47.095238
| 160
| 0.549813
| false
| 4.067443
| false
| false
| false
|
cansik/pyunicon
|
pyunicon/Cocoa/CocoaMouse.py
|
1
|
2057
|
from Quartz.CoreGraphics import CGEventCreateMouseEvent
from Quartz.CoreGraphics import CGEventPost
from Quartz.CoreGraphics import kCGEventMouseMoved
from Quartz.CoreGraphics import kCGEventLeftMouseDown
from Quartz.CoreGraphics import kCGEventLeftMouseUp
from Quartz.CoreGraphics import kCGEventRightMouseDown
from Quartz.CoreGraphics import kCGEventRightMouseUp
from Quartz.CoreGraphics import kCGMouseButtonLeft
from Quartz.CoreGraphics import kCGHIDEventTap
from Quartz.CoreGraphics import CGEventCreate
from Quartz.CoreGraphics import CGEventGetLocation
from Quartz.CoreGraphics import CGWarpMouseCursorPosition
from pyunicon.util import UCMouseKey
__author__ = 'cansik'
class CocoaMouse(object):
def __init__(self):
pass
def __mouse_event(self, type, x, y):
mouse_event = CGEventCreateMouseEvent(None, type, (x, y), kCGMouseButtonLeft)
CGEventPost(kCGHIDEventTap, mouse_event)
def move(self, x, y):
self.__mouse_event(kCGEventMouseMoved, x, y)
CGWarpMouseCursorPosition((x, y))
# todo: fix race condition (get position is not accurate)
def get_position(self):
mouse_event = CGEventCreate(None)
pos = CGEventGetLocation(mouse_event)
return pos.x, pos.y
def press(self, mouse_key):
x, y = self.get_position()
if mouse_key is UCMouseKey.UC_MOUSE_LEFT:
self.__mouse_event(kCGEventLeftMouseDown, x, y)
elif mouse_key is UCMouseKey.UC_MOUSE_MIDDLE:
print("mouse middle not supported on OSX!")
elif mouse_key is UCMouseKey.UC_MOUSE_RIGHT:
self.__mouse_event(kCGEventRightMouseDown, x, y)
def release(self, mouse_key):
x, y = self.get_position()
if mouse_key is UCMouseKey.UC_MOUSE_LEFT:
self.__mouse_event(kCGEventLeftMouseUp, x, y)
elif mouse_key is UCMouseKey.UC_MOUSE_MIDDLE:
print("mouse middle not supported on OSX!")
elif mouse_key is UCMouseKey.UC_MOUSE_RIGHT:
self.__mouse_event(kCGEventRightMouseUp, x, y)
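# --- Hedged usage sketch (not part of the original file) ---
# Moves the cursor and performs a left click; this only works on macOS
# with the pyobjc Quartz bindings installed, and the coordinates are
# illustrative values.
if __name__ == '__main__':
    mouse = CocoaMouse()
    mouse.move(200, 200)
    mouse.press(UCMouseKey.UC_MOUSE_LEFT)
    mouse.release(UCMouseKey.UC_MOUSE_LEFT)
    print(mouse.get_position())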
|
mit
| 2,062,059,134,703,579,100
| 37.092593
| 85
| 0.712202
| false
| 3.528302
| false
| false
| false
|
AhmedHani/Neural-Networks-for-ML
|
Implementations/simple_word2vec/cbow.py
|
1
|
2983
|
import tensorflow as tf
class CBOW(object):
def __init__(self, args):
self.__args = args
self.__ngram_size = args.ngram_size
self.__input_size = self.__ngram_size - 1
self.__vocab_size = args.vocab_size + 1
self.__embedding_dim = args.embedding_dim
self.__learning_rate = args.learning_rate
self.__activation_function = args.activation_function
self.__optimizer = args.optimizer
self.__loss_function = args.loss_function
def init_session(self, restore=False):
self.__session = tf.Session()
if restore:
self.__saver = tf.train.Saver()
self.__saver.restore(self.__session, self.__args.model)
def build(self):
self.__input = tf.placeholder(tf.float32, [None, self.__input_size * self.__vocab_size])
self.__output = tf.placeholder(tf.float32, [None, self.__vocab_size])
self.__input_to_hidden_weights = tf.get_variable("ih_w", shape=[self.__input_size * self.__vocab_size, self.__embedding_dim],
initializer=tf.contrib.layers.xavier_initializer())
self.__input_to_hidden_bias = tf.Variable(tf.ones(self.__embedding_dim))
self.__hidden_to_output_weights = tf.get_variable("ho_w", shape=[self.__embedding_dim, self.__vocab_size], initializer=tf.contrib.layers.xavier_initializer())
self.__hidden_to_output_bias = tf.Variable(tf.ones([self.__vocab_size]))
if self.__optimizer.lower() == "sgd":
self.__optimizer = tf.train.GradientDescentOptimizer(self.__learning_rate)
elif self.__optimizer.lower() == "adam":
self.__optimizer = tf.train.AdamOptimizer(self.__learning_rate)
self.__embedding_layer = tf.matmul(self.__input, self.__input_to_hidden_weights) + self.__input_to_hidden_bias
if self.__activation_function.lower() == "tanh":
self.__embedding_layer = tf.nn.tanh(self.__embedding_layer)
elif self.__activation_function.lower() == "relu":
self.__embedding_layer = tf.nn.relu(self.__embedding_layer)
self.__output_layer = tf.matmul(self.__embedding_layer, self.__hidden_to_output_weights) + self.__hidden_to_output_bias
self.__output_layer = tf.nn.softmax(self.__output_layer)
if self.__loss_function.lower() == "mse":
self.__cost_function = 0.5 * tf.reduce_sum(tf.square(self.__output_layer - self.__output))
elif self.__loss_function.lower() == "ce":
self.__cost_function = -tf.reduce_mean((self.__output * tf.log(self.__output_layer)) + ((1 - self.__output) * tf.log(1 - self.__output_layer)))
self.__train = self.__optimizer.minimize(self.__cost_function)
def run(self, x_input, y_output):
self.__session.run(tf.global_variables_initializer())
error = self.__session.run(self.__cost_function, feed_dict={self.__input: x_input, self.__output: y_output})
return error
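# --- Hedged usage sketch (not part of the original file) ---
# The constructor only needs an object exposing the hyper-parameters read
# in __init__; argparse.Namespace is a convenient stand-in. The values are
# illustrative, and the graph-building code above assumes TensorFlow 1.x.
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(ngram_size=3, vocab_size=5000, embedding_dim=128,
                     learning_rate=0.01, activation_function="tanh",
                     optimizer="adam", loss_function="ce", model=None)
    cbow = CBOW(args)
    cbow.build()
    cbow.init_session()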
|
gpl-3.0
| -121,940,656,376,534,380
| 49.576271
| 166
| 0.612471
| false
| 3.719451
| false
| false
| false
|
alexsiri7/RoboScrum
|
stories/views.py
|
1
|
2140
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import Context, loader
from stories.models import Story, Sprint
from django.http import HttpResponse
from django.views.generic import DetailView, ListView
class SprintView(DetailView):
days = ["", "","Mon", "", "", "", "Tue", "", "", "", "Wed", "", "", "", "Thu", "", "Fri"]
model = Sprint
def get_context_data(self, **kwargs):
context = super(SprintView, self).get_context_data(**kwargs)
if self.object.is_finished:
context['burndown'] = self.burndown()
else:
context['burndown_schema'] = self.burndown_schema()
return context
def burndown(self):
total = self.object.original_commitment()
burn = map(lambda (i,e): (self.days[i], total-total*i/4, total*1.2-total*i/4*1.2, total*0.8-total*i/4*0.8,total-e),enumerate(self.object.burnup()))
return burn
def burndown_schema(self):
total = self.object.original_commitment()
burn = map(lambda (i,e): (
self.days[i],
total-total*i/17,
total*1.2-total*i/17*1.2,
total*0.8-total*i/17*0.8)
,enumerate(range(17)))
return burn
class SprintListView(ListView):
queryset = Sprint.objects.all().order_by('-start_date')
def get_context_data(self, **kwargs):
context = super(SprintListView, self).get_context_data(**kwargs)
context['TVI'] = self.getTVI()
context['Points'] = self.getPoints()
context['Pct'] = self.getPct()
return context
def getTVI(self):
return map(lambda s: (s.number, s.targeted_value_increase()), self.object_list.order_by('start_date').filter(is_finished=True).all())
def getPoints(self):
return map(lambda s: (s.number, s.work_capacity()*100/s.member_dedication, s.velocity()*100/s.member_dedication, s.original_commitment()*100/s.member_dedication),
self.object_list.order_by('start_date').filter(is_finished=True).all())
def getPct(self):
return map(lambda s: (s.number, s.focus_factor(), s.accuracy_of_estimation(), s.accuracy_of_commit()),
self.object_list.order_by('start_date').filter(is_finished=True).all())
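# --- Hedged usage sketch (not part of the original file) ---
# These class-based views are normally exposed through a URLconf; the
# patterns below use the Django 1.x-style url() helper that matches the
# era of this code, and the route paths/names are illustrative, not taken
# from the original project.
from django.conf.urls import url
example_urlpatterns = [
    url(r'^sprints/$', SprintListView.as_view(), name='sprint_list'),
    url(r'^sprints/(?P<pk>\d+)/$', SprintView.as_view(), name='sprint_detail'),
]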
|
gpl-3.0
| 218,280,061,264,985,020
| 43.583333
| 171
| 0.659346
| false
| 3.065903
| false
| false
| false
|
Knio/dominate
|
dominate/dom_tag.py
|
1
|
12996
|
__license__ = '''
This file is part of Dominate.
Dominate is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Dominate is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with Dominate. If not, see
<http://www.gnu.org/licenses/>.
'''
# pylint: disable=bad-indentation, bad-whitespace, missing-docstring
import copy
import numbers
from collections import defaultdict, namedtuple
from functools import wraps
import threading
try:
# Python 3
from collections.abc import Callable
except ImportError:
# Python 2.7
from collections import Callable
try:
basestring = basestring
except NameError: # py3
basestring = str
unicode = str
try:
import greenlet
except ImportError:
greenlet = None
def _get_thread_context():
context = [threading.current_thread()]
if greenlet:
context.append(greenlet.getcurrent())
return hash(tuple(context))
class dom_tag(object):
is_single = False # Tag does not require matching end tag (ex. <hr/>)
is_pretty = True # Text inside the tag should be left as-is (ex. <pre>)
# otherwise, text will be escaped() and whitespace may be
# modified
is_inline = False
def __new__(_cls, *args, **kwargs):
'''
    Check if a bare tag is being used as a decorator
(called with a single function arg).
decorate the function and return
'''
if len(args) == 1 and isinstance(args[0], Callable) \
and not isinstance(args[0], dom_tag) and not kwargs:
wrapped = args[0]
@wraps(wrapped)
def f(*args, **kwargs):
with _cls() as _tag:
return wrapped(*args, **kwargs) or _tag
return f
return object.__new__(_cls)
def __init__(self, *args, **kwargs):
'''
Creates a new tag. Child tags should be passed as arguments and attributes
should be passed as keyword arguments.
There is a non-rendering attribute which controls how the tag renders:
* `__inline` - Boolean value. If True renders all children tags on the same
line.
'''
self.attributes = {}
self.children = []
self.parent = None
self.document = None
# Does not insert newlines on all children if True (recursive attribute)
self.is_inline = kwargs.pop('__inline', self.is_inline)
self.is_pretty = kwargs.pop('__pretty', self.is_pretty)
#Add child elements
if args:
self.add(*args)
for attr, value in kwargs.items():
self.set_attribute(*type(self).clean_pair(attr, value))
self._ctx = None
self._add_to_ctx()
# context manager
frame = namedtuple('frame', ['tag', 'items', 'used'])
# stack of frames
_with_contexts = defaultdict(list)
def _add_to_ctx(self):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
self._ctx = stack[-1]
stack[-1].items.append(self)
def __enter__(self):
stack = dom_tag._with_contexts[_get_thread_context()]
stack.append(dom_tag.frame(self, [], set()))
return self
def __exit__(self, type, value, traceback):
thread_id = _get_thread_context()
stack = dom_tag._with_contexts[thread_id]
frame = stack.pop()
for item in frame.items:
if item in frame.used: continue
self.add(item)
if not stack:
del dom_tag._with_contexts[thread_id]
def __call__(self, func):
'''
tag instance is being used as a decorator.
wrap func to make a copy of this tag
'''
# remove decorator from its context so it doesn't
# get added in where it was defined
if self._ctx:
self._ctx.used.add(self)
@wraps(func)
def f(*args, **kwargs):
tag = copy.deepcopy(self)
tag._add_to_ctx()
with tag:
return func(*args, **kwargs) or tag
return f
def set_attribute(self, key, value):
'''
Add or update the value of an attribute.
'''
if isinstance(key, int):
self.children[key] = value
elif isinstance(key, basestring):
self.attributes[key] = value
else:
raise TypeError('Only integer and string types are valid for assigning '
'child tags and attributes, respectively.')
__setitem__ = set_attribute
def delete_attribute(self, key):
if isinstance(key, int):
del self.children[key:key+1]
else:
del self.attributes[key]
__delitem__ = delete_attribute
def setdocument(self, doc):
'''
Creates a reference to the parent document to allow for partial-tree
validation.
'''
# assume that a document is correct in the subtree
if self.document != doc:
self.document = doc
for i in self.children:
if not isinstance(i, dom_tag): return
i.setdocument(doc)
def add(self, *args):
'''
Add new child tags.
'''
for obj in args:
if isinstance(obj, numbers.Number):
# Convert to string so we fall into next if block
obj = str(obj)
if isinstance(obj, basestring):
obj = escape(obj)
self.children.append(obj)
elif isinstance(obj, dom_tag):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
stack[-1].used.add(obj)
self.children.append(obj)
obj.parent = self
obj.setdocument(self.document)
elif isinstance(obj, dict):
for attr, value in obj.items():
self.set_attribute(*dom_tag.clean_pair(attr, value))
elif hasattr(obj, '__iter__'):
for subobj in obj:
self.add(subobj)
else: # wtf is it?
raise ValueError('%r not a tag or string.' % obj)
if len(args) == 1:
return args[0]
return args
def add_raw_string(self, s):
self.children.append(s)
def remove(self, obj):
self.children.remove(obj)
def clear(self):
for i in self.children:
if isinstance(i, dom_tag) and i.parent is self:
i.parent = None
self.children = []
def get(self, tag=None, **kwargs):
'''
Recursively searches children for tags of a certain
type with matching attributes.
'''
    # Workaround: dom_tag cannot be referenced in its own method signature,
    # so default the tag class here instead.
if tag is None: tag = dom_tag
attrs = [(dom_tag.clean_attribute(attr), value)
for attr, value in kwargs.items()]
results = []
for child in self.children:
if (isinstance(tag, basestring) and type(child).__name__ == tag) or \
(not isinstance(tag, basestring) and isinstance(child, tag)):
if all(child.attributes.get(attribute) == value
for attribute, value in attrs):
# If the child is of correct type and has all attributes and values
# in kwargs add as a result
results.append(child)
if isinstance(child, dom_tag):
# If the child is a dom_tag extend the search down through its children
results.extend(child.get(tag, **kwargs))
return results
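    # Example (sketch): search the subtree by tag class or tag name plus attributes:
    #
    #   page.get(div)               # all div descendants
    #   page.get('p', cls='note')   # all <p> descendants with class="note"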
def __getitem__(self, key):
'''
Returns the stored value of the specified attribute or child
(if it exists).
'''
if isinstance(key, int):
# Children are accessed using integers
try:
return object.__getattribute__(self, 'children')[key]
except KeyError:
raise IndexError('Child with index "%s" does not exist.' % key)
elif isinstance(key, basestring):
# Attributes are accessed using strings
try:
return object.__getattribute__(self, 'attributes')[key]
except KeyError:
raise AttributeError('Attribute "%s" does not exist.' % key)
else:
raise TypeError('Only integer and string types are valid for accessing '
'child tags and attributes, respectively.')
__getattr__ = __getitem__
def __len__(self):
'''
Number of child elements.
'''
return len(self.children)
def __bool__(self):
'''
Hack for "if x" and __len__
'''
return True
__nonzero__ = __bool__
def __iter__(self):
'''
Iterates over child elements.
'''
return self.children.__iter__()
def __contains__(self, item):
'''
Checks recursively if item is in children tree.
Accepts both a string and a class.
'''
return bool(self.get(item))
def __iadd__(self, obj):
'''
    In-place addition (`tag += child`) simply adds the object as a child.
'''
self.add(obj)
return self
# String and unicode representations are the same as render()
def __unicode__(self):
return self.render()
__str__ = __unicode__
def render(self, indent=' ', pretty=True, xhtml=False):
data = self._render([], 0, indent, pretty, xhtml)
return u''.join(data)
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
pretty = pretty and self.is_pretty
name = getattr(self, 'tagname', type(self).__name__)
# Workaround for python keywords and standard classes/methods
# (del, object, input)
if name[-1] == '_':
name = name[:-1]
# open tag
sb.append('<')
sb.append(name)
for attribute, value in sorted(self.attributes.items()):
if value is not False: # False values must be omitted completely
sb.append(' %s="%s"' % (attribute, escape(unicode(value), True)))
sb.append(' />' if self.is_single and xhtml else '>')
if not self.is_single:
inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * indent_level)
# close tag
sb.append('</')
sb.append(name)
sb.append('>')
return sb
def _render_children(self, sb, indent_level, indent_str, pretty, xhtml):
inline = True
for child in self.children:
if isinstance(child, dom_tag):
if pretty and not child.is_inline:
inline = False
sb.append('\n')
sb.append(indent_str * indent_level)
child._render(sb, indent_level, indent_str, pretty, xhtml)
else:
sb.append(unicode(child))
return inline
def __repr__(self):
name = '%s.%s' % (self.__module__, type(self).__name__)
attributes_len = len(self.attributes)
attributes = '%s attribute' % attributes_len
if attributes_len != 1: attributes += 's'
children_len = len(self.children)
children = '%s child' % children_len
if children_len != 1: children += 'ren'
return '<%s at %x: %s, %s>' % (name, id(self), attributes, children)
@staticmethod
def clean_attribute(attribute):
'''
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
'''
# Shorthand
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
}.get(attribute, attribute)
# Workaround for Python's reserved words
if attribute[0] == '_':
attribute = attribute[1:]
# Workaround for dash
special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')])
if attribute in set(['http_equiv']) or special_prefix:
attribute = attribute.replace('_', '-').lower()
# Workaround for colon
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute
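    # Examples (per the rules above):
    #   'cls'        -> 'class'
    #   '_for'       -> 'for'          (leading underscore stripped)
    #   'data_value' -> 'data-value'
    #   'xlink_href' -> 'xlink:href'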
@classmethod
def clean_pair(cls, attribute, value):
'''
This will call `clean_attribute` on the attribute and also allows for the
creation of boolean attributes.
Ex. input(selected=True) is equivalent to input(selected="selected")
'''
attribute = cls.clean_attribute(attribute)
# Check for boolean attributes
# (i.e. selected=True becomes selected="selected")
if value is True:
value = attribute
# Ignore `if value is False`: this is filtered out in render()
return (attribute, value)
_get_current_none = object()
def get_current(default=_get_current_none):
'''
get the current tag being used as a with context or decorated function.
if no context is active, raises ValueError, or returns the default, if provided
'''
h = _get_thread_context()
ctx = dom_tag._with_contexts.get(h, None)
if ctx:
return ctx[-1].tag
if default is _get_current_none:
raise ValueError('no current context')
return default
def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
c = get_current()
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
c.set_attribute(*dom_tag.clean_pair(attr, value))
# escape() is used in render
from .util import escape
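# Example (sketch, assuming the usual tag subclasses defined elsewhere in the package):
#
#   doc = div(p('one'), p('two'), cls='list')
#   doc.render()
#   # '<div class="list">\n  <p>one</p>\n  <p>two</p>\n</div>'
#   doc.render(pretty=False)
#   # '<div class="list"><p>one</p><p>two</p></div>'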
|
lgpl-3.0
| -2,593,975,155,927,345,700
| 25.740741
| 85
| 0.6255
| false
| 3.903875
| false
| false
| false
|
openmicroscopy/omero-marshal
|
omero_marshal/encode/encoders/mask.py
|
1
|
1127
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Glencoe Software, Inc. All rights reserved.
#
# This software is distributed under the terms described by the LICENCE file
# you can find at the root of the distribution bundle.
# If the file is missing please request a copy by contacting
# jason@glencoesoftware.com.
#
from ... import SCHEMA_VERSION
from .shape import ShapeEncoder
from omero.model import MaskI
class Mask201501Encoder(ShapeEncoder):
TYPE = 'http://www.openmicroscopy.org/Schemas/ROI/2015-01#Mask'
def encode(self, obj):
v = super(Mask201501Encoder, self).encode(obj)
self.set_if_not_none(v, 'X', obj.x)
self.set_if_not_none(v, 'Y', obj.y)
self.set_if_not_none(v, 'Width', obj.width)
self.set_if_not_none(v, 'Height', obj.height)
return v
class Mask201606Encoder(Mask201501Encoder):
TYPE = 'http://www.openmicroscopy.org/Schemas/OME/2016-06#Mask'
if SCHEMA_VERSION == '2015-01':
encoder = (MaskI, Mask201501Encoder)
elif SCHEMA_VERSION == '2016-06':
encoder = (MaskI, Mask201606Encoder)
MaskEncoder = encoder[1]
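# The module-level `encoder` pair maps the OMERO model class (MaskI) to the
# encoder matching the configured schema version; `MaskEncoder` is simply an
# alias for whichever encoder class was selected above.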
|
gpl-2.0
| 9,218,345,415,002,380,000
| 27.897436
| 76
| 0.692103
| false
| 3.096154
| false
| false
| false
|
jseabold/statsmodels
|
statsmodels/tsa/vector_ar/output.py
|
5
|
6945
|
from statsmodels.compat.python import lzip
from io import StringIO
import numpy as np
from statsmodels.iolib import SimpleTable
mat = np.array
_default_table_fmt = dict(
empty_cell = '',
colsep=' ',
row_pre = '',
row_post = '',
table_dec_above='=',
table_dec_below='=',
header_dec_below='-',
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = 'r',
stubs_align = 'l',
fmt = 'txt'
)
class VARSummary(object):
default_fmt = dict(
#data_fmts = ["%#12.6g","%#12.6g","%#10.4g","%#5.4g"],
#data_fmts = ["%#10.4g","%#10.4g","%#10.4g","%#6.4g"],
data_fmts = ["%#15.6F","%#15.6F","%#15.3F","%#14.3F"],
empty_cell = '',
#colwidths = 10,
colsep=' ',
row_pre = '',
row_post = '',
table_dec_above='=',
table_dec_below='=',
header_dec_below='-',
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = 'r',
stubs_align = 'l',
fmt = 'txt'
)
part1_fmt = dict(
default_fmt,
data_fmts = ["%s"],
colwidths = 15,
colsep=' ',
table_dec_below='',
header_dec_below=None,
)
part2_fmt = dict(
default_fmt,
data_fmts = ["%#12.6g","%#12.6g","%#10.4g","%#5.4g"],
colwidths = None,
colsep=' ',
table_dec_above='-',
table_dec_below='-',
header_dec_below=None,
)
def __init__(self, estimator):
self.model = estimator
self.summary = self.make()
def __repr__(self):
return self.summary
def make(self, endog_names=None, exog_names=None):
"""
Summary of VAR model
"""
buf = StringIO()
buf.write(self._header_table() + '\n')
buf.write(self._stats_table() + '\n')
buf.write(self._coef_table() + '\n')
buf.write(self._resid_info() + '\n')
return buf.getvalue()
def _header_table(self):
import time
model = self.model
t = time.localtime()
# TODO: change when we allow coef restrictions
# ncoefs = len(model.beta)
# Header information
part1title = "Summary of Regression Results"
part1data = [[model._model_type],
["OLS"], #TODO: change when fit methods change
[time.strftime("%a, %d, %b, %Y", t)],
[time.strftime("%H:%M:%S", t)]]
part1header = None
part1stubs = ('Model:',
'Method:',
'Date:',
'Time:')
part1 = SimpleTable(part1data, part1header, part1stubs,
title=part1title, txt_fmt=self.part1_fmt)
return str(part1)
def _stats_table(self):
        # TODO: do we want individual statistics here, or should users just
        # pull them from the results object if wanted?
# Handle overall fit statistics
model = self.model
part2Lstubs = ('No. of Equations:',
'Nobs:',
'Log likelihood:',
'AIC:')
part2Rstubs = ('BIC:',
'HQIC:',
'FPE:',
'Det(Omega_mle):')
part2Ldata = [[model.neqs], [model.nobs], [model.llf], [model.aic]]
part2Rdata = [[model.bic], [model.hqic], [model.fpe], [model.detomega]]
part2Lheader = None
part2L = SimpleTable(part2Ldata, part2Lheader, part2Lstubs,
txt_fmt = self.part2_fmt)
part2R = SimpleTable(part2Rdata, part2Lheader, part2Rstubs,
txt_fmt = self.part2_fmt)
part2L.extend_right(part2R)
return str(part2L)
def _coef_table(self):
model = self.model
k = model.neqs
Xnames = self.model.exog_names
data = lzip(model.params.T.ravel(),
model.stderr.T.ravel(),
model.tvalues.T.ravel(),
model.pvalues.T.ravel())
header = ('coefficient','std. error','t-stat','prob')
buf = StringIO()
dim = k * model.k_ar + model.k_trend + model.k_exog_user
for i in range(k):
section = "Results for equation %s" % model.names[i]
buf.write(section + '\n')
table = SimpleTable(data[dim * i : dim * (i + 1)], header,
Xnames, title=None, txt_fmt = self.default_fmt)
buf.write(str(table) + '\n')
if i < k - 1:
buf.write('\n')
return buf.getvalue()
def _resid_info(self):
buf = StringIO()
names = self.model.names
buf.write("Correlation matrix of residuals" + '\n')
buf.write(pprint_matrix(self.model.resid_corr, names, names) + '\n')
return buf.getvalue()
def normality_summary(results):
title = "Normality skew/kurtosis Chi^2-test"
null_hyp = 'H_0: data generated by normally-distributed process'
return hypothesis_test_table(results, title, null_hyp)
def hypothesis_test_table(results, title, null_hyp):
fmt = dict(_default_table_fmt,
data_fmts=["%#15.6F","%#15.6F","%#15.3F", "%s"])
buf = StringIO()
table = SimpleTable([[results['statistic'],
results['crit_value'],
results['pvalue'],
str(results['df'])]],
['Test statistic', 'Critical Value', 'p-value',
'df'], [''], title=None, txt_fmt=fmt)
buf.write(title + '\n')
buf.write(str(table) + '\n')
buf.write(null_hyp + '\n')
buf.write("Conclusion: %s H_0" % results['conclusion'])
buf.write(" at %.2f%% significance level" % (results['signif'] * 100))
return buf.getvalue()
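    # Example (sketch) of the `results` mapping these helpers expect:
    #
    #   results = {
    #       'statistic': 12.34, 'crit_value': 5.99, 'pvalue': 0.002, 'df': 2,
    #       'conclusion': 'reject', 'signif': 0.05,
    #   }
    #   print(hypothesis_test_table(results, 'Some test', 'H_0: ...'))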
def pprint_matrix(values, rlabels, clabels, col_space=None):
buf = StringIO()
T, K = len(rlabels), len(clabels)
if col_space is None:
min_space = 10
col_space = [max(len(str(c)) + 2, min_space) for c in clabels]
else:
col_space = (col_space,) * K
row_space = max([len(str(x)) for x in rlabels]) + 2
head = _pfixed('', row_space)
for j, h in enumerate(clabels):
head += _pfixed(h, col_space[j])
buf.write(head + '\n')
for i, rlab in enumerate(rlabels):
line = ('%s' % rlab).ljust(row_space)
for j in range(K):
line += _pfixed(values[i,j], col_space[j])
buf.write(line + '\n')
return buf.getvalue()
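    # Example (sketch):
    #
    #   corr = np.array([[1.0, 0.25], [0.25, 1.0]])
    #   print(pprint_matrix(corr, ['y1', 'y2'], ['y1', 'y2']))
    #   # prints a right-aligned matrix with 'y1'/'y2' row and column labels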
def _pfixed(s, space, nanRep=None, float_format=None):
if isinstance(s, float):
if float_format:
formatted = float_format(s)
else:
formatted = "%#8.6F" % s
return formatted.rjust(space)
else:
return ('%s' % s)[:space].rjust(space)
|
bsd-3-clause
| -7,572,108,318,767,246,000
| 27.004032
| 79
| 0.500216
| false
| 3.495219
| false
| false
| false
|
stefanklug/django-lazysignup
|
lazysignup/migrations/0001_initial.py
|
1
|
4128
|
# flake8: noqa
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LazyUser'
db.create_table('lazysignup_lazyuser', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal('lazysignup', ['LazyUser'])
def backwards(self, orm):
# Deleting model 'LazyUser'
db.delete_table('lazysignup_lazyuser')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lazysignup.lazyuser': {
'Meta': {'object_name': 'LazyUser'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['lazysignup']
|
bsd-3-clause
| 1,776,426,415,339,057,000
| 57.971429
| 182
| 0.556928
| false
| 3.749319
| false
| false
| false
|
dmccloskey/ddt_python
|
ddt_python/ddt_container_biPlotAndValidation.py
|
1
|
20231
|
from .ddt_container import ddt_container
from .ddt_tile import ddt_tile
from .ddt_tile_html import ddt_tile_html
class ddt_container_biPlotAndValidation(ddt_container):
def make_biPlotAndValidation(self,
data1,data2,
data1_keys,data1_nestkeys,data1_keymap,
data2_keys,data2_nestkeys,data2_keymap,
):
'''Make a biPlot and model validation plot
INPUT:
data1
data2
data1_keys
data1_nestkeys
data1_keymap
data2_keys
data2_nestkeys
data2_keymap
'''
cnt = 0;
#from 1: biplot
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 1: biplot
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot',
'tiletype':'svg',
'tileid':"tile1",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"}
);
svg.make_svgparameters(
svgparameters={
"svgtype":'scatterlineplot2d_01',
"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"component",
"svgy1axislabel":"variance explained",
'svgformtileid':'filtermenu1',}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile1",[cnt,cnt]);
# data 1:
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
cnt+=1;
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation filter menu',
'tiletype':'html',
'tileid':"filtermenu2",
'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform2',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit2','text':'submit'},
"formresetbuttonidtext":{'id':'reset2','text':'reset'},
"formupdatebuttonidtext":{'id':'update12','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu2",[cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu2",
"filtermenuhtmlid":"filtermenuform2",
"filtermenusubmitbuttonid":"submit2",
"filtermenuresetbuttonid":"reset2",
"filtermenuupdatebuttonid":"update2"}
);
#svg 2: validation
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation',
'tiletype':'svg',
'tileid':"tile2",
'rowid':"row2",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'verticalbarschart2d_01',
"svgkeymap":[data2_keymap],
'svgid':'svg2',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,"svgy1axislabel":"Value",
"svgfilters":None,
'svgformtileid':'filtermenu2',
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile2",[cnt]);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Cross Validation',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row3",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data2_keymap],
"tabletype":'responsivetable_01',
'tableid':'table2',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[cnt]);
# add data 2
self.add_data(
data2,
data2_keys,
data2_nestkeys
);
# increment the data counter
cnt+=1;
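        # Layout produced above: row1 = bi-plot filter form (col1) + scatter/line
        # SVG (col2) fed by data1; row2 = cross-validation filter form (col1) +
        # vertical-bar SVG (col2); row3 = cross-validation table, the latter two
        # fed by data2.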
def make_biPlot(self,
data1,
data1_keys,data1_nestkeys,data1_keymap,
):
'''Make a biPlot
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
cnt = 0;
#from 1: biplot
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 1: biplot
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot',
'tiletype':'svg',
'tileid':"tile1",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"}
);
svg.make_svgparameters(
svgparameters={
"svgtype":'scatterlineplot2d_01',
"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"component",
"svgy1axislabel":"variance explained",
'svgformtileid':'filtermenu1',}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile1",[cnt,cnt]);
#table 1: Bi plot
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Bi plot',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row3",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[cnt]);
# data 1:
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
cnt+=1;
def make_hyperparameter(self,
data1,
data1_keys,data1_nestkeys,data1_keymap,
data_cnt=0,
):
'''Make a hyperparameter bar plot
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[data_cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 2: validation
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation',
'tiletype':'svg',
'tileid':"tile2",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'verticalbarschart2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg2',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,"svgy1axislabel":"Value",
"svgfilters":None,
'svgformtileid':'filtermenu1',
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile2",[data_cnt]);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Cross Validation',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row2",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[data_cnt]);
# add data 1
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
data_cnt+=1;
def make_impfeat(self,
data1,
data1_keys,data1_nestkeys,data1_keymap,
data_cnt=0,
):
        '''Make an important feature bar plot
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Important feature filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[data_cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 2: validation
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Important features',
'tiletype':'svg',
'tileid':"tile2",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'horizontalbarschart2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg2',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 250 },
"svgwidth":450,"svgheight":900,
"svgx1axislabel":"impfeat_value",
"svgy1axislabel":"component_name",
'svgformtileid':'filtermenu1',
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile2",[data_cnt]);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Important features',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row2",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[data_cnt]);
# add data 1
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
data_cnt+=1;
def make_SPlot(self,
data1,data_dict1,
data1_keys,data1_nestkeys,data1_keymap,
data_cnt=0,
):
        '''Make an S-Plot (loadings vs. correlations, one plot per component)
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'S-Plot filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[data_cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
# add data 1
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
#svg 2: validation
svg = ddt_tile();
for i in range(int(max(data_dict1.keys()))):
axis = i+1;
svgid = 'svg'+str(axis);
colid = 'col'+str(axis+1);
tileid = 'tile'+str(axis);
svg.make_tileparameters(
tileparameters={
'tileheader':'S-Plot',
'tiletype':'svg',
'tileid':tileid,
'rowid':"row1",
'colid':colid,
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":400,"svgheight":350,
"svgx1axislabel":"loadings" + str(axis),
"svgy1axislabel":"correlations" + str(axis),
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap(tileid,[axis]);
self.add_data(
data_dict1[axis],
data1_keys,
data1_nestkeys
);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'S-Plot',
'tiletype':'table',
'tileid':'tile'+str(axis+1),
'rowid':"row2",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap('tile'+str(axis+1),[data_cnt]);
|
mit
| -6,382,075,242,696,304,000
| 32.329489
| 82
| 0.497059
| false
| 4.068168
| false
| false
| false
|
nuncjo/Delver
|
examples.py
|
1
|
6037
|
# -*- coding:utf-8 -*-
import os
import psycopg2
from pprint import pprint
from delver import Crawler
def scraping_movies_table():
c = Crawler()
c.logging = True
c.useragent = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
c.open("http://www.boxofficemojo.com/daily/")
pprint(c.tables())
def user_login():
c = Crawler()
c.useragent = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/60.0.3112.90 Safari/537.36"
)
c.random_timeout = (0, 5)
c.open('http://testing-ground.scraping.pro/login')
forms = c.forms()
if forms:
login_form = forms[0]
login_form.fields = {
'usr': 'admin',
'pwd': '12345'
}
c.submit(login_form)
success_check = c.submit_check(
login_form,
phrase='WELCOME :)',
status_codes=[200]
)
print(success_check)
class OnePunchManDownloader:
"""Downloads One Punch Man free manga chapers to local directories.
Uses one main thread for scraper with random timeout.
Uses 20 threads just for image downloads.
"""
def __init__(self):
self._target_directory = 'one_punch_man'
self._start_url = "http://m.mangafox.me/manga/onepunch_man_one/"
self.crawler = Crawler()
self.crawler.random_timeout = (0, 5)
self.crawler.useragent = "Googlebot-Image/1.0"
def run(self):
self.crawler.open(self._start_url)
for link in self.crawler.links(filters={'text': 'Ch '}, match='IN'):
self.download_images(link)
def download_images(self, link):
target_path = '{}/{}'.format(self._target_directory, link.split('/')[-2])
full_chapter_url = link.replace('/manga/', '/roll_manga/')
self.crawler.open(full_chapter_url)
images = self.crawler.xpath("//img[@class='reader-page']/@data-original")
os.makedirs(target_path, exist_ok=True)
self.crawler.download_files(target_path, files=images, workers=20)
def one_punch_downloader():
downloader = OnePunchManDownloader()
downloader.run()
class WithConnection:
def __init__(self, params):
self._connection = psycopg2.connect(**params)
self._connection.autocommit = True
self._cursor = self._connection.cursor()
def table_exists(self, table_name):
self._cursor.execute('''
select exists(
select * from information_schema.tables where table_name='{}'
)
'''.format(table_name))
return self._cursor.fetchone()[0]
def scrape_page(crawler):
""" Scrapes rows from tables with promotions.
:param crawler: <delver.crawler.Crawler object>
:return: generator with page of rows
"""
titles = crawler.xpath("//div/span[@class='title']/text()")
discounts = crawler.xpath("//div[contains(@class, 'search_discount')]/span/text()")
final_prices = crawler.xpath("//div[contains(@class, 'discounted')]//text()[2]").strip()
yield [{
'title': row[0],
'discount': row[1],
'price': row[2]
} for row in zip(titles, discounts, final_prices)]
class SteamPromotionsScraper:
""" Scraper which can be iterated through
Usage example::
>>> promotions_scraper = SteamPromotionsScraper()
>>> for page in promotions_scraper:
... pprint(page)
"""
def __init__(self):
self.crawler = Crawler()
self.crawler.logging = True
self.crawler.useragent = \
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
self.crawler.random_timeout = (0, 5)
def scrape_by_page(self):
self.crawler.open('http://store.steampowered.com/search/?specials=1')
yield from scrape_page(self.crawler)
while self.crawler.links(filters={
'class': 'pagebtn',
'text': '>'
}):
self.crawler.open(self.crawler.current_results[0])
yield from scrape_page(self.crawler)
def __iter__(self):
return self.scrape_by_page()
class SteamPromotionsScraperDB(WithConnection):
"""Example with saving data to postgresql database
Usage example::
>>> promotions_scraper_db = SteamPromotionsScraperDB({
... 'dbname': "test",
... 'user': "testuser",
... 'password': "test"
... })
    >>> promotions_scraper_db.save_to_db()
"""
def __init__(self, params):
super().__init__(params)
self.crawler = Crawler()
self.crawler.logging = True
self.crawler.useragent = \
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
self.crawler.random_timeout = (0, 5)
def scrape_by_page(self):
self.crawler.open('http://store.steampowered.com/search/?specials=1')
yield from scrape_page(self.crawler)
while self.crawler.links(filters={
'class': 'pagebtn',
'text': '>'
}):
self.crawler.open(self.crawler.current_results[0])
yield from scrape_page(self.crawler)
def save_to_db(self):
if not self.table_exists('promotions'):
self._cursor.execute(
'''
CREATE TABLE promotions (
id serial PRIMARY KEY,
title varchar(255),
discount varchar(4),
price varchar(10)
);
'''
)
for page in self.scrape_by_page():
for row in page:
self._cursor.execute(
'''
INSERT INTO promotions(title, discount, price)
VALUES(%s, %s, %s)
''',
(row.get('title'), row.get('discount'), row.get('price'))
)
pprint(row)
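if __name__ == '__main__':
    # Minimal entry point (sketch): run the simplest example above. The other
    # examples need network access and, for the DB variant, a local PostgreSQL
    # instance, so they are not invoked here by default.
    scraping_movies_table()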
|
mit
| 3,424,693,276,046,666,000
| 31.283422
| 92
| 0.558556
| false
| 3.712792
| false
| false
| false
|
beeftornado/sentry
|
src/sentry/stacktraces/processing.py
|
1
|
20204
|
from __future__ import absolute_import
import six
import logging
from datetime import datetime
from django.utils import timezone
from collections import namedtuple, OrderedDict
import sentry_sdk
from sentry.models import Project, Release
from sentry.utils.cache import cache
from sentry.utils.hashlib import hash_values
from sentry.utils.safe import get_path, safe_execute
from sentry.stacktraces.functions import set_in_app, trim_function_name
logger = logging.getLogger(__name__)
StacktraceInfo = namedtuple(
"StacktraceInfo", ["stacktrace", "container", "platforms", "is_exception"]
)
StacktraceInfo.__hash__ = lambda x: id(x)
StacktraceInfo.__eq__ = lambda a, b: a is b
StacktraceInfo.__ne__ = lambda a, b: a is not b
class ProcessableFrame(object):
def __init__(self, frame, idx, processor, stacktrace_info, processable_frames):
self.frame = frame
self.idx = idx
self.processor = processor
self.stacktrace_info = stacktrace_info
self.data = None
self.cache_key = None
self.cache_value = None
self.processable_frames = processable_frames
def __repr__(self):
return "<ProcessableFrame %r #%r at %r>" % (
self.frame.get("function") or "unknown",
self.idx,
self.frame.get("instruction_addr"),
)
def __contains__(self, key):
return key in self.frame
def __getitem__(self, key):
return self.frame[key]
def get(self, key, default=None):
return self.frame.get(key, default)
def close(self):
# manually break circular references
self.closed = True
self.processable_frames = None
self.stacktrace_info = None
self.processor = None
@property
def previous_frame(self):
last_idx = len(self.processable_frames) - self.idx - 1 - 1
if last_idx < 0:
return
return self.processable_frames[last_idx]
def set_cache_value(self, value):
if self.cache_key is not None:
cache.set(self.cache_key, value, 3600)
return True
return False
def set_cache_key_from_values(self, values):
if values is None:
self.cache_key = None
return
h = hash_values(values, seed=self.processor.__class__.__name__)
self.cache_key = rv = "pf:%s" % h
return rv
class StacktraceProcessingTask(object):
def __init__(self, processable_stacktraces, processors):
self.processable_stacktraces = processable_stacktraces
self.processors = processors
def close(self):
for frame in self.iter_processable_frames():
frame.close()
def iter_processors(self):
return iter(self.processors)
def iter_processable_stacktraces(self):
return six.iteritems(self.processable_stacktraces)
def iter_processable_frames(self, processor=None):
for _, frames in self.iter_processable_stacktraces():
for frame in frames:
if processor is None or frame.processor == processor:
yield frame
class StacktraceProcessor(object):
def __init__(self, data, stacktrace_infos, project=None):
self.data = data
self.stacktrace_infos = stacktrace_infos
if project is None:
project = Project.objects.get_from_cache(id=data["project"])
self.project = project
def close(self):
pass
def get_release(self, create=False):
"""Convenient helper to return the release for the current data
and optionally creates the release if it's missing. In case there
is no release info it will return `None`.
"""
release = self.data.get("release")
if not release:
return None
if not create:
return Release.get(project=self.project, version=self.data["release"])
timestamp = self.data.get("timestamp")
if timestamp is not None:
date = datetime.fromtimestamp(timestamp).replace(tzinfo=timezone.utc)
else:
date = None
return Release.get_or_create(
project=self.project, version=self.data["release"], date_added=date
)
def handles_frame(self, frame, stacktrace_info):
"""Returns true if this processor can handle this frame. This is the
earliest check and operates on a raw frame and stacktrace info. If
this returns `True` a processable frame is created.
"""
return False
def preprocess_frame(self, processable_frame):
"""After a processable frame has been created this method is invoked
to give the processor a chance to store additional data to the frame
if wanted. In particular a cache key can be set here.
"""
pass
def process_exception(self, exception):
"""Processes an exception."""
return False
def process_frame(self, processable_frame, processing_task):
"""Processes the processable frame and returns a tuple of three
lists: ``(frames, raw_frames, errors)`` where frames is the list of
processed frames, raw_frames is the list of raw unprocessed frames
(which however can also be modified if needed) as well as a list of
optional errors. Each one of the items can be `None` in which case
the original input frame is assumed.
"""
def preprocess_step(self, processing_task):
"""After frames are preprocessed but before frame processing kicks in
the preprocessing step is run. This already has access to the cache
values on the frames.
"""
return False
def find_stacktraces_in_data(data, include_raw=False, with_exceptions=False):
"""Finds all stracktraces in a given data blob and returns it
together with some meta information.
If `include_raw` is True, then also raw stacktraces are included. If
`with_exceptions` is set to `True` then stacktraces of the exception
are always included and the `is_exception` flag is set on that stack
info object.
"""
rv = []
def _report_stack(stacktrace, container, is_exception=False):
if not is_exception and (not stacktrace or not get_path(stacktrace, "frames", filter=True)):
return
platforms = set(
frame.get("platform") or data.get("platform")
for frame in get_path(stacktrace, "frames", filter=True, default=())
)
rv.append(
StacktraceInfo(
stacktrace=stacktrace,
container=container,
platforms=platforms,
is_exception=is_exception,
)
)
for exc in get_path(data, "exception", "values", filter=True, default=()):
_report_stack(exc.get("stacktrace"), exc, is_exception=with_exceptions)
_report_stack(data.get("stacktrace"), None)
for thread in get_path(data, "threads", "values", filter=True, default=()):
_report_stack(thread.get("stacktrace"), thread)
if include_raw:
for info in rv[:]:
if info.container is not None:
_report_stack(info.container.get("raw_stacktrace"), info.container)
return rv
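    # Example (sketch) of the event shapes this walks: stacktraces are picked up
    # from data['exception']['values'][i]['stacktrace'], data['stacktrace'] and
    # data['threads']['values'][i]['stacktrace'], and each hit is wrapped in a
    # StacktraceInfo carrying the owning container and the set of frame platforms.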
def _has_system_frames(frames):
"""
    Determines whether the stacktrace contains a mix of in-app and system
    (in_app=False) frames, i.e. at least one of each.
"""
system_frames = 0
for frame in frames:
if not frame.get("in_app"):
system_frames += 1
return bool(system_frames) and len(frames) != system_frames
def _normalize_in_app(stacktrace, platform=None, sdk_info=None):
"""
Ensures consistent values of in_app across a stacktrace.
"""
has_system_frames = _has_system_frames(stacktrace)
for frame in stacktrace:
# If all frames are in_app, flip all of them. This is expected by the UI
if not has_system_frames:
set_in_app(frame, False)
# Default to false in all cases where processors or grouping enhancers
# have not yet set in_app.
elif frame.get("in_app") is None:
set_in_app(frame, False)
def normalize_stacktraces_for_grouping(data, grouping_config=None):
"""
Applies grouping enhancement rules and ensure in_app is set on all frames.
This also trims functions if necessary.
"""
stacktraces = []
for stacktrace_info in find_stacktraces_in_data(data, include_raw=True):
frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=())
if frames:
stacktraces.append(frames)
if not stacktraces:
return
platform = data.get("platform")
# Put the trimmed function names into the frames. We only do this if
# the trimming produces a different function than the function we have
# otherwise stored in `function` to not make the payload larger
# unnecessarily.
for frames in stacktraces:
for frame in frames:
# Restore the original in_app value before the first grouping
# enhancers have been run. This allows to re-apply grouping
# enhancers on the original frame data.
orig_in_app = get_path(frame, "data", "orig_in_app")
if orig_in_app is not None:
frame["in_app"] = None if orig_in_app == -1 else bool(orig_in_app)
if frame.get("raw_function") is not None:
continue
raw_func = frame.get("function")
if not raw_func:
continue
function_name = trim_function_name(raw_func, frame.get("platform") or platform)
if function_name != raw_func:
frame["raw_function"] = raw_func
frame["function"] = function_name
# If a grouping config is available, run grouping enhancers
if grouping_config is not None:
for frames in stacktraces:
grouping_config.enhancements.apply_modifications_to_frame(frames, platform)
# normalize in-app
for stacktrace in stacktraces:
_normalize_in_app(stacktrace, platform=platform)
def should_process_for_stacktraces(data):
from sentry.plugins.base import plugins
infos = find_stacktraces_in_data(data, with_exceptions=True)
platforms = set()
for info in infos:
platforms.update(info.platforms or ())
for plugin in plugins.all(version=2):
processors = safe_execute(
plugin.get_stacktrace_processors,
data=data,
stacktrace_infos=infos,
platforms=platforms,
_with_transaction=False,
)
if processors:
return True
return False
def get_processors_for_stacktraces(data, infos):
from sentry.plugins.base import plugins
platforms = set()
for info in infos:
platforms.update(info.platforms or ())
processors = []
for plugin in plugins.all(version=2):
processors.extend(
safe_execute(
plugin.get_stacktrace_processors,
data=data,
stacktrace_infos=infos,
platforms=platforms,
_with_transaction=False,
)
or ()
)
if processors:
project = Project.objects.get_from_cache(id=data["project"])
processors = [x(data, infos, project) for x in processors]
return processors
def get_processable_frames(stacktrace_info, processors):
"""Returns thin wrappers around the frames in a stacktrace associated
with the processor for it.
"""
frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=())
frame_count = len(frames)
rv = []
for idx, frame in enumerate(frames):
processor = next((p for p in processors if p.handles_frame(frame, stacktrace_info)), None)
if processor is not None:
rv.append(
ProcessableFrame(frame, frame_count - idx - 1, processor, stacktrace_info, rv)
)
return rv
def process_single_stacktrace(processing_task, stacktrace_info, processable_frames):
# TODO: associate errors with the frames and processing issues
changed_raw = False
changed_processed = False
raw_frames = []
processed_frames = []
all_errors = []
bare_frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=())
frame_count = len(bare_frames)
processable_frames = {frame.idx: frame for frame in processable_frames}
for i, bare_frame in enumerate(bare_frames):
idx = frame_count - i - 1
rv = None
if idx in processable_frames:
processable_frame = processable_frames[idx]
assert processable_frame.frame is bare_frame
try:
rv = processable_frame.processor.process_frame(processable_frame, processing_task)
except Exception:
logger.exception("Failed to process frame")
expand_processed, expand_raw, errors = rv or (None, None, None)
if expand_processed is not None:
processed_frames.extend(expand_processed)
changed_processed = True
elif expand_raw: # is not empty
processed_frames.extend(expand_raw)
changed_processed = True
else:
processed_frames.append(bare_frame)
if expand_raw is not None:
raw_frames.extend(expand_raw)
changed_raw = True
else:
raw_frames.append(bare_frame)
all_errors.extend(errors or ())
return (
processed_frames if changed_processed else None,
raw_frames if changed_raw else None,
all_errors,
)
def get_crash_frame_from_event_data(data, frame_filter=None):
"""
Return the highest (closest to the crash) in-app frame in the top stacktrace
which doesn't fail the given filter test.
If no such frame is available, return the highest non-in-app frame which
otherwise meets the same criteria.
Return None if any of the following are true:
- there are no frames
- all frames fail the given filter test
- we're unable to find any frames nested in either event.exception or
event.stacktrace, and there's anything other than exactly one thread
in the data
"""
frames = get_path(data, "exception", "values", -1, "stacktrace", "frames") or get_path(
data, "stacktrace", "frames"
)
if not frames:
threads = get_path(data, "threads", "values")
if threads and len(threads) == 1:
frames = get_path(threads, 0, "stacktrace", "frames")
default = None
for frame in reversed(frames or ()):
if frame is None:
continue
if frame_filter is not None:
if not frame_filter(frame):
continue
if frame.get("in_app"):
return frame
if default is None:
default = frame
if default:
return default
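    # Example (sketch):
    #
    #   event = {
    #       'exception': {'values': [{'stacktrace': {'frames': [
    #           {'function': 'main', 'in_app': False},
    #           {'function': 'handler', 'in_app': True},
    #           {'function': 'libc_call', 'in_app': False},
    #       ]}}]}
    #   }
    #   get_crash_frame_from_event_data(event)  # -> the 'handler' frame (highest in-app)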
def lookup_frame_cache(keys):
rv = {}
for key in keys:
rv[key] = cache.get(key)
return rv
def get_stacktrace_processing_task(infos, processors):
"""Returns a list of all tasks for the processors. This can skip over
processors that seem to not handle any frames.
"""
by_processor = {}
to_lookup = {}
# by_stacktrace_info requires stable sorting as it is used in
# StacktraceProcessingTask.iter_processable_stacktraces. This is important
# to guarantee reproducible symbolicator requests.
by_stacktrace_info = OrderedDict()
for info in infos:
processable_frames = get_processable_frames(info, processors)
for processable_frame in processable_frames:
processable_frame.processor.preprocess_frame(processable_frame)
by_processor.setdefault(processable_frame.processor, []).append(processable_frame)
by_stacktrace_info.setdefault(processable_frame.stacktrace_info, []).append(
processable_frame
)
if processable_frame.cache_key is not None:
to_lookup[processable_frame.cache_key] = processable_frame
frame_cache = lookup_frame_cache(to_lookup)
for cache_key, processable_frame in six.iteritems(to_lookup):
processable_frame.cache_value = frame_cache.get(cache_key)
return StacktraceProcessingTask(
processable_stacktraces=by_stacktrace_info, processors=by_processor
)
def dedup_errors(errors):
    # This operation scales badly, but we do not expect many items to end up
    # in rv, so it should be acceptable.
rv = []
for error in errors:
if error not in rv:
rv.append(error)
return rv
def process_stacktraces(data, make_processors=None, set_raw_stacktrace=True):
infos = find_stacktraces_in_data(data, with_exceptions=True)
if make_processors is None:
processors = get_processors_for_stacktraces(data, infos)
else:
processors = make_processors(data, infos)
# Early out if we have no processors. We don't want to record a timer
# in that case.
if not processors:
return
changed = False
# Build a new processing task
processing_task = get_stacktrace_processing_task(infos, processors)
try:
# Preprocess step
for processor in processing_task.iter_processors():
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.preprocess_step"
) as span:
span.set_data("processor", processor.__class__.__name__)
if processor.preprocess_step(processing_task):
changed = True
span.set_data("data_changed", True)
# Process all stacktraces
for stacktrace_info, processable_frames in processing_task.iter_processable_stacktraces():
# Let the stacktrace processors touch the exception
if stacktrace_info.is_exception and stacktrace_info.container:
for processor in processing_task.iter_processors():
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.process_exception"
) as span:
span.set_data("processor", processor.__class__.__name__)
if processor.process_exception(stacktrace_info.container):
changed = True
span.set_data("data_changed", True)
# If the stacktrace is empty we skip it for processing
if not stacktrace_info.stacktrace:
continue
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.process_single_stacktrace"
) as span:
new_frames, new_raw_frames, errors = process_single_stacktrace(
processing_task, stacktrace_info, processable_frames
)
if new_frames is not None:
stacktrace_info.stacktrace["frames"] = new_frames
changed = True
span.set_data("data_changed", True)
if (
set_raw_stacktrace
and new_raw_frames is not None
and stacktrace_info.container is not None
):
stacktrace_info.container["raw_stacktrace"] = dict(
stacktrace_info.stacktrace, frames=new_raw_frames
)
changed = True
if errors:
data.setdefault("errors", []).extend(dedup_errors(errors))
data.setdefault("_metrics", {})["flag.processing.error"] = True
changed = True
except Exception:
logger.exception("stacktraces.processing.crash")
data.setdefault("_metrics", {})["flag.processing.fatal"] = True
data.setdefault("_metrics", {})["flag.processing.error"] = True
changed = True
finally:
for processor in processors:
processor.close()
processing_task.close()
if changed:
return data
|
bsd-3-clause
| 501,741,680,478,238,800
| 34.076389
| 100
| 0.620719
| false
| 4.307889
| false
| false
| false
|
MTLeeLab/RESA
|
resa_util.py
|
1
|
10051
|
###
# Copyright 2016 Miler T. Lee, University of Pittsburgh
# This file is part of the RESA Suite
#
# RESA Suite is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# RESA Suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with RESA Suite. If not, see <http://www.gnu.org/licenses/>.
#
#
# resa_util.py: Utilities for processing RESA data
##
import bz2
import gzip
import json
import re
import sys
import subprocess
def initialize_loci(utr_bed12_file, utr_fasta_file, test = False):
"""
Given the UTRs listed in the bed12 file and
their corresponding sequences in the fasta_file,
creates a dict of loci[key] = (chr, strand, exon_list, seq)
"""
seqs = dict(read_fasta(utr_fasta_file))
loci = {}
f = open(utr_bed12_file)
for line in f:
fields = line.strip().split()
chrom = fields[0]
start = int(fields[1])
strand = fields[5]
feat_id = fields[3]
block_sizes = fields[10].strip(',').split(',')
block_starts = fields[11].strip(',').split(',')
exons = []
for i, (bsize, bstart) in enumerate(zip(block_sizes, block_starts)):
gstart = start + int(bstart)
gend = gstart + int(bsize)
exons.append((gstart, gend))
loci[feat_id] = (chrom, strand, tuple(exons), seqs[fields[3]].upper())
if test:
break
f.close()
return loci
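    # Example (sketch): given a bed12 line such as
    #   chr1  1000  1300  utr_1  0  +  1000  1300  0  2  50,100,  0,200,
    # and a FASTA entry named 'utr_1', the resulting entry is
    #   loci['utr_1'] == ('chr1', '+', ((1000, 1050), (1200, 1300)), '<UTR SEQUENCE>')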
###
# UTILITIES
###
nt_mutations = {'C': 'T', 'G': 'A'}
anti_strand_str = {'-': '+', '+': '-'}
###string.maketrans('acgturyACGTURY', 'tgcaayrTGCAAYR')
DNA_TRANS = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@TBGDEFCHIJKLMNOPQYSAAVWXRZ[\\]^_`tbgdefchijklmnopqysaavwxrz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff'
def rc(sequence, reverse = True):
"""
Reverse complement a DNA sequence, preserving case
"""
result = sequence.translate(DNA_TRANS)
if reverse:
return result[::-1]
else:
return result
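    # Examples:
    #   rc('AACG')                -> 'CGTT'
    #   rc('acGT')                -> 'ACgt'   (case is preserved)
    #   rc('AACG', reverse=False) -> 'TTGC'   (complement only)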
def bam_entry_is_reverse(samflag):
"""
Flag is passed in as an integer. Determines
whether the 0x10 bit is set (16 in base 10),
which indicates reverse complemented sequence.
This is done using a binary operator &
"""
return samflag & 16 == 16
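    # Examples: bam_entry_is_reverse(0) -> False, bam_entry_is_reverse(16) -> True,
    # bam_entry_is_reverse(83) -> True (the 0x10 bit is set within the combined flag).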
def seq_mask(seq, chars = ['A', 'G']):
"""
Replaces specified characters with N
"""
for char in chars:
seq = seq.replace(char, 'N')
return seq
def load_chr_seqs(genome_fa):
"""
Loads all chromosome sequences into a dict
"""
chr_dict = dict(read_fasta(genome_fa))
return chr_dict
def load_chr_seq(chr_id, chr_dict, genome_fa):
"""
Loads the chromosome sequence into memory if it's not
already there
"""
if chr_id not in chr_dict:
fasta_file = genome_fa % chr_id
chr_dict[chr_id] = read_fasta(fasta_file)[0][1]
return chr_dict[chr_id]
def decode_cigar(cigar):
"""
Parses the cigar string into integers and letters
"""
return re.findall('(\d+)([MNDISHPX=])', cigar)
def cigar_span_(cigar):
"""
Interprets the cigar string as the number of genomic
positions consumed
"""
span = 0
cigar_ops = decode_cigar(cigar)
for nts, op in cigar_ops:
nts = int(nts)
if op != 'I':
span += nts
return span
def cigar_span(cigar):
return sum(int(x) for x in re.findall('(\d+)[MNDSHPX=]', cigar)) #no I
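    # Examples:
    #   decode_cigar('10M2I4D5M') -> [('10', 'M'), ('2', 'I'), ('4', 'D'), ('5', 'M')]
    #   cigar_span('10M2I4D5M')   -> 19   (insertions do not consume genomic positions)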
def tx_indexing(exons, minus = False, inverse = False):
"""
Returns a dict of genomic coordinates -> tx coordinates
(or the inverse if inverse = True)
Exons are zero indexed.
"""
positions = []
for s, e in exons:
positions += [i for i in range(s, e)]
if minus:
positions.reverse()
if inverse:
return {i:x for i, x in enumerate(positions)}
else:
return {x:i for i, x in enumerate(positions)}
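    # Example: for exons ((100, 103), (200, 202)) on the plus strand,
    #   tx_indexing(exons)               -> {100: 0, 101: 1, 102: 2, 200: 3, 201: 4}
    #   tx_indexing(exons, minus=True)   -> {201: 0, 200: 1, 102: 2, 101: 3, 100: 4}
    #   tx_indexing(exons, inverse=True) maps tx position -> genomic position instead.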
def pretty_str(x, fields = False):
"""
Handles tuples or lists
"""
def joined_string(x, sep=','):
return sep.join(list([str(y) for y in x]))
if isinstance(x, str):
return x
elif isinstance(x, float):
if abs(x) < 0.001:
return '%.1E' % x
else:
return '%.3f' % x
elif isinstance(x, tuple) or isinstance(x, list):
if fields:
return joined_string(x, '\t')
elif not x:
return '.'
elif isinstance(x[0], tuple) or isinstance(x[0], list):
return ';'.join([joined_string(y) for y in x])
else:
return joined_string(x)
else:
return str(x)
#######################
# FASTA file processing
#######################
def read_fasta(filename):
"""
Returns the contents of a fasta file in a list of (id, sequence)
tuples. Empty list returned if there are no fasta sequences in the file
"""
a = fasta_reader(filename)
seqs = []
while a.has_next():
seqs.append(next(a))
return seqs
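# Usage sketch ('genome.fa' is a hypothetical path):
# >>> seqs = read_fasta('genome.fa')              # [(header, sequence), ...]
# >>> ids = [header.split()[0] for header, seq in seqs]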
class fasta_reader:
"""
Lightweight class for incrementally reading fasta files.
Supports reading directly from properly named
gzipped (.gz or .z) or bzip2ed (.bz2) files.
"""
file = None
nextheader=''
def __init__(self, filename):
try:
if filename.endswith('.gz') or filename.endswith('.z'):
self.file = gzip.open(filename, 'rb')
elif filename.endswith('.bz2'):
self.file = bz2.BZ2File(filename, 'rb')
else:
self.file = open(filename, 'r')
# fast forward to the first entry
while 1:
line = self.file.readline()
if line == '':
self.close()
return
elif line[0] == '>':
self.nextheader = line[1:].rstrip()
return
except IOError:
#print('No such file', filename)
raise
def has_next(self):
"""
Returns true if there are still fasta entries
"""
return len(self.nextheader) > 0
def __next__(self):
"""
Returns an (id, sequence) tuple, or () if file is finished
"""
#if global nextheader is empty, return empty
#otherwise, the header is the nextheader
try:
identifier = self.nextheader
total = []
while 1:
line = self.file.readline()
if line == '' or line[0] == '>': #EOF, end of entry
break
total.append(line.rstrip())
sequence = ''.join(total)
if len(line) > 0:
self.nextheader = line[1:].rstrip()
else:
self.nextheader = ''
self.close()
return (identifier, sequence)
except:
self.nextheader=''
self.close()
return ()
def close(self):
"""
Close the fasta file
"""
self.file.close()
def write_fasta(filename, id_or_list, seq='', width=60, gzip_compress = False):
"""
Writes a fasta file with the sequence(s)
version 1: write_fasta(myfilename, 'seq1_id', 'AAAAA')
version 2: write_fasta(myfilename, [('seq1_id', 'AAAAA'),
('seq2_id', BBBBB)])
"""
a = fasta_writer(filename, width=width, gzip_compress = gzip_compress)
a.write(id_or_list, seq)
a.close()
class fasta_writer:
"""
Rudimentary fasta file writer
Supports writing out to a gzipped file. If the passed in filename
does not end with .gz or .z, .gz is appended.
"""
file = None
width = 0
def __init__(self, filename, width=60, gzip_compress = False):
self.width = width
try:
if gzip_compress:
if not filename.endswith('.gz') and not filename.endswith('.z'):
filename += '.gz'
self.file = gzip.open(filename, 'wb')
else:
self.file = open(filename, 'w')
except IOError:
print('Can\'t open file.')
def write(self, id, seq=''):
"""
Supports an id and a sequence, an (id, seq) tuple, or
a list of sequence tuples
"""
if type(id) == type([]):
list(map(self.writeone, id))
else:
self.writeone(id, seq)
def writeone(self, id, seq=''):
"""
Internal method.
"""
if type(id) == type((0,0)):
seq = id[1]
id = id[0]
line_width = self.width
if self.width == 0:
line_width = len(seq)
self.file.write(">" + id + "\n")
i = 0
while i < len(seq):
self.file.write(seq[i:i+line_width] + "\n")
i+=line_width
def close(self):
"""
Closes the fasta file.
"""
self.file.close()
|
gpl-3.0
| 4,324,001,643,226,316,000
| 26.3125
| 749
| 0.553477
| false
| 3.373951
| false
| false
| false
|
openstack/sahara-dashboard
|
sahara_dashboard/content/data_processing/clusters/cluster_templates/views.py
|
1
|
7433
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from sahara_dashboard.api import sahara as saharaclient
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.tables as ct_tables
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.tabs as _tabs
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.workflows.copy as copy_flow
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.workflows.create as create_flow
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.workflows.edit as edit_flow
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.forms.import_forms as import_forms
class ClusterTemplateDetailsView(tabs.TabView):
tab_group_class = _tabs.ClusterTemplateDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ template.name|default:template.id }}"
@memoized.memoized_method
def get_object(self):
ct_id = self.kwargs["template_id"]
try:
return saharaclient.cluster_template_get(self.request, ct_id)
except Exception:
msg = _('Unable to retrieve details for '
'cluster template "%s".') % ct_id
redirect = self.get_redirect_url()
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ClusterTemplateDetailsView, self)\
.get_context_data(**kwargs)
cluster_template = self.get_object()
context['template'] = cluster_template
context['url'] = self.get_redirect_url()
context['actions'] = self._get_actions(cluster_template)
return context
def _get_actions(self, cluster_template):
table = ct_tables.ClusterTemplatesTable(self.request)
return table.render_row_actions(cluster_template)
@staticmethod
def get_redirect_url():
return reverse("horizon:project:data_processing."
"clusters:index")
class CreateClusterTemplateView(workflows.WorkflowView):
workflow_class = create_flow.CreateClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":create-cluster-template")
classes = ("ajax-modal",)
template_name = "cluster_templates/create.html"
page_title = _("Create Cluster Template")
class ConfigureClusterTemplateView(workflows.WorkflowView):
workflow_class = create_flow.ConfigureClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":index")
template_name = "cluster_templates/configure.html"
page_title = _("Configure Cluster Template")
class CopyClusterTemplateView(workflows.WorkflowView):
workflow_class = copy_flow.CopyClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":index")
template_name = "cluster_templates/configure.html"
page_title = _("Copy Cluster Template")
def get_context_data(self, **kwargs):
context = super(CopyClusterTemplateView, self)\
.get_context_data(**kwargs)
context["template_id"] = kwargs["template_id"]
return context
def get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
template_id = self.kwargs['template_id']
try:
template = saharaclient.cluster_template_get(self.request,
template_id)
except Exception:
template = {}
exceptions.handle(self.request,
_("Unable to fetch cluster template."))
self._object = template
return self._object
def get_initial(self):
initial = super(CopyClusterTemplateView, self).get_initial()
initial['template_id'] = self.kwargs['template_id']
return initial
class EditClusterTemplateView(CopyClusterTemplateView):
workflow_class = edit_flow.EditClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":index")
template_name = "cluster_templates/configure.html"
class ImportClusterTemplateFileView(forms.ModalFormView):
template_name = "cluster_templates/import.html"
form_class = import_forms.ImportClusterTemplateFileForm
submit_label = _("Next")
submit_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-file")
success_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-name")
page_title = _("Import Cluster Template")
def get_form_kwargs(self):
kwargs = super(ImportClusterTemplateFileView, self).get_form_kwargs()
kwargs['next_view'] = ImportClusterTemplateNameView
return kwargs
class ImportClusterTemplateNameView(forms.ModalFormView):
template_name = "cluster_templates/import.html"
form_class = import_forms.ImportClusterTemplateNameForm
submit_label = _("Next")
submit_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-name")
success_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-nodegroups")
page_title = _("Import Cluster Template")
def get_form_kwargs(self):
kwargs = super(ImportClusterTemplateNameView, self).get_form_kwargs()
kwargs['next_view'] = ImportClusterTemplateNodegroupsView
if 'template_upload' in self.kwargs:
kwargs['template_upload'] = self.kwargs['template_upload']
return kwargs
class ImportClusterTemplateNodegroupsView(forms.ModalFormView):
template_name = "cluster_templates/import_nodegroups.html"
# template_name = "some_random_stuff.html"
form_class = import_forms.ImportClusterTemplateNodegroupsForm
submit_label = _("Import")
submit_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-nodegroups")
success_url = reverse_lazy("horizon:project:data_processing."
"clusters:index")
page_title = _("Import Cluster Template")
def get_form_kwargs(self):
kwargs = super(ImportClusterTemplateNodegroupsView,
self).get_form_kwargs()
if 'template_upload' in self.kwargs:
kwargs['template_upload'] = self.kwargs['template_upload']
return kwargs
|
apache-2.0
| 4,539,590,981,925,018,600
| 40.066298
| 77
| 0.675636
| false
| 4.324026
| true
| false
| false
|
caterinaurban/Lyra
|
src/lyra/tests/code_jam/pancake_flipper/pancakes_fyodr.py
|
1
|
1842
|
from typing import Dict, List, Tuple
def pow(a: int, b: int) -> int:
power: int = 1
for i in range(b):
power = power * a
return power
def check(memos: Dict[(Tuple[(int, int, int, int)], int)], i: int, s: int, c: int, k: int) -> int:
if (s == 0):
return 0
elif ((i, s, c, k) not in memos):
memos[(i, s, c, k)]: int = (- 1)
flip: int = 0
for j in range(k):
flip += pow(2, j)
flip: int = flip * pow(2, i)
new_s: int = (s ^ flip)
best: int = (- 1)
for j in range((c - (k - 1))):
            maybe: int = check(memos, j, new_s, c, k)
if (maybe == 0):
best: int = maybe
break
elif (maybe == (- 1)):
pass
elif ((best == (- 1)) or (maybe < best)):
best: int = maybe
if (best == (- 1)):
memos[(i, s, c, k)]: int = best
else:
memos[(i, s, c, k)]: int = (best + 1)
return memos[(i, s, c, k)]
T: int = int(input().strip())
lines: List[str] = []
memos: Dict[(Tuple[(int, int, int, int)], int)] = {
}
for t in range(1, (T + 1)):
line: List[str] = input().strip().split()
cakes: str = line[0]
k: int = int(line[1])
s: int = 0
for i in range(len(cakes)):
c: str = cakes[i]
if (c == '-'):
s += pow(2, i)
best: int = (- 1)
for i in range((len(cakes) - (k - 1))):
maybe: int = check(memos, i, s, len(cakes), k)
if (maybe == 0):
best: int = maybe
break
if (maybe == (- 1)):
pass
elif ((maybe < best) or (best == (- 1))):
best: int = maybe
if (best == (- 1)):
lines.append('Case #' + str(t) + ': ' + 'IMPOSSIBLE')
else:
lines.append('Case #' + str(t) + ': ' + str(best))
print(lines[(- 1)])
|
mpl-2.0
| -7,948,510,953,474,577,000
| 28.709677
| 98
| 0.410966
| false
| 3.080268
| false
| false
| false
|
RedHatInsights/insights-core
|
insights/parsers/corosync_cmapctl.py
|
1
|
2063
|
"""
CorosyncCmapctl - Command ``corosync-cmapctl [params]``
=======================================================
This module parses the output of the ``corosync-cmapctl [params]`` command.
"""
from insights import parser, CommandParser
from insights.parsers import SkipException, ParseException
from insights.specs import Specs
@parser(Specs.corosync_cmapctl)
class CorosyncCmapctl(CommandParser, dict):
"""
Class for parsing the `/usr/sbin/corosync-cmapctl [params]` command.
    All lines are stored in the dictionary with the left part of the equal
    sign (without the parenthesized type info) as the key and the right part
    of the equal sign as the value.
Typical output of the command is::
config.totemconfig_reload_in_progress (u8) = 0
internal_configuration.service.0.name (str) = corosync_cmap
internal_configuration.service.0.ver (u32) = 0
internal_configuration.service.1.name (str) = corosync_cfg
internal_configuration.service.1.ver (u32) = 0
internal_configuration.service.2.name (str) = corosync_cpg
internal_configuration.service.2.ver (u32) = 0
Examples:
>>> type(corosync)
<class 'insights.parsers.corosync_cmapctl.CorosyncCmapctl'>
>>> 'internal_configuration.service.0.name' in corosync
True
>>> corosync['internal_configuration.service.0.name']
'corosync_cmap'
Raises:
SkipException: When there is no content
ParseException: When there is no "=" in the content
"""
def __init__(self, context):
super(CorosyncCmapctl, self).__init__(context, extra_bad_lines=['corosync-cmapctl: invalid option'])
def parse_content(self, content):
if not content:
raise SkipException
for line in content:
if '=' not in line:
raise ParseException("Can not parse line %s" % line)
key, value = [item.strip() for item in line.split('=')]
key_without_parenthese = key.split()[0]
self[key_without_parenthese] = value
|
apache-2.0
| 1,929,703,601,921,233,200
| 36.509091
| 108
| 0.644207
| false
| 3.848881
| true
| false
| false
|
ksteinfe/decodes
|
src/decodes/core/dc_interval.py
|
1
|
10356
|
from decodes.core import *
import math, random
class Interval(object):
"""
an interval class
"""
def __init__(self, a=0,b=1):
""" Interval Constructor.
:param a: First number of the interval.
:type a: float
:param b: Second number of the interval.
:type a: float
:result: Interval Object.
:rtype: Interval
"""
self.a = float(a)
self.b = float(b)
def __truediv__(self,divs): return self.__div__(divs)
def __div__(self, divs):
""" Overloads the division **(/)** operator. Calls Interval.divide(divs).
:param divs: Number of divisions.
:type divs: int
:result: List of numbers in which a list is divided.
:rtype: list
"""
return self.divide(divs)
def __floordiv__(self, other):
""" Overloads the integer division **(//)** operator. Calls Interval.subinterval(other).
:param other: Number to subintervals.
:type other: int
:result: list of subintervals
:rtype: list
"""
return self.subinterval(other)
def __add__(self, val):
""" Overloads the addition **(+)** operator.
:param val: Value to add to the interval.
:type val: float
:result: New interval.
:rtype: Interval
"""
return Interval(self.a + val, self.b + val)
def __sub__(self, val):
""" Overloads the subtraction **(-)** operator.
:param val: Value to subtract from the interval.
:type val: float
:result: New interval.
:rtype: Interval
"""
return Interval(self.a - val, self.b - val)
def __contains__(self, number):
""" Overloads the containment **(in)** operator
:param number: Number whose containment must be determined.
:type number: float
:result: Boolean result of containment.
:rtype: bool
"""
ival = self.order()
return (ival.a <= number) and (ival.b >= number)
def __eq__(self, other):
""" Overloads the equal **(==)** operator.
:param other: Interval to be compared.
:type other: Interval
:result: Boolean result of comparison
:rtype: bool
"""
return all([self.a==other.a,self.b==other.b])
def __hash__(self):
return hash( (self.a, self.b) )
@property
def list(self):
""" Returns a list of the interval's start and end values.
:result: List of interval's components
:rtype: list
"""
return [self.a, self.b]
@property
def is_ordered(self):
""" Returns True if the start value of the interval is smaller than the end value.
:result: Boolean value
:rtype: bool
"""
return True if self.a < self.b else False
@property
def length(self):
"""| Returns the absolute value of length of the interval.
| For a signed representation, use delta.
:result: Absolute value of length of an interval.
:rtype: int
"""
length = self.b - self.a
if length > 0: return length
else: return length *-1
@property
def delta(self):
"""| Returns the signed delta of the interval, calculated as b-a
| For an unsigned representation, use length.
:result: Delta of an interval.
:rtype: float
"""
return float(self.b - self.a)
@property
def mid(self):
"""Returns the midpoint value of the interval.
"""
return self.eval(0.5)
def overlaps(self,other):
"""
"""
return other.a in self or other.b in self or self.a in other or self.b in other
def order(self):
""" Returns a copy of this interval with ordered values, such that a < b
:result: Ordered copy of Interval object.
:rtype: Interval
"""
if self.is_ordered: return Interval(self.a, self.b)
else: return Interval(self.b, self.a)
def invert(self):
"""| Returns a copy of this interval with swapped values.
| Such that this.a = new.b and this.b = new.a
:result: Interval object with swapped values.
:rtype: Interval
"""
return Interval(self.b, self.a)
def divide(self, divs=10, include_last=False):
"""| Divides this interval into a list of values equally spaced between a and b.
| Unless include_last is set to True, returned list will not include Interval.b: the first value returned is Interval.a and the last is Interval.b-(Interval.delta/divs)
:param divs: Number of interval divisions.
:type divs: int
:param include_last: Boolean value.
:type include_last: bool
:result: List of numbers in which a list is divided.
:rtype: list
"""
step = self.delta/float(divs)
if include_last : divs += 1
return [self.a+step*n for n in range(divs)]
def subinterval(self, divs):
""" Divides an interval into a list of equal size subintervals(interval objects).
:param divs: Number of subintervals.
:type divs: int
:result: List of subintervals (interval objects).
:rtype: list
"""
return [Interval(n,n+self.delta/float(divs)) for n in self.divide(divs)]
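    # Usage sketch (illustrative values):
    # >>> Interval(0, 10).divide(4)
    # [0.0, 2.5, 5.0, 7.5]
    # >>> Interval(0, 10) // 2
    # [ival[0.0,5.0], ival[5.0,10.0]]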
def rand_interval(self, divs):
""" Divides an interval into a list of randomly sized subintervals(interval objects).
:param divs: Number of subintervals.
:type divs: int
:result: List of subintervals (interval objects).
:rtype: list
"""
        if divs < 1 : return [self]
result = []
r_list = [self.a,self.b]
r_list.extend(self.eval(random.random()) for k in range(divs-1))
r_list.sort()
return [Interval(r_list[n],r_list[n+1]) for n in range(divs)]
def deval(self, number):
"""| Returns a parameter corresponding to the position of the given number within this interval.
| Effectively, the opposite of eval().
:param number: Number to find the parameter of.
:type number: float
:result: Parameter.
:rtype: float
::
print Interval(10,20).deval(12)
>>0.2
print Interval(10,20).deval(25)
>>1.5
"""
if self.delta == 0 : raise ZeroDivisionError("This interval cannot be devaluated because the delta is zero")
return (number-self.a) / self.delta
def eval(self, t,limited=False):
"""| Evaluates a given parameter within this interval.
| For example, given an Interval(0->2*math.pi): eval(0.5) == math.pi
| Optionally, you may limit the resulting output to this interval
:param t: Number to evaluate.
:type t: float
:result: Evaluated number.
:rtype: float
::
print Interval(10,20).eval(0.2)
>>12.0
            print Interval(10,20).eval(1.5)
>>25.0
"""
ret = self.delta * t + self.a
if not limited : return ret
return self.limit_val(ret)
def limit_val(self, n):
""" Limits a given value to the min and max of this Interval.
:param n: the number to be limited by the Interval.
:type n: float
:result: a number between the min and max of this Interval (inclusive).
:rtype: float
"""
if n < self.a : return self.a
if n > self.b : return self.b
return n
def __repr__(self): return "ival[{0},{1}]".format(self.a,self.b)
def remap_to(self,val,target_interval=None,limited=False):
return Interval.remap(val,self,target_interval,limited)
@staticmethod
def remap(val, source_interval, target_interval=None, limited=False):
""" Translates a number from its position within the source interval to its relative position in the target interval. Optionally, you may limit the resulting output to the target interval.
:param val: Number to remap.
:type val: float
:param source_interval: Source interval.
:type source_interval: interval
:param target_interval: Target interval
:type target_interval: interval
:param limited: flag that limits result to target interval
:type limited: bool
:result: The given number remapped to the target interval.
:rtype: float
"""
if target_interval is None: target_interval = Interval(0,1)
t = source_interval.deval(val)
return target_interval.eval(t,limited)
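    # Usage sketch (illustrative values): remap 5 from [0,10] onto [100,200].
    # >>> Interval.remap(5, Interval(0, 10), Interval(100, 200))
    # 150.0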
@staticmethod
def encompass(values = [0],nudge=False):
""" Returns an interval defined by the minimum and maximum of a list of values.
:param values: A list of numbers.
:type values: list
:result: An Interval from the min and max of a list of values.
:rtype: Interval
"""
from .dc_base import EPSILON
if nudge: return Interval(min(values)-EPSILON, max(values)+EPSILON)
a, b = min(values), max(values)
if a == b : return False
return Interval(a,b)
@staticmethod
def twopi():
""" Creates an interval from 0->2PI
:result: Interval from 0 to 2PI.
:rtype: Interval
"""
return Interval(0,math.pi*2)
@staticmethod
def pi():
""" Creates an interval from 0->PI
:result: Interval from 0 to 2PI.
:rtype: Interval
"""
return Interval(0,math.pi)
|
gpl-3.0
| 6,334,377,875,742,004,000
| 31.772152
| 197
| 0.539784
| false
| 4.367777
| false
| false
| false
|
bnbowman/BifoAlgo
|
src/Chapter2/Sec24_LeaderCycleSeq.py
|
1
|
3081
|
#! /usr/bin/env python3
from collections import Counter
from operator import itemgetter
def cyclo_seq( spectrum_file, spectrum_table_file ):
N, spectrum = parse_spectrum_file( spectrum_file )
spectrum_table = parse_spectrum_table( spectrum_table_file )
aa_weights = set(spectrum_table.values())
peptides = list(find_possible_peptides( spectrum, aa_weights, N ))
max_peptides = find_max_peptides( peptides, spectrum )
return set(['-'.join([str(w) for w in p]) for p in max_peptides])
def find_possible_peptides( spectrum, weights, N ):
peptides = [ [0] ]
true_weight = max(spectrum)
while peptides:
peptides = expand_peptides( peptides, weights )
peptides = [p for p in peptides if sum(p) <= max(spectrum)]
for p in peptides:
if sum( p ) != true_weight:
continue
yield p
del p
peptides = cut_peptides( peptides, spectrum, N )
def expand_peptides( peptides, weights ):
new_peptides = []
for peptide in peptides:
for weight in weights:
if peptide == [0]:
copy = []
else:
copy = peptide[:]
copy.append( weight )
new_peptides.append( copy )
return new_peptides
def cut_peptides( peptides, spectrum, N ):
if len(peptides) <= N:
return peptides
scores = {}
for peptide in peptides:
sub_peptides = find_subpeptides( peptide )
weights = [sum(p) for p in sub_peptides]
peptide_str = '-'.join( [str(p) for p in peptide] )
scores[peptide_str] = sum([1 for w in weights if w in spectrum])
sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
min_score = sorted_scores[N][1]
peptides = [p for p, s in scores.items() if s >= min_score]
peptides = [[int(n) for n in p.split('-')] for p in peptides]
return peptides
def find_max_peptides( peptides, spectrum ):
scores = {}
for peptide in peptides:
sub_peptides = find_subpeptides( peptide )
weights = [sum(p) for p in sub_peptides]
peptide_str = '-'.join( [str(p) for p in peptide] )
scores[peptide_str] = sum([1 for w in weights if w in spectrum])
sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
max_score = sorted_scores[0][1]
peptides = [p for p, s in scores.items() if s == max_score]
peptides = [[int(n) for n in p.split('-')] for p in peptides]
return peptides
def find_subpeptides( peptide ):
subpeptides = [ peptide ]
for j in range(1, len(peptide)):
for i in range(len(peptide)-j+1):
subpeptides.append( peptide[i:i+j] )
return subpeptides
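# Usage sketch (illustrative amino-acid weights):
# >>> find_subpeptides([113, 128, 186])
# [[113, 128, 186], [113], [128], [186], [113, 128], [128, 186]]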
def parse_spectrum_file( spectrum_file ):
inputs = []
with open(spectrum_file) as handle:
for line in handle:
inputs += [int(w) for w in line.strip().split()]
return inputs[0], inputs[1:]
def parse_spectrum_table( spectrum_table_file ):
table = {}
with open( spectrum_table_file ) as handle:
for line in handle:
aa, size = line.strip().split()
try:
size = int(size)
table[aa] = size
except:
raise ValueError
return table
if __name__ == '__main__':
import sys
spectrum_file = sys.argv[1]
spectrum_table_file = sys.argv[2]
results = cyclo_seq( spectrum_file, spectrum_table_file )
print(' '.join(results))
|
gpl-2.0
| -5,203,654,426,318,608,000
| 29.81
| 72
| 0.674132
| false
| 2.702632
| false
| false
| false
|
taigaio/taiga-back
|
taiga/projects/attachments/permissions.py
|
1
|
5169
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api.permissions import (TaigaResourcePermission, HasProjectPerm,
AllowAny, PermissionComponent)
class IsAttachmentOwnerPerm(PermissionComponent):
def check_permissions(self, request, view, obj=None):
if obj and obj.owner and request.user.is_authenticated:
return request.user == obj.owner
return False
class CommentAttachmentPerm(PermissionComponent):
def check_permissions(self, request, view, obj=None):
if obj.from_comment:
return True
return False
class EpicAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_epics') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_epic') | (CommentAttachmentPerm() & HasProjectPerm('comment_epic'))
update_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class UserStoryAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_us') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_us') | (CommentAttachmentPerm() & HasProjectPerm('comment_us'))
update_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class TaskAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_tasks') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_task') | (CommentAttachmentPerm() & HasProjectPerm('comment_task'))
update_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class IssueAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_issues') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_issue') | (CommentAttachmentPerm() & HasProjectPerm('comment_issue'))
update_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class WikiAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_wiki_pages') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_wiki_page') | (CommentAttachmentPerm() & HasProjectPerm('comment_wiki_page'))
update_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class RawAttachmentPerm(PermissionComponent):
def check_permissions(self, request, view, obj=None):
is_owner = IsAttachmentOwnerPerm().check_permissions(request, view, obj)
if obj.content_type.app_label == "epics" and obj.content_type.model == "epic":
return EpicAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "userstories" and obj.content_type.model == "userstory":
return UserStoryAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "tasks" and obj.content_type.model == "task":
return TaskAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "issues" and obj.content_type.model == "issue":
return IssueAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "wiki" and obj.content_type.model == "wikipage":
return WikiAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
return False
class RawAttachmentPermission(TaigaResourcePermission):
retrieve_perms = RawAttachmentPerm()
|
agpl-3.0
| -7,189,436,673,565,517,000
| 52.28866
| 119
| 0.734185
| false
| 4.05094
| false
| false
| false
|
cysuncn/python
|
spark/crm/PROC_O_LNA_XDXT_CUSTOMER_RELATIVE.py
|
1
|
5008
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_CUSTOMER_RELATIVE').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates used for processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# Previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_XDXT_CUSTOMER_RELATIVE = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_CUSTOMER_RELATIVE/*')
O_CI_XDXT_CUSTOMER_RELATIVE.registerTempTable("O_CI_XDXT_CUSTOMER_RELATIVE")
# Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.RELATIVEID AS RELATIVEID
,A.RELATIONSHIP AS RELATIONSHIP
,A.CUSTOMERNAME AS CUSTOMERNAME
,A.CERTTYPE AS CERTTYPE
,A.CERTID AS CERTID
,A.FICTITIOUSPERSON AS FICTITIOUSPERSON
,A.CURRENCYTYPE AS CURRENCYTYPE
,A.INVESTMENTSUM AS INVESTMENTSUM
,A.OUGHTSUM AS OUGHTSUM
,A.INVESTMENTPROP AS INVESTMENTPROP
,A.INVESTDATE AS INVESTDATE
,A.STOCKCERTNO AS STOCKCERTNO
,A.DUTY AS DUTY
,A.TELEPHONE AS TELEPHONE
,A.EFFECT AS EFFECT
,A.WHETHEN1 AS WHETHEN1
,A.WHETHEN2 AS WHETHEN2
,A.WHETHEN3 AS WHETHEN3
,A.WHETHEN4 AS WHETHEN4
,A.WHETHEN5 AS WHETHEN5
,A.DESCRIBE AS DESCRIBE
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.SEX AS SEX
,A.BIRTHDAY AS BIRTHDAY
,A.SINO AS SINO
,A.FAMILYADD AS FAMILYADD
,A.FAMILYZIP AS FAMILYZIP
,A.EDUEXPERIENCE AS EDUEXPERIENCE
,A.INVESTYIELD AS INVESTYIELD
,A.HOLDDATE AS HOLDDATE
,A.ENGAGETERM AS ENGAGETERM
,A.HOLDSTOCK AS HOLDSTOCK
,A.LOANCARDNO AS LOANCARDNO
,A.EFFSTATUS AS EFFSTATUS
,A.CUSTOMERTYPE AS CUSTOMERTYPE
,A.INVESINITIALSUM AS INVESINITIALSUM
,A.ACCOUNTSUM AS ACCOUNTSUM
,A.FAIRSUM AS FAIRSUM
,A.DIATHESIS AS DIATHESIS
,A.ABILITY AS ABILITY
,A.INNOVATION AS INNOVATION
,A.CHARACTER AS CHARACTER
,A.COMPETITION AS COMPETITION
,A.STRATEGY AS STRATEGY
,A.RISE AS RISE
,A.POSSESS AS POSSESS
,A.EYESHOT AS EYESHOT
,A.FORESIGHT AS FORESIGHT
,A.STATUS AS STATUS
,A.INDUSTRY AS INDUSTRY
,A.PROSECUTION AS PROSECUTION
,A.FIRSTINVESTSUM AS FIRSTINVESTSUM
,A.FIRSTINVESTDATE AS FIRSTINVESTDATE
,A.LASTINVESTSUM AS LASTINVESTSUM
,A.LASTINVESTDATE AS LASTINVESTDATE
,A.DEADLINE AS DEADLINE
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
   FROM O_CI_XDXT_CUSTOMER_RELATIVE A --customer relative information
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_RELATIVE = sqlContext.sql(sql)
F_CI_XDXT_CUSTOMER_RELATIVE.registerTempTable("F_CI_XDXT_CUSTOMER_RELATIVE")
dfn="F_CI_XDXT_CUSTOMER_RELATIVE/"+V_DT+".parquet"
F_CI_XDXT_CUSTOMER_RELATIVE.cache()
nrows = F_CI_XDXT_CUSTOMER_RELATIVE.count()
F_CI_XDXT_CUSTOMER_RELATIVE.write.save(path=hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_CUSTOMER_RELATIVE.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_RELATIVE/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_CUSTOMER_RELATIVE lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
|
gpl-3.0
| 2,939,206,429,795,156,000
| 41.551724
| 179
| 0.545989
| false
| 2.891623
| false
| true
| false
|
saffsd/updatedir
|
src/updatedir/__init__.py
|
1
|
4396
|
import logging
import os
import urlparse
logger = logging.getLogger(__name__)
def updatetree(source, dest, overwrite=False):
parsed_url = urlparse.urlparse(dest)
logger.debug(parsed_url)
if parsed_url.scheme == '':
import shutil
if overwrite and os.path.exists(parsed_url.path):
logger.debug("Deleting existing '%s'", parsed_url.path)
shutil.rmtree(parsed_url.path)
logger.debug("Local copy '%s' -> '%s'", source, parsed_url.path)
shutil.copytree(source, parsed_url.path)
else:
dest = parsed_url.path
def visit(arg, dirname, names):
logger.debug("Visit '%s'", dirname)
abs_dir = os.path.normpath(os.path.join(dest, os.path.relpath(dirname, source)))
logger.debug("abs_dir '%s'", abs_dir)
for name in names:
src = os.path.join(dirname, name)
dst = os.path.join(abs_dir, name)
logger.debug("Processing '%s'", src)
if os.path.isdir(src):
if not os.path.isdir(dst):
logger.debug("mkdir '%s'", dst)
os.mkdir(dst)
else:
if os.path.exists(dst):
if overwrite:
logger.debug("overwrite '%s' -> '%s'", src, dst)
shutil.copyfile(src,dst)
else:
logger.debug("will not overwrite '%s'", dst)
else:
logger.debug("copy '%s' -> '%s'", src, dst)
shutil.copyfile(src,dst)
# TODO: mkdir -p behaviour
if not os.path.exists(dest):
os.mkdir(dest)
os.path.walk(source, visit, None)
elif parsed_url.scheme == 'ssh':
import paramiko
import getpass
# Work out host details
host = parsed_url.hostname
port = parsed_url.port if parsed_url.port else 22
transport = paramiko.Transport((host,port))
# Connect the transport
username = parsed_url.username if parsed_url.username else getpass.getuser()
logger.debug("Using username '%s'", username)
if parsed_url.password:
logger.debug("Using password")
transport.connect(username = username, password = parsed_url.password)
# TODO allow the keyfile to be configured in .hydratrc
elif os.path.exists(os.path.expanduser('~/.ssh/id_rsa')):
logger.debug("Using private RSA key")
privatekeyfile = os.path.expanduser('~/.ssh/id_rsa')
mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
transport.connect(username = username, pkey = mykey)
elif os.path.exists(os.path.expanduser('~/.ssh/id_dsa')):
logger.debug("Using private DSS key")
privatekeyfile = os.path.expanduser('~/.ssh/id_dsa')
mykey = paramiko.DSSKey.from_private_key_file(privatekeyfile)
transport.connect(username = username, pkey = mykey)
else:
raise ValueError, "Cannot connect transport: Unable to authenticate"
logger.debug("Transport Connected")
# Start the sftp client
sftp = paramiko.SFTPClient.from_transport(transport)
def visit(arg, dirname, names):
logger.debug("Visit '%s'", dirname)
abs_dir = sftp.normalize(os.path.relpath(dirname, source))
logger.debug("abs_dir '%s'", abs_dir)
for name in names:
src = os.path.join(dirname, name)
dst = os.path.join(abs_dir, name)
logger.debug("Processing '%s'", src)
if os.path.isdir(src):
try:
sftp.stat(dst)
except IOError:
sftp.mkdir(dst)
else:
try:
sftp.stat(dst)
if overwrite:
logger.debug("overwrite '%s'", dst)
sftp.put(src, dst)
except IOError:
sftp.put(src, dst)
head = str(parsed_url.path)
tails = []
done = False
# Roll back the path until we find one that exists
while not done:
try:
sftp.stat(head)
done = True
except IOError:
head, tail = os.path.split(head)
tails.append(tail)
# Now create all the missing paths that don't exist
for tail in reversed(tails):
head = os.path.join(head, tail)
sftp.mkdir(head)
sftp.chdir(parsed_url.path)
os.path.walk(source, visit, None)
else:
raise ValueError, "Don't know how to use scheme '%s'" % parsed_url.scheme
def main():
import sys
logging.basicConfig(level = logging.DEBUG)
updatetree(sys.argv[1], sys.argv[2], overwrite=False)
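# Usage sketch (hypothetical host and paths):
# >>> updatetree('./build', '/tmp/mirror', overwrite=True)                     # local copy
# >>> updatetree('./build', 'ssh://deploy@example.org/var/www/site')           # sftp upload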
|
gpl-3.0
| -6,502,143,634,632,490,000
| 33.077519
| 88
| 0.605778
| false
| 3.725424
| false
| false
| false
|
jelly/calibre
|
src/calibre/gui2/actions/show_quickview.py
|
2
|
7899
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QAction
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.quickview import Quickview
from calibre.gui2 import error_dialog, gprefs
from calibre.gui2.widgets import LayoutButton
class QuickviewButton(LayoutButton): # {{{
def __init__(self, gui, quickview_manager):
self.qv = quickview_manager
qaction = quickview_manager.qaction
LayoutButton.__init__(self, I('quickview.png'), _('Quickview'),
parent=gui, shortcut=qaction.shortcut().toString())
self.toggled.connect(self.update_state)
self.action_toggle = qaction
self.action_toggle.triggered.connect(self.toggle)
self.action_toggle.changed.connect(self.update_shortcut)
def update_state(self, checked):
if checked:
self.set_state_to_hide()
self.qv._show_quickview()
else:
self.set_state_to_show()
self.qv._hide_quickview()
def save_state(self):
gprefs['quickview visible'] = bool(self.isChecked())
def restore_state(self):
if gprefs.get('quickview visible', False):
self.toggle()
# }}}
current_qv_action_pi = None
def set_quickview_action_plugin(pi):
global current_qv_action_pi
current_qv_action_pi = pi
def get_quickview_action_plugin():
return current_qv_action_pi
class ShowQuickviewAction(InterfaceAction):
name = 'Quickview'
action_spec = (_('Quickview'), 'quickview.png', None, None)
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
current_instance = None
def genesis(self):
self.gui.keyboard.register_shortcut('Toggle Quickview', _('Toggle Quickview'),
description=_('Open/close the Quickview panel/window'),
default_keys=('Q',), action=self.qaction,
group=self.action_spec[0])
self.focus_action = QAction(self.gui)
self.gui.addAction(self.focus_action)
self.gui.keyboard.register_shortcut('Focus To Quickview', _('Focus to Quickview'),
description=_('Move the focus to the Quickview panel/window'),
default_keys=('Shift+Q',), action=self.focus_action,
group=self.action_spec[0])
self.focus_action.triggered.connect(self.focus_quickview)
self.focus_bl_action = QAction(self.gui)
self.gui.addAction(self.focus_bl_action)
self.gui.keyboard.register_shortcut('Focus from Quickview',
_('Focus from Quickview to the book list'),
description=_('Move the focus from Quickview to the book list'),
default_keys=('Shift+Alt+Q',), action=self.focus_bl_action,
group=self.action_spec[0])
self.focus_bl_action.triggered.connect(self.focus_booklist)
self.focus_refresh_action = QAction(self.gui)
self.gui.addAction(self.focus_refresh_action)
self.gui.keyboard.register_shortcut('Refresh from Quickview',
_('Refresh Quickview'),
description=_('Refresh the information shown in the Quickview pane'),
action=self.focus_refresh_action,
group=self.action_spec[0])
self.focus_refresh_action.triggered.connect(self.refill_quickview)
self.search_action = QAction(self.gui)
self.gui.addAction(self.search_action)
self.gui.keyboard.register_shortcut('Search from Quickview', _('Search from Quickview'),
description=_('Search for the currently selected Quickview item'),
default_keys=('Shift+S',), action=self.search_action,
group=self.action_spec[0])
self.search_action.triggered.connect(self.search_quickview)
self.search_action.changed.connect(self.set_search_shortcut)
self.menuless_qaction.changed.connect(self.set_search_shortcut)
self.qv_button = QuickviewButton(self.gui, self)
def initialization_complete(self):
set_quickview_action_plugin(self)
def _hide_quickview(self):
'''
This is called only from the QV button toggle
'''
if self.current_instance:
if not self.current_instance.is_closed:
self.current_instance._reject()
self.current_instance = None
def _show_quickview(self, *args):
'''
This is called only from the QV button toggle
'''
if self.current_instance:
if not self.current_instance.is_closed:
self.current_instance._reject()
self.current_instance = None
if self.gui.current_view() is not self.gui.library_view:
error_dialog(self.gui, _('No quickview available'),
_('Quickview is not available for books '
'on the device.')).exec_()
return
self.qv_button.set_state_to_hide()
index = self.gui.library_view.currentIndex()
self.current_instance = Quickview(self.gui, index)
self.current_instance.reopen_after_dock_change.connect(self.open_quickview)
self.set_search_shortcut()
self.current_instance.show()
self.current_instance.quickview_closed.connect(self.qv_button.set_state_to_show)
def set_search_shortcut(self):
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.addAction(self.focus_bl_action)
self.current_instance.set_shortcuts(self.search_action.shortcut().toString(),
self.menuless_qaction.shortcut().toString())
def open_quickview(self):
'''
QV moved from/to dock. Close and reopen the pane/window.
Also called when QV is closed and the user asks to move the focus
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.reject()
self.current_instance = None
self.qaction.triggered.emit()
def refill_quickview(self):
'''
Called when the columns shown in the QV pane might have changed.
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.refill()
def refresh_quickview(self, idx):
'''
Called when the data shown in the QV pane might have changed.
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.refresh(idx)
def change_quickview_column(self, idx):
'''
Called from the column header context menu to change the QV query column
'''
self.focus_quickview()
self.current_instance.slave(idx)
def library_changed(self, db):
'''
If QV is open, close it then reopen it so the columns are correct
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.reject()
self.qaction.triggered.emit()
def focus_quickview(self):
'''
Used to move the focus to the QV books table. Open QV if needed
'''
if not self.current_instance or self.current_instance.is_closed:
self.open_quickview()
else:
self.current_instance.set_focus()
def focus_booklist(self):
self.gui.activateWindow()
self.gui.library_view.setFocus()
def search_quickview(self):
if not self.current_instance or self.current_instance.is_closed:
return
self.current_instance.do_search()
|
gpl-3.0
| -8,621,730,550,062,112,000
| 37.531707
| 96
| 0.620965
| false
| 3.979345
| false
| false
| false
|
kantai/passe-framework-prototype
|
django/http/__init__.py
|
1
|
31597
|
import datetime
import os
import re
import time
from pprint import pformat
from urllib import urlencode, quote
from urlparse import urljoin
#try:
# from StringIO import StringIO
#except ImportError:
from StringIO import StringIO
from copy import deepcopy
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
import Cookie
# httponly support exists in Python 2.6's Cookie library,
# but not in Python 2.4 or 2.5.
_morsel_supports_httponly = Cookie.Morsel._reserved.has_key('httponly')
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = Cookie.SimpleCookie()
_tc.load('f:oo')
_cookie_allows_colon_in_names = 'Set-Cookie: f:oo=' in _tc.output()
if False: #_morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names:
SimpleCookie = Cookie.SimpleCookie
else:
class Morsel(Cookie.Morsel):
def __getstate__(self):
d = dict([(k,v) for k,v in dict.items(self)])
return d
def __setstate__(self, state):
for k,v in state.items():
dict.__setitem__(self, k, v)
if not _morsel_supports_httponly:
def __setitem__(self, K, V):
K = K.lower()
if K == "httponly":
if V:
# The superclass rejects httponly as a key,
# so we jump to the grandparent.
super(Cookie.Morsel, self).__setitem__(K, V)
else:
super(Morsel, self).__setitem__(K, V)
def OutputString(self, attrs=None):
output = super(Morsel, self).OutputString(attrs)
if "httponly" in self:
output += "; httponly"
return output
class SimpleCookie(Cookie.SimpleCookie):
def __getstate__(self):
d = dict([(k,v) for k,v in dict.items(self)])
return d
def __setstate__(self, state):
for k,v in state.items():
dict.__setitem__(self, k, v)
if not _morsel_supports_httponly:
def __set(self, key, real_value, coded_value):
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
if not _cookie_encodes_correctly:
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(SimpleCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",","\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
if not _cookie_allows_colon_in_names:
def load(self, rawdata, ignore_parse_errors=False):
if ignore_parse_errors:
self.bad_cookies = set()
self._BaseCookie__set = self._loose_set
super(SimpleCookie, self).load(rawdata)
if ignore_parse_errors:
self._BaseCookie__set = self._strict_set
for key in self.bad_cookies:
del self[key]
_strict_set = Cookie.BaseCookie._BaseCookie__set
def _loose_set(self, key, real_value, coded_value):
try:
self._strict_set(key, real_value, coded_value)
except Cookie.CookieError:
self.bad_cookies.add(key)
dict.__setitem__(self, key, Cookie.Morsel())
class CompatCookie(SimpleCookie):
def __init__(self, *args, **kwargs):
super(CompatCookie, self).__init__(*args, **kwargs)
import warnings
warnings.warn("CompatCookie is deprecated, use django.http.SimpleCookie instead.",
PendingDeprecationWarning)
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
from django.http.multipartparser import MultiPartParser
from django.conf import settings
from django.core.files import uploadhandler
from utils import *
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
absolute_http_url_re = re.compile(r"^https?://", re.I)
class Http404(Exception):
pass
class HttpRequestDummy(object):
"""
A stripped down HTTP request object that's pickle-able
TODO FILES
"""
def __init__(self,request):
self.GET = request.GET
self.POST = request.POST
self.COOKIES = request.COOKIES
self.META = dict([(k,v) for k,v in request.META.items() if not k.startswith('wsgi')])
self.FILES = request.FILES
self.path = request.path
self.session = request.session
self.path_info = request.path_info
self.user = request.user
self.method = request.method
if hasattr(request, '_messages'):
self._messages = request._messages
def __repr__(self):
return "G: %s \nP: %s \nC: %s\nM: %s\np: %s\nm: %s" % (self.GET,
self.POST,
self.COOKIES,
self.META,
self.path,
self.method)
def _get_request(self):
from django.utils import datastructures
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != (self.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def is_secure(self):
return os.environ.get("HTTPS") == "on"
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
REQUEST = property(_get_request)
typeset = [int, float, str, unicode, bool]
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
self.path = ''
self.path_info = ''
self.method = None
def __repr__(self):
return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
pformat(self.META))
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != (self.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def is_secure(self):
return os.environ.get("HTTPS") == "on"
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
def _set_encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _get_encoding(self):
return self._encoding
encoding = property(_get_encoding, _set_encoding)
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
def _set_upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def _get_upload_handlers(self):
if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
upload_handlers = property(_get_upload_handlers, _set_upload_handlers)
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning = "You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
def _get_raw_post_data(self):
if not hasattr(self, '_raw_post_data'):
if self._read_started:
raise Exception("You cannot access raw_post_data after reading from request's data stream")
try:
content_length = int(self.META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
# If CONTENT_LENGTH was empty string or not an integer, don't
# error out. We've also seen None passed in here (against all
# specs, but see ticket #8259), so we handle TypeError as well.
content_length = 0
if content_length:
self._raw_post_data = self.read(content_length)
else:
self._raw_post_data = self.read()
            self._stream = StringIO(self._raw_post_data) # HACHI: used to be self._stream -- rewrapping the raw data here loses the zero-copy behaviour.
#self._streamed = _stream.getvalue()
return self._raw_post_data
raw_post_data = property(_get_raw_post_data)
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
# Populates self._post and self._files
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_raw_post_data'):
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
if hasattr(self, '_raw_post_data'):
# Use already read data
data = StringIO(self._raw_post_data)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except:
                # An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
else:
self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict()
## File-like and iterator interface.
##
## Expects self._stream to be set to an appropriate source of bytes by
## a corresponding request subclass (WSGIRequest or ModPythonRequest).
## Also when request data has already been read by request.POST or
## request.raw_post_data, self._stream points to a StringIO instance
## containing that data.
def read(self, size=0):
self._read_started = True
return self._stream.read(size)
def readline(self, *args, **kwargs):
self._read_started = True
return self._stream.readline(*args, **kwargs)
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
def get_changeset(request):
"""
Right now, this just makes a dict of all the attributes that we allow to be modified.
"""
d = {}
for attr, val in request.__dict__.items():
if callable(val):
continue
if attr in ['PUT', 'GET', 'REQUEST', 'META', 'path', 'path_info', 'script_name',
'method', '_request', '_post', '_files']:
continue
d[attr] = val
return d
class RequestDelta(object):
def __init__(self, wrapped):
self._wrapped = wrapped
self.changeset = {}
def __getattr__(self, name):
        # if an attribute is fetched, we should add it to the changeset
if name in self.changeset:
return self.changeset[name]
if not hasattr(self._wrapped, name):
raise AttributeError("Barfing %s on %s" % (name, type(self._wrapped) ))
val = getattr(self._wrapped, name)
        if name in ['PUT', 'GET', 'REQUEST', 'META']: # these are immutable now, so return them unchanged
return val
if callable(val):
return val
if type(val) not in typeset:
self.changeset[name] = val
return val
def __setattr__(self, name, val):
if name == '_wrapped' or name == 'changeset':
self.__dict__[name] = val
else:
self.changeset[name] = val
def __getstate__(self):
return self.changeset
def __setstate__(self, state):
if '_wrapped' not in self.__dict__:
self._wrapped = None
self.changeset = state
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict that takes a query string when initialized.
This is immutable unless you create a copy of it.
Values retrieved from this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
    # These are both reset in __init__, but are specified here at the class
# level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string, mutable=False, encoding=None):
MultiValueDict.__init__(self)
if not encoding:
# *Important*: do not import settings any earlier because of note
# in core.handlers.modpython.
from django.conf import settings
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
self.appendlist(force_unicode(key, encoding, errors='replace'),
force_unicode(value, encoding, errors='replace'))
self._mutable = mutable
def _get_encoding(self):
if self._encoding is None:
# *Important*: do not import settings at the module level because
# of the note in core.handlers.modpython.
from django.conf import settings
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
def _set_encoding(self, value):
self._encoding = value
encoding = property(_get_encoding, _set_encoding)
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.__setitem__(self, key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in dict.items(self):
dict.__setitem__(result, key, value)
return result
def __deepcopy__(self, memo):
import django.utils.copycompat as copy
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
MultiValueDict.setlist(self, key, list_)
def setlistdefault(self, key, default_list=()):
self._assert_mutable()
if key not in self:
self.setlist(key, default_list)
return MultiValueDict.getlist(self, key)
def appendlist(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.appendlist(self, key, value)
def update(self, other_dict):
self._assert_mutable()
f = lambda s: str_to_unicode(s, self.encoding)
if hasattr(other_dict, 'lists'):
for key, valuelist in other_dict.lists():
for value in valuelist:
MultiValueDict.update(self, {f(key): f(value)})
else:
d = dict([(f(k), f(v)) for k, v in other_dict.items()])
MultiValueDict.update(self, d)
def pop(self, key, *args):
self._assert_mutable()
return MultiValueDict.pop(self, key, *args)
def popitem(self):
self._assert_mutable()
return MultiValueDict.popitem(self)
def clear(self):
self._assert_mutable()
MultiValueDict.clear(self)
def setdefault(self, key, default=None):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
default = str_to_unicode(default, self.encoding)
return MultiValueDict.setdefault(self, key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict('', mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
encode = lambda k, v: urlencode({k: v})
for k, list_ in self.lists():
k = smart_str(k, self.encoding)
output.extend([encode(k, smart_str(v, self.encoding))
for v in list_])
return '&'.join(output)
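# Illustrative usage sketch (not part of the original module): QueryDict
# instances built from a query string are immutable until copied. The query
# string and values below are hypothetical; an explicit encoding is passed to
# avoid the settings lookup.
#
#     >>> q = QueryDict('a=1&a=2&b=3', encoding='utf-8')
#     >>> q.getlist('a')
#     [u'1', u'2']
#     >>> q['b'] = '4'            # raises AttributeError: instance is immutable
#     >>> q2 = q.copy()           # mutable deep copy
#     >>> q2['b'] = '4'
#     >>> q2.urlencode()          # key order is not guaranteed
#     'a=1&a=2&b=4'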
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, Cookie.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie, ignore_parse_errors=True)
except Cookie.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
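# Illustrative sketch (not part of the original module): parse_cookie accepts
# either a raw "Cookie:" header string or a Cookie.BaseCookie instance and
# returns a plain dict of cookie names to values. The header below is
# hypothetical.
#
#     >>> c = parse_cookie('sessionid=abc123; csrftoken=xyz')
#     >>> c['sessionid'], c['csrftoken']
#     ('abc123', 'xyz')
#     >>> parse_cookie('')
#     {}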
class BadHeaderError(ValueError):
pass
class HttpResponse(object):
"""A basic HTTP response, with content and dictionary-accessed headers."""
status_code = 200
def __init__(self, content='', mimetype=None, status=None,
content_type=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
if mimetype:
content_type = mimetype # For backwards compatibility
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
if not isinstance(content, basestring) and hasattr(content, '__iter__'):
self._container = content
self._is_string = False
else:
self._container = [content]
self._is_string = True
self.cookies = SimpleCookie()
if status:
self.status_code = status
self['Content-Type'] = content_type
def __str__(self):
"""Full HTTP message, including headers."""
return '\n'.join(['%s: %s' % (key, value)
for key, value in self._headers.values()]) \
+ '\n\n' + self.content
def _convert_to_ascii(self, *values):
"""Converts all values to ascii strings."""
for value in values:
if isinstance(value, unicode):
try:
value = value.encode('us-ascii')
except UnicodeError, e:
e.reason += ', HTTP response headers must be in US-ASCII format'
raise
else:
value = str(value)
if '\n' in value or '\r' in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
yield value
def __setitem__(self, header, value):
header, value = self._convert_to_ascii(header, value)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return self._headers.has_key(header.lower())
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be a string in the correct format or a
``datetime.datetime`` object in UTC. If ``expires`` is a datetime
object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
def _get_content(self):
if self.has_header('Content-Encoding'):
return ''.join(self._container)
return smart_str(''.join(self._container), self._charset)
def _set_content(self, value):
self._container = [value]
self._is_string = True
content = property(_get_content, _set_content)
def __iter__(self):
self._iterator = iter(self._container)
return self
def next(self):
chunk = self._iterator.next()
if isinstance(chunk, unicode):
chunk = chunk.encode(self._charset)
return str(chunk)
def close(self):
if hasattr(self._container, 'close'):
self._container.close()
# The remaining methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
def write(self, content):
if not self._is_string:
raise Exception("This %s instance is not writable" % self.__class__)
self._container.append(content)
def flush(self):
pass
def tell(self):
if not self._is_string:
raise Exception("This %s instance cannot tell its position" % self.__class__)
return sum([len(chunk) for chunk in self._container])
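# Illustrative sketch (not part of the original module): building a response
# and using the dictionary-style header access and cookie helpers defined
# above. Header and cookie names are hypothetical; this assumes Django
# settings are configured.
#
#     >>> resp = HttpResponse("hello", content_type="text/plain")
#     >>> resp['X-Custom'] = 'value'
#     >>> 'x-custom' in resp          # header lookup is case-insensitive
#     True
#     >>> resp.set_cookie('sessionid', 'abc123', max_age=3600)
#     >>> resp.status_code
#     200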
class HttpResponseRedirect(HttpResponse):
status_code = 302
def __init__(self, redirect_to):
super(HttpResponseRedirect, self).__init__()
self['Location'] = iri_to_uri(redirect_to)
class HttpResponsePermanentRedirect(HttpResponse):
status_code = 301
def __init__(self, redirect_to):
super(HttpResponsePermanentRedirect, self).__init__()
self['Location'] = iri_to_uri(redirect_to)
class HttpResponseNotModified(HttpResponse):
status_code = 304
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods):
super(HttpResponseNotAllowed, self).__init__()
self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
return request.get_host()
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, str):
return unicode(s, encoding, 'replace')
else:
return s
|
bsd-3-clause
| 649,093,225,685,276,500
| 36.705251
| 134
| 0.576352
| false
| 4.209566
| false
| false
| false
|
rchav/vinerack
|
saleor/userprofile/models.py
|
2
|
5462
|
from __future__ import unicode_literals
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin)
from django.db import models
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext_lazy
from django_countries.fields import Country, CountryField
class AddressManager(models.Manager):
def as_data(self, address):
data = model_to_dict(address, exclude=['id', 'user'])
if isinstance(data['country'], Country):
data['country'] = data['country'].code
return data
def are_identical(self, addr1, addr2):
data1 = self.as_data(addr1)
data2 = self.as_data(addr2)
return data1 == data2
def store_address(self, user, address):
data = self.as_data(address)
address, dummy_created = user.addresses.get_or_create(**data)
return address
@python_2_unicode_compatible
class Address(models.Model):
first_name = models.CharField(
pgettext_lazy('Address field', 'first name'),
max_length=256)
last_name = models.CharField(
pgettext_lazy('Address field', 'last name'),
max_length=256)
company_name = models.CharField(
pgettext_lazy('Address field', 'company or organization'),
max_length=256, blank=True)
street_address_1 = models.CharField(
pgettext_lazy('Address field', 'address'),
max_length=256, blank=True)
street_address_2 = models.CharField(
pgettext_lazy('Address field', 'address'),
max_length=256, blank=True)
city = models.CharField(
pgettext_lazy('Address field', 'city'),
max_length=256, blank=True)
city_area = models.CharField(
pgettext_lazy('Address field', 'district'),
max_length=128, blank=True)
postal_code = models.CharField(
pgettext_lazy('Address field', 'postal code'),
max_length=20, blank=True)
country = CountryField(
pgettext_lazy('Address field', 'country'))
country_area = models.CharField(
pgettext_lazy('Address field', 'state or province'),
max_length=128, blank=True)
phone = models.CharField(
pgettext_lazy('Address field', 'phone number'),
max_length=30, blank=True)
objects = AddressManager()
@property
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def __str__(self):
if self.company_name:
return '%s - %s' % (self.company_name, self.full_name)
return self.full_name
def __repr__(self):
return (
'Address(first_name=%r, last_name=%r, company_name=%r, '
'street_address_1=%r, street_address_2=%r, city=%r, '
'postal_code=%r, country=%r, country_area=%r, phone=%r)' % (
self.first_name, self.last_name, self.company_name,
self.street_address_1, self.street_address_2, self.city,
self.postal_code, self.country, self.country_area,
self.phone))
class UserManager(BaseUserManager):
def create_user(self, email, password=None, is_staff=False,
is_active=True, **extra_fields):
        'Creates a User with the given email and password'
email = UserManager.normalize_email(email)
user = self.model(email=email, is_active=is_active,
is_staff=is_staff, **extra_fields)
if password:
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password=None, **extra_fields):
return self.create_user(email, password, is_staff=True,
is_superuser=True, **extra_fields)
def store_address(self, user, address, billing=False, shipping=False):
entry = Address.objects.store_address(user, address)
changed = False
if billing and not user.default_billing_address_id:
user.default_billing_address = entry
changed = True
if shipping and not user.default_shipping_address_id:
user.default_shipping_address = entry
changed = True
if changed:
user.save()
return entry
class User(PermissionsMixin, AbstractBaseUser):
email = models.EmailField(unique=True)
addresses = models.ManyToManyField(Address, blank=True)
is_staff = models.BooleanField(
pgettext_lazy('User field', 'staff status'),
default=False)
is_active = models.BooleanField(
pgettext_lazy('User field', 'active'),
default=False)
date_joined = models.DateTimeField(
pgettext_lazy('User field', 'date joined'),
default=timezone.now, editable=False)
default_shipping_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default shipping address'))
default_billing_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default billing address'))
USERNAME_FIELD = 'email'
objects = UserManager()
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
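# Illustrative sketch (not part of the original models module): creating a
# user and storing a default billing address through the managers defined
# above. This requires a configured database; all values are hypothetical.
#
#     user = User.objects.create_user('buyer@example.com', password='secret')
#     address = Address(first_name='Jane', last_name='Doe', country='US')
#     entry = User.objects.store_address(user, address, billing=True)
#     assert user.default_billing_address == entry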
|
bsd-3-clause
| -8,794,741,306,931,394,000
| 35.657718
| 77
| 0.631637
| false
| 3.952243
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/file_info.py
|
1
|
1684
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FileInfo(Model):
"""Information about a image store file.
:param file_size: The size of file in bytes.
:type file_size: str
:param file_version: Information about the version of image store file.
:type file_version: ~azure.servicefabric.models.FileVersion
:param modified_date: The date and time when the image store file was last
modified.
:type modified_date: datetime
:param store_relative_path: The file path relative to the image store root
path.
:type store_relative_path: str
"""
_attribute_map = {
'file_size': {'key': 'FileSize', 'type': 'str'},
'file_version': {'key': 'FileVersion', 'type': 'FileVersion'},
'modified_date': {'key': 'ModifiedDate', 'type': 'iso-8601'},
'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'},
}
def __init__(self, file_size=None, file_version=None, modified_date=None, store_relative_path=None):
super(FileInfo, self).__init__()
self.file_size = file_size
self.file_version = file_version
self.modified_date = modified_date
self.store_relative_path = store_relative_path
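# Illustrative sketch (not part of the generated model): constructing a
# FileInfo and inspecting its serialization mapping. All values are
# hypothetical.
#
#     info = FileInfo(file_size='2048',
#                     store_relative_path='Store\\MyApp\\code.zip')
#     print(info.file_size)                                     # '2048'
#     print(FileInfo._attribute_map['modified_date']['type'])   # 'iso-8601'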
|
mit
| -8,983,340,538,904,316,000
| 39.095238
| 104
| 0.612827
| false
| 4.17866
| false
| false
| false
|
ScreamingUdder/mantid
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSBeamSpreaderTransmission.py
|
1
|
13137
|
#pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
import mantid.simpleapi as api
from mantid.api import *
from mantid.kernel import *
import os
from reduction_workflow.find_data import find_data
class SANSBeamSpreaderTransmission(PythonAlgorithm):
def category(self):
return "Workflow\\SANS\\UsesPropertyManager"
def name(self):
return "SANSBeamSpreaderTransmission"
def summary(self):
return "Compute transmission using the beam spreader method"
def PyInit(self):
self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "",
direction=Direction.Input))
self.declareProperty(FileProperty("SampleSpreaderFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty(FileProperty("DirectSpreaderFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty(FileProperty("SampleScatteringFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty(FileProperty("DirectScatteringFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty("SpreaderTransmissionValue", 1.0,
"Transmission of the beam spreader")
self.declareProperty("SpreaderTransmissionError", 0.0,
"Error on the transmission of the beam spreader")
self.declareProperty("ThetaDependent", True,
"If true, a theta-dependent correction will be applied")
self.declareProperty(FileProperty("DarkCurrentFilename", "",
action=FileAction.OptionalLoad,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty("UseSampleDarkCurrent", False,
"If true, the sample dark current will be used")
self.declareProperty("ReductionProperties", "__sans_reduction_properties",
validator=StringMandatoryValidator(),
doc="Property manager name for the reduction")
self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "",
direction = Direction.Output))
self.declareProperty("MeasuredTransmission", 0.0,
direction=Direction.Output)
self.declareProperty("MeasuredError", 0.0,
direction=Direction.Output)
self.declareProperty("OutputMessage", "",
direction=Direction.Output, doc = "Output message")
def PyExec(self): # noqa: C901
# Get the reduction property manager
property_manager_name = self.getProperty("ReductionProperties").value
property_manager = PropertyManagerDataService.retrieve(property_manager_name)
# Build the name we are going to give the transmission workspace
sample_scatt = self.getPropertyValue("SampleScatteringFilename")
sample_basename = os.path.basename(sample_scatt)
entry_name = "TransmissionSpreader%s" % sample_scatt
trans_ws_name = "__transmission_fit_%s" % sample_basename
trans_ws = None
        # If we have already computed the transmission, use the
# previously computed workspace
if property_manager.existsProperty(entry_name):
trans_ws_name = property_manager.getProperty(entry_name)
if AnalysisDataService.doesExist(trans_ws_name):
trans_ws = AnalysisDataService.retrieve(trans_ws_name)
# Get instrument to use with FileFinder
instrument = ''
if property_manager.existsProperty("InstrumentName"):
instrument = property_manager.getProperty("InstrumentName").value
# Get the data loader
def _load_data(filename, output_ws):
if not property_manager.existsProperty("LoadAlgorithm"):
Logger("SANSBeamSpreaderTransmission").error("SANS reduction not set up properly: missing load algorithm")
raise RuntimeError("SANS reduction not set up properly: missing load algorithm")
p=property_manager.getProperty("LoadAlgorithm")
alg=Algorithm.fromString(p.valueAsStr)
alg.setProperty("Filename", filename)
alg.setProperty("OutputWorkspace", output_ws)
if alg.existsProperty("ReductionProperties"):
alg.setProperty("ReductionProperties", property_manager_name)
alg.execute()
msg = ''
if alg.existsProperty("OutputMessage"):
msg = alg.getProperty("OutputMessage").value
return msg
# Compute the transmission if we don't already have it
if trans_ws is None:
# Load data files
sample_spreader_ws = "__trans_sample_spreader"
direct_spreader_ws = "__trans_direct_spreader"
sample_scatt_ws = "__trans_sample_scatt"
direct_scatt_ws = "__trans_direct_scatt"
sample_spread = self.getPropertyValue("SampleSpreaderFilename")
direct_spread = self.getPropertyValue("DirectSpreaderFilename")
direct_scatt = self.getPropertyValue("DirectScatteringFilename")
ws_names = [[sample_spread, sample_spreader_ws],
[direct_spread, direct_spreader_ws],
[sample_scatt, sample_scatt_ws],
[direct_scatt, direct_scatt_ws]]
for f in ws_names:
filepath = find_data(f[0], instrument=instrument)
_load_data(filepath, f[1])
self._subtract_dark_current(f[1], property_manager)
# Get normalization for transmission calculation
monitor_det_ID = None
if property_manager.existsProperty("TransmissionNormalisation"):
sample_ws = AnalysisDataService.retrieve(sample_scatt_ws)
if property_manager.getProperty("TransmissionNormalisation").value=="Monitor":
monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-monitor-spectrum")[0])
else:
monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-timer-spectrum")[0])
elif property_manager.existsProperty("NormaliseAlgorithm"):
def _normalise(workspace):
p=property_manager.getProperty("NormaliseAlgorithm")
alg=Algorithm.fromString(p.valueAsStr)
alg.setProperty("InputWorkspace", workspace)
alg.setProperty("OutputWorkspace", workspace)
if alg.existsProperty("ReductionProperties"):
alg.setProperty("ReductionProperties", property_manager_name)
alg.execute()
msg = ''
if alg.existsProperty("OutputMessage"):
msg += alg.getProperty("OutputMessage").value+'\n'
return msg
for f in ws_names:
_normalise(f[1])
# Calculate transmission. Use the reduction method's normalization channel (time or beam monitor)
# as the monitor channel.
spreader_t_value = self.getPropertyValue("SpreaderTransmissionValue")
spreader_t_error = self.getPropertyValue("SpreaderTransmissionError")
alg = AlgorithmManager.createUnmanaged('CalculateTransmissionBeamSpreader')
alg.initialize()
alg.setProperty("SampleSpreaderRunWorkspace", sample_spreader_ws)
alg.setProperty("DirectSpreaderRunWorkspace", direct_spreader_ws)
alg.setProperty("SampleScatterRunWorkspace", sample_scatt_ws)
alg.setProperty("DirectScatterRunWorkspace", direct_scatt_ws)
alg.setProperty("IncidentBeamMonitor", monitor_det_ID)
alg.setProperty("OutputWorkspace",trans_ws_name)
alg.setProperty("SpreaderTransmissionValue",spreader_t_value)
alg.setProperty("SpreaderTransmissionError",spreader_t_error)
alg.execute()
trans_ws = AnalysisDataService.retrieve(trans_ws_name)
for f in ws_names:
if AnalysisDataService.doesExist(f[1]):
AnalysisDataService.remove(f[1])
# 2- Apply correction (Note: Apply2DTransCorr)
input_ws_name = self.getPropertyValue("InputWorkspace")
if not AnalysisDataService.doesExist(input_ws_name):
Logger("SANSBeamSpreaderTransmission").error("Could not find input workspace")
workspace = AnalysisDataService.retrieve(input_ws_name).name()
# Clone workspace to make boost-python happy
api.CloneWorkspace(InputWorkspace=workspace,
OutputWorkspace='__'+workspace)
workspace = '__'+workspace
self._apply_transmission(workspace, trans_ws_name)
trans = trans_ws.dataY(0)[0]
error = trans_ws.dataE(0)[0]
output_str = ''
if len(trans_ws.dataY(0))==1:
self.setProperty("MeasuredTransmission", trans)
self.setProperty("MeasuredError", error)
output_str = "\n%s T = %6.2g += %6.2g\n" % (output_str, trans, error)
output_msg = "Transmission correction applied [%s]%s\n" % (trans_ws_name, output_str)
output_ws = AnalysisDataService.retrieve(workspace)
self.setProperty("OutputWorkspace", output_ws)
self.setPropertyValue("OutputMessage", output_msg)
def _apply_transmission(self, workspace, trans_workspace):
"""
Apply transmission correction
@param workspace: workspace to apply correction to
@param trans_workspace: workspace name for of the transmission
"""
# Make sure the binning is compatible
api.RebinToWorkspace(WorkspaceToRebin=trans_workspace,
WorkspaceToMatch=workspace,
OutputWorkspace=trans_workspace+'_rebin',
PreserveEvents=False)
# Apply angle-dependent transmission correction using the zero-angle transmission
theta_dependent = self.getProperty("ThetaDependent").value
api.ApplyTransmissionCorrection(InputWorkspace=workspace,
TransmissionWorkspace=trans_workspace+'_rebin',
OutputWorkspace=workspace,
ThetaDependent=theta_dependent)
if AnalysisDataService.doesExist(trans_workspace+'_rebin'):
AnalysisDataService.remove(trans_workspace+'_rebin')
def _subtract_dark_current(self, workspace_name, property_manager):
"""
Subtract the dark current
@param workspace_name: name of the workspace to subtract from
@param property_manager: property manager object
"""
# Subtract dark current
use_sample_dc = self.getProperty("UseSampleDarkCurrent").value
dark_current_data = self.getPropertyValue("DarkCurrentFilename")
property_manager_name = self.getProperty("ReductionProperties").value
def _dark(workspace, dark_current_property):
if property_manager.existsProperty(dark_current_property):
p=property_manager.getProperty(dark_current_property)
# Dark current subtraction for sample data
alg=Algorithm.fromString(p.valueAsStr)
alg.setProperty("InputWorkspace", workspace)
alg.setProperty("OutputWorkspace", workspace)
alg.setProperty("Filename", dark_current_data)
if alg.existsProperty("PersistentCorrection"):
alg.setProperty("PersistentCorrection", False)
if alg.existsProperty("ReductionProperties"):
alg.setProperty("ReductionProperties", property_manager_name)
alg.execute()
msg = "Dark current subtracted"
if alg.existsProperty("OutputMessage"):
msg += alg.getProperty("OutputMessage").value
return msg
if use_sample_dc is True:
_dark(workspace_name, "DarkCurrentAlgorithm")
elif len(dark_current_data.strip())>0:
_dark(workspace_name, "DefaultDarkCurrentAlgorithm")
#############################################################################################
AlgorithmFactory.subscribe(SANSBeamSpreaderTransmission)
|
gpl-3.0
| -2,155,445,629,212,738,800
| 49.722008
| 126
| 0.599528
| false
| 4.792776
| false
| false
| false
|
sebleier/python-redis
|
pyredis/hash.py
|
1
|
2365
|
from collections import defaultdict
class Hash(object):
def __init__(self):
self._data = defaultdict(int)
def hset(self, key, value):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
if key in self._data:
created = 0
else:
created = 1
self._data[key] = value
return created
def hget(self, key):
"Return the value of ``key``"
return self._data.get(key, None)
def hdel(self, *keys):
"Delete ``keys``"
deleted = 0
for key in keys:
if key in self._data:
deleted += 1
del self._data[key]
return deleted
def hexists(self, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return key in self._data
def hgetall(self):
"Return a Python dict of the hash's name/value pairs"
return self._data
def hincrby(self, key, amount=1):
"Increment the value of ``key`` in hash by ``amount``"
self._data[key] += amount
return self._data[key]
def hincrbyfloat(self, key, amount=1.0):
"""
Increment the value of ``key`` in hash by floating ``amount``
"""
return self.hincrby(key, amount)
def hkeys(self):
"Return the list of keys within hash"
return self._data.keys()
def hlen(self):
"Return the number of elements in hash"
return len(self._data)
def hsetnx(self, key, value):
"""
Set ``key`` to ``value`` within hash if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
if key in self._data:
return 0
self._data[key] = value
return 1
def hmset(self, mapping):
"""
Sets each key in the ``mapping`` dict to its corresponding value
in the hash
"""
return self._data.update(mapping)
def hmget(self, keys):
"Returns a list of values ordered identically to ``keys``"
values = []
for key in keys:
values.append(self._data.get(key, None))
return values
def hvals(self):
"Return the list of values within hash"
return self._data.values()
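# Minimal usage sketch (not part of the original module), exercising the
# in-memory Hash against the Redis-style semantics documented above.
if __name__ == '__main__':
    h = Hash()
    assert h.hset('counter', 5) == 1        # new field -> 1
    assert h.hset('counter', 7) == 0        # existing field -> 0
    assert h.hget('counter') == 7
    assert h.hincrby('counter', 3) == 10
    h.hmset({'a': 1, 'b': 2})
    assert sorted(h.hkeys()) == ['a', 'b', 'counter']
    assert h.hdel('a', 'missing') == 1      # only existing keys are counted
    assert h.hexists('a') is False
    print('Hash demo OK')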
|
bsd-3-clause
| 7,353,101,178,696,724,000
| 26.183908
| 77
| 0.542072
| false
| 4.063574
| false
| false
| false
|
stepanovsh/project_template
|
{{cookiecutter.repo_name}}/config/settings/local.py
|
1
|
2101
|
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
{%- if cookiecutter.use_celery == "y" -%}
########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY
{%- endif %}
# Your local stuff: Below this line define 3rd party library settings
|
bsd-3-clause
| 1,962,637,750,829,820,200
| 30.358209
| 101
| 0.502142
| false
| 4.235887
| false
| false
| false
|
jmeppley/py-metagenomics
|
sample_records.py
|
1
|
3684
|
#!/usr/bin/env python
"""
This script takes any file that can be divided into records and
returns N randomly selected records
Records can be fasta, fastq, genbank, or something described by a
simple RegExp
"""
from os import path
from edl.util import *
from edl.batch import *
import re
import sys
import argparse
def main():
# set up CLI
description = """
This script takes any file that can be divided into records and
returns N randomly selected records.
NOTE:
By default, all sampled records are stored in memory. This requires a
good amount of RAM (depending on record size and sample size). To avoid
this, specify the number of records or request a count using the
-n (population_size) option.
Records can be fasta, fastq, genbank, or something described by a
simple RegExp
"""
parser = argparse.ArgumentParser(description=description)
add_IO_arguments(parser)
add_record_parsing_arguments(parser)
parser.add_argument(
"-s",
"--sample_size",
default=1000000,
type=int,
metavar="SAMPLE_SIZE",
help="Number of records to pull out. Defaults to 1 million.")
parser.add_argument("-n", "--population_size", type=int, default=0,
metavar="POPULATION_SIZE",
help="Number of records in file. An integer, should "
"be greater than the SAMPLE_SIZE, except: 0 "
"(default)=> do a separate pass to count records "
"first; -1 => reservoir sample to RAM on the fly")
add_universal_arguments(parser)
arguments = parser.parse_args()
setup_logging(arguments)
# check arguments
if arguments.input_files == [sys.stdin,
] and arguments.population_size == 0:
parser.error("We cannot count records from STDIN, please specify a"
"positive population size or use reservoir sampling "
"(-n -1)")
if arguments.population_size > 0 and \
arguments.population_size < arguments.sample_size:
parser.error("We cannot sample more records then "
"there are in the file!")
for inhandle, outhandle in inputIterator(arguments):
        # We need the file name to get the type, get from handle (if not stdin)
infilename = inhandle.name
fileType = getFileType(arguments, infilename)
record_iterator = fileType.recordStreamer(inhandle)
logging.debug("Looking for %d records in %s" % (arguments.sample_size,
infilename))
# if arguments.population_size<0:
# indexed_sample_generator will only read file once
# using reservoir sampling
# count records if asked to
if arguments.population_size == 0:
record_count, total_size = get_total_size(inhandle.name, fileType)
arguments.population_size = record_count
logging.debug("setting population size to: {}"
.format(arguments.population_size))
# get sampled record generator (will use reservoir if P is <0)
sampled_records = indexed_sample_generator(record_iterator,
N=arguments.sample_size,
P=arguments.population_size)
# print out sampled records
count = 0
for record in sampled_records:
outhandle.writelines(record)
count += 1
logging.debug("Sampled %d records" % (count))
if __name__ == '__main__':
main()
|
mit
| -5,833,227,940,174,607,000
| 34.76699
| 79
| 0.602334
| false
| 4.525799
| false
| false
| false
|
Nichol4s/PyHead
|
tests/unreader.py
|
1
|
1888
|
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Classes that can undo reading data from
# a given type of data source.
class Unreader(object):
def __init__(self):
self.buf = StringIO()
def chunk(self):
raise NotImplementedError()
def read(self, size=None):
if size is not None and not isinstance(size, (int, long)):
raise TypeError("size parameter must be an int or long.")
if size == 0:
return ""
if size < 0:
size = None
self.buf.seek(0, os.SEEK_END)
if size is None and self.buf.tell():
ret = self.buf.getvalue()
self.buf.truncate(0)
return ret
if size is None:
return self.chunk()
while self.buf.tell() < size:
chunk = self.chunk()
if not len(chunk):
ret = self.buf.getvalue()
self.buf.truncate(0)
return ret
self.buf.write(chunk)
data = self.buf.getvalue()
self.buf.truncate(0)
self.buf.write(data[size:])
return data[:size]
def unread(self, data):
self.buf.seek(0, os.SEEK_END)
self.buf.write(data)
class SocketUnreader(Unreader):
def __init__(self, sock, max_chunk=8192):
super(SocketUnreader, self).__init__()
self.sock = sock
self.mxchunk = max_chunk
def chunk(self):
return self.sock.recv(self.mxchunk)
class IterUnreader(Unreader):
def __init__(self, iterable):
super(IterUnreader, self).__init__()
self.iter = iter(iterable)
def chunk(self):
if not self.iter:
return ""
try:
return self.iter.next()
except StopIteration:
self.iter = None
return ""
|
mit
| -3,738,559,382,780,167,700
| 24.863014
| 69
| 0.544492
| false
| 4.068966
| false
| false
| false
|
jzitelli/poolvr.py
|
poolvr/billboards.py
|
1
|
4370
|
import pkgutil
import os.path
from ctypes import c_void_p
import numpy as np
import OpenGL.GL as gl
import OpenGL.error
from .gl_rendering import Node, Technique, Material, Program, DTYPE_COMPONENT_TYPE, Texture
from .gl_primitives import PlanePrimitive
NULL_PTR = c_void_p(0)
# TODO: pkgutils way
TEXTURES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
'textures')
class BillboardParticles(Node):
technique = Technique(Program(pkgutil.get_data('poolvr', 'shaders/bb_particles_vs.glsl').decode(),
pkgutil.get_data('poolvr', 'shaders/bb_particles_fs.glsl').decode()))
_modelview = np.eye(4, dtype=np.float32)
def __init__(self,
texture=Texture(os.path.join(TEXTURES_DIR, 'sphere_bb_alpha.png')),
normal_map=Texture(os.path.join(TEXTURES_DIR, 'sphere_bb_normal.png')),
num_particles=1, scale=1.0, color=None, translate=None):
Node.__init__(self)
self.texture = texture
self.normal_map = normal_map
self.material = Material(self.technique, textures={'map': texture, 'u_normal': normal_map})
self.num_particles = num_particles
if color is None:
color = np.array([num_particles*[1.0, 1.0, 1.0]], dtype=np.float32)
if translate is None:
translate = np.array([[1.1*scale*i, 0.2, 0.0] for i in range(num_particles)], dtype=np.float32)
self.primitive = PlanePrimitive(width=scale, height=scale,
color=color, translate=translate,
attribute_usage={'color': gl.GL_STATIC_DRAW,
'translate': gl.GL_DYNAMIC_DRAW})
self.primitive.attributes['position'] = self.primitive.attributes['vertices']
self.primitive.attributes['uv'] = self.primitive.attributes['uvs']
self._initialized = False
def init_gl(self, force=False):
if self._initialized and not force:
return
self.material.init_gl(force=force)
self.primitive.init_gl(force=force)
self._initialized = True
def update_gl(self):
if not self._initialized: self.init_gl()
translate = self.primitive.attributes['translate']
values = translate.tobytes()
try:
gl.glNamedBufferSubData(self.primitive.buffers['translate'], 0, len(values), values)
except OpenGL.error.NullFunctionError as e:
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.primitive.buffers['translate'])
gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, len(values), values)
def draw(self, view=None, projection=None, frame_data=None):
self.material.use()
if view is not None:
self.world_matrix.dot(view, out=self._modelview)
gl.glUniformMatrix4fv(self.technique.uniform_locations['u_modelview'], 1, False, self._modelview)
if projection is not None:
gl.glUniformMatrix4fv(self.technique.uniform_locations['u_projection'], 1, False, projection)
for attribute_name, location in self.technique.attribute_locations.items():
attribute = self.primitive.attributes[attribute_name]
gl.glEnableVertexAttribArray(location)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.primitive.buffers[attribute_name])
gl.glVertexAttribPointer(location, attribute.shape[-1],
DTYPE_COMPONENT_TYPE[attribute.dtype], False,
attribute.dtype.itemsize * attribute.shape[-1],
NULL_PTR)
if attribute_name == 'translate' or attribute_name == 'color':
gl.glVertexAttribDivisor(location, 1)
else:
gl.glVertexAttribDivisor(location, 0)
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.primitive.index_buffer)
gl.glDrawElementsInstanced(self.primitive.mode, self.primitive.indices.size,
DTYPE_COMPONENT_TYPE[self.primitive.indices.dtype], NULL_PTR, self.num_particles)
# for location in self.technique.attribute_locations.values():
# gl.glDisableVertexAttribArray(location)
self.material.release()
|
mit
| -6,383,165,777,984,583,000
| 50.411765
| 116
| 0.613501
| false
| 3.933393
| false
| false
| false
|
JioCloud/glance
|
glance/api/middleware/cache.py
|
1
|
12967
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Transparent image file caching middleware, designed to live on
Glance API nodes. When images are requested from the API node,
this middleware caches the returned image file to local filesystem.
When subsequent requests for the same image file are received,
the local cached copy of the image file is returned.
"""
import re
from oslo_log import log as logging
import webob
from glance.api.common import size_checked_iter
from glance.api import policy
from glance.api.v1 import images
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
from glance import i18n
from glance import image_cache
from glance import notifier
import glance.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
PATTERNS = {
('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'),
('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$')
}
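# Illustrative note (not part of the original module): the compiled patterns
# above capture the image id from cacheable URLs. For example, with a
# hypothetical request path:
#
#     >>> PATTERNS[('v2', 'GET')].match('/v2/images/1234-abcd/file').group(1)
#     '1234-abcd'
#     >>> PATTERNS[('v1', 'GET')].match('/v1/images/detail').group(1)
#     'detail'    # filtered out later by _match_request (see LP Bug #879136)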
class CacheFilter(wsgi.Middleware):
def __init__(self, app):
self.cache = image_cache.ImageCache()
self.serializer = images.ImageSerializer()
self.policy = policy.Enforcer()
LOG.info(_LI("Initialized image cache middleware"))
super(CacheFilter, self).__init__(app)
def _verify_metadata(self, image_meta):
"""
Sanity check the 'deleted' and 'size' metadata values.
"""
# NOTE: admins can see image metadata in the v1 API, but shouldn't
# be able to download the actual image data.
if image_meta['status'] == 'deleted' and image_meta['deleted']:
raise exception.NotFound()
if not image_meta['size']:
# override image size metadata with the actual cached
# file size, see LP Bug #900959
image_meta['size'] = self.cache.get_image_size(image_meta['id'])
@staticmethod
def _match_request(request):
"""Determine the version of the url and extract the image id
        :returns: tuple of (version, method, image id) if the url is cacheable,
            otherwise None
"""
for ((version, method), pattern) in PATTERNS.items():
if request.method != method:
continue
match = pattern.match(request.path_info)
if match is None:
continue
image_id = match.group(1)
# Ensure the image id we got looks like an image id to filter
# out a URI like /images/detail. See LP Bug #879136
if image_id != 'detail':
return (version, method, image_id)
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden as e:
LOG.debug("User not permitted to perform '%s' action", action)
raise webob.exc.HTTPForbidden(explanation=e.msg, request=req)
def _get_v1_image_metadata(self, request, image_id):
"""
Retrieves image metadata using registry for v1 api and creates
dictionary-like mash-up of image core and custom properties.
"""
try:
image_metadata = registry.get_image_metadata(request.context,
image_id)
return utils.create_mashup_dict(image_metadata)
except exception.NotFound as e:
LOG.debug("No metadata found for image '%s'", image_id)
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def _get_v2_image_metadata(self, request, image_id):
"""
Retrieves image and for v2 api and creates adapter like object
to access image core or custom properties on request.
"""
db_api = glance.db.get_api()
image_repo = glance.db.ImageRepo(request.context, db_api)
try:
image = image_repo.get(image_id)
# Storing image object in request as it is required in
# _process_v2_request call.
request.environ['api.cache.image'] = image
return policy.ImageTarget(image)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def process_request(self, request):
"""
For requests for an image file, we check the local image
cache. If present, we return the image file, appending
the image metadata in headers. If not present, we pass
the request on to the next application in the pipeline.
"""
match = self._match_request(request)
try:
(version, method, image_id) = match
except TypeError:
# Trying to unpack None raises this exception
return None
self._stash_request_info(request, image_id, method, version)
if request.method != 'GET' or not self.cache.is_cached(image_id):
return None
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(request, image_id)
# Deactivated images shall not be served from cache
if image_metadata['status'] == 'deactivated':
return None
try:
self._enforce(request, 'download_image', target=image_metadata)
except exception.Forbidden:
return None
LOG.debug("Cache hit for image '%s'", image_id)
image_iterator = self.get_from_cache(image_id)
method = getattr(self, '_process_%s_request' % version)
try:
return method(request, image_id, image_iterator, image_metadata)
except exception.NotFound:
msg = _LE("Image cache contained image file for image '%s', "
"however the registry did not contain metadata for "
"that image!") % image_id
LOG.error(msg)
self.cache.delete_cached_image(image_id)
@staticmethod
def _stash_request_info(request, image_id, method, version):
"""
Preserve the image id, version and request method for later retrieval
"""
request.environ['api.cache.image_id'] = image_id
request.environ['api.cache.method'] = method
request.environ['api.cache.version'] = version
@staticmethod
def _fetch_request_info(request):
"""
Preserve the cached image id, version for consumption by the
process_response method of this middleware
"""
try:
image_id = request.environ['api.cache.image_id']
method = request.environ['api.cache.method']
version = request.environ['api.cache.version']
except KeyError:
return None
else:
return (image_id, method, version)
def _process_v1_request(self, request, image_id, image_iterator,
image_meta):
# Don't display location
if 'location' in image_meta:
del image_meta['location']
image_meta.pop('location_data', None)
self._verify_metadata(image_meta)
response = webob.Response(request=request)
raw_response = {
'image_iterator': image_iterator,
'image_meta': image_meta,
}
return self.serializer.show(response, raw_response)
def _process_v2_request(self, request, image_id, image_iterator,
image_meta):
# We do some contortions to get the image_metadata so
# that we can provide it to 'size_checked_iter' which
# will generate a notification.
# TODO(mclaren): Make notification happen more
# naturally once caching is part of the domain model.
image = request.environ['api.cache.image']
self._verify_metadata(image_meta)
response = webob.Response(request=request)
response.app_iter = size_checked_iter(response, image_meta,
image_meta['size'],
image_iterator,
notifier.Notifier())
# NOTE (flwang): Set the content-type, content-md5 and content-length
# explicitly to be consistent with the non-cache scenario.
# Besides, it's not worth the candle to invoke the "download" method
# of ResponseSerializer under image_data. Because method "download"
# will reset the app_iter. Then we have to call method
# "size_checked_iter" to avoid missing any notification. But after
# call "size_checked_iter", we will lose the content-md5 and
# content-length got by the method "download" because of this issue:
# https://github.com/Pylons/webob/issues/86
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-MD5'] = image.checksum
response.headers['Content-Length'] = str(image.size)
return response
def process_response(self, resp):
"""
We intercept the response coming back from the main
images Resource, removing image file from the cache
if necessary
"""
status_code = self.get_status_code(resp)
if not 200 <= status_code < 300:
return resp
try:
(image_id, method, version) = self._fetch_request_info(
resp.request)
except TypeError:
return resp
if method == 'GET' and status_code == 204:
# Bugfix:1251055 - Don't cache non-existent image files.
# NOTE: Both GET for an image without locations and DELETE return
# 204 but DELETE should be processed.
return resp
method_str = '_process_%s_response' % method
try:
process_response_method = getattr(self, method_str)
except AttributeError:
LOG.error(_LE('could not find %s') % method_str)
# Nothing to do here, move along
return resp
else:
return process_response_method(resp, image_id, version=version)
def _process_DELETE_response(self, resp, image_id, version=None):
if self.cache.is_cached(image_id):
LOG.debug("Removing image %s from cache", image_id)
self.cache.delete_cached_image(image_id)
return resp
def _process_GET_response(self, resp, image_id, version=None):
image_checksum = resp.headers.get('Content-MD5')
if not image_checksum:
# API V1 stores the checksum in a different header:
image_checksum = resp.headers.get('x-image-meta-checksum')
if not image_checksum:
LOG.error(_LE("Checksum header is missing."))
# fetch image_meta on the basis of version
image_metadata = None
if version:
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(resp.request, image_id)
# NOTE(zhiyan): image_cache return a generator object and set to
# response.app_iter, it will be called by eventlet.wsgi later.
# So we need enforce policy firstly but do it by application
# since eventlet.wsgi could not catch webob.exc.HTTPForbidden and
# return 403 error to client then.
self._enforce(resp.request, 'download_image', target=image_metadata)
resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum,
resp.app_iter)
return resp
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a Webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
return response.status
def get_from_cache(self, image_id):
"""Called if cache hit"""
with self.cache.open_for_read(image_id) as cache_file:
chunks = utils.chunkiter(cache_file)
for chunk in chunks:
yield chunk
|
apache-2.0
| -4,818,873,228,218,001,000
| 39.021605
| 78
| 0.61001
| false
| 4.273896
| false
| false
| false
|