repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
pprett/scikit-learn | sklearn/feature_selection/from_model.py | Python | bsd-3-clause | 6,968 | 0 | # Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
# License: BSD 3 clause
import numpy as np
from .base import SelectorMixin
from ..base import BaseEstimator, clone
from ..externals import six
from ..exceptions import NotFittedError
from ..utils.fixes import norm
def _get_feature_importances(estimator, norm_order=1):
"""Retrieve or aggregate feature importances from estimator"""
importances = getattr(estimator, "feature_importances_", None)
if importances is None and hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = norm(estimator.coef_, axis=0, ord=norm_order)
elif importances is None:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
# determine default from estimator
est_name = estimator.__class__.__name__
if ((hasattr(estimator, "penalty") and estimator.penalty == "l1") or
"Lasso" in est_name):
# the natural default threshold is 0 when l1 penalty was used
threshold = 1e-5
else:
threshold = "mean"
if isinstance(threshold, six.string_types):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
class SelectFromModel(BaseEstimator, SelectorMixin):
"""Meta-transformer for selecting features based on importance weights.
.. versionadded:: 0.17
Parameters
----------
estimator : object
The base estimator from which the transformer is built.
This can be both a fitted (if ``prefit`` is set to True)
or a non-fitted estimator.
threshold : string, float, optional default None
The threshold value to use for feature selection. Features whose
importance is greater or equal are kept while the others are
discarded. If "median" (resp. "mean"), then the ``threshold`` value is
the median (resp. the mean) of the feature importances. A scaling
factor (e.g., "1.25*mean") may also be used. If None and if the
estimator has a parameter penalty set to l1, either explicitly
or implicitly (e.g, Lasso), the threshold used is 1e-5.
Otherwise, "mean" is used by default.
prefit : bool, default False
Whether a prefit model is expected to be passed into the constructor
directly or not. If True, ``transform`` must be called directly
and SelectFromModel cannot be used with ``cross_val_score``,
``GridSearchCV`` and similar utilities that clone the estimator.
Otherwise train the model using ``fit`` and then ``transform`` to do
feature selection.
norm_order : non-zero int, inf, -inf, default 1
Order of the norm used to filter the vectors of coefficients below
``threshold`` in the case where the ``coef_`` attribute of the
estimator is of dimension 2.
Attributes
----------
estimator_ : an estimator
The base estimator from which the transformer is built.
This is stored only when a non-fitted estimator is passed to the
``SelectFromModel``, i.e when prefit is False.
threshold_ : float
The threshold value used for feature selection.
"""
def __init__(self, estimator, threshold=None, prefit=False, norm_order=1):
self.estimator = estimator
self.threshold = threshold
self.prefit = prefit
self.norm_order = norm_order
def _get_support_mask(self):
# SelectFromModel can directly call on transform.
if self.prefit:
estimator = self.estimator
elif hasattr(self, 'estimator_'):
estimator = self.estimator_
else:
raise ValueError(
'Either fit the model before transform or set "prefit=True"'
' while passing the fitted estimator to the constructor.')
scores = _get_feature_importances(estimator, self.norm_order)
self.threshold_ = _calculate_threshold(estimator, scores,
self.threshold)
return scores >= self.threshold_
def fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **fit_params)
return self
def partial_fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer only once.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fi | t_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
| raise NotFittedError(
"Since 'prefit=True', call transform directly")
if not hasattr(self, "estimator_"):
self.estimator_ = clone(self.estimator)
self.estimator_.partial_fit(X, y, **fit_params)
return self
|
JulyKikuAkita/PythonPrac | cs15211/RangeModule.py | Python | apache-2.0 | 7,564 | 0.003966 | __source__ = 'https://leetcode.com/problems/rang | e-module/'
# Time: O(logK) to O(K)
# Space: O(A+R), the space used by ranges.
#
# Description: Leetco | de # 715. Range Module
#
# A Range Module is a module that tracks ranges of numbers.
# Your task is to design and implement the following interfaces in an efficient manner.
#
# addRange(int left, int right) Adds the half-open interval [left, right),
# tracking every real number in that interval.
# Adding an interval that partially overlaps with currently tracked numbers
# should add any numbers in the interval [left, right) that are not already tracked.
#
# queryRange(int left, int right) Returns true if and only if every real number in the interval
# [left, right) is currently being tracked.
#
# removeRange(int left, int right) Stops tracking every real number currently being tracked
# in the interval [left, right).
#
# Example 1:
#
# addRange(10, 20): null
# removeRange(14, 16): null
# queryRange(10, 14): true (Every number in [10, 14) is being tracked)
# queryRange(13, 15): false (Numbers like 14, 14.03, 14.17 in [13, 15) are not being tracked)
# queryRange(16, 17): true (The number 16 in [16, 17) is still being tracked,
# despite the remove operation)
#
# Note:
# A half open interval [left, right) denotes all real numbers left <= x < right.
# 0 < left < right < 10^9 in all calls to addRange, queryRange, removeRange.
# The total number of calls to addRange in a single test case is at most 1000.
# The total number of calls to queryRange in a single test case is at most 5000.
# The total number of calls to removeRange in a single test case is at most 1000.
#
import unittest
import bisect
# 308ms 58.44%
class RangeModule(object):
def __init__(self):
self.ranges = []
def _bounds(self, left, right):
i, j = 0, len(self.ranges) - 1
for d in (100, 10, 1):
while i + d - 1 < len(self.ranges) and self.ranges[i+d-1][1] < left:
i += d
while j >= d - 1 and self.ranges[j-d+1][0] > right:
j -= d
return i, j
def addRange(self, left, right):
i, j = self._bounds(left, right)
if i <= j:
left = min(left, self.ranges[i][0])
right = max(right, self.ranges[j][1])
self.ranges[i:j+1] = [(left, right)]
def queryRange(self, left, right):
i = bisect.bisect_left(self.ranges, (left, float('inf')))
if i: i -= 1
return (bool(self.ranges) and
self.ranges[i][0] <= left and
right <= self.ranges[i][1])
def removeRange(self, left, right):
i, j = self._bounds(left, right)
merge = []
for k in xrange(i, j+1):
if self.ranges[k][0] < left:
merge.append((self.ranges[k][0], left))
if right < self.ranges[k][1]:
merge.append((right, self.ranges[k][1]))
self.ranges[i:j+1] = merge
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/range-module/solution/
#
Approach #1: Maintain Sorted Disjoint Intervals [Accepted]
Complexity Analysis
Time Complexity: Let K be the number of elements in ranges.
addRange and removeRange operations have O(K) complexity
queryRange has O(logK) complexity
Because addRange, removeRange adds at most 1 interval at a time, you can bound these further.
For example, if there are A addRange, R removeRange, and Q queryRange number of operations respectively,
we can express our complexity as O((A+R)^2 Qlog(A+R))
Space Complexity: O(A+R), the space used by ranges.
# 121ms 89.92%
class RangeModule {
TreeSet<Interval> ranges;
public RangeModule() {
ranges = new TreeSet();
}
public void addRange(int left, int right) {
Iterator<Interval> itr = ranges.tailSet(new Interval(0, left - 1)).iterator();
while (itr.hasNext()) {
Interval iv = itr.next();
if (right < iv.left) break;
left = Math.min(left, iv.left);
right = Math.max(right, iv.right);
itr.remove();
}
ranges.add(new Interval(left, right));
}
public boolean queryRange(int left, int right) {
Interval iv = ranges.higher(new Interval(0, left));
return (iv != null && iv.left <= left && right <= iv.right);
}
public void removeRange(int left, int right) {
Iterator<Interval> itr = ranges.tailSet(new Interval(0, left)).iterator();
ArrayList<Interval> todo = new ArrayList();
while (itr.hasNext()) {
Interval iv = itr.next();
if (right < iv.left) break;
if (iv.left < left) todo.add(new Interval(iv.left, left));
if (right < iv.right) todo.add(new Interval(right, iv.right));
itr.remove();
}
for (Interval iv: todo) ranges.add(iv);
}
}
class Interval implements Comparable<Interval>{
int left;
int right;
public Interval(int left, int right){
this.left = left;
this.right = right;
}
public int compareTo(Interval that){
if (this.right == that.right) return this.left - that.left;
return this.right - that.right;
}
}
/**
* Your RangeModule object will be instantiated and called as such:
* RangeModule obj = new RangeModule();
* obj.addRange(left,right);
* boolean param_2 = obj.queryRange(left,right);
* obj.removeRange(left,right);
*/
# 136ms 78.23%
class RangeModule {
List<int[]> ranges = new ArrayList<int[]>();
public RangeModule() {
ranges.add(new int[]{-1, -1});
}
public void addRange(int left, int right) {
int l = searchFloor(left);
int r = searchFloor(right);
int[] vl = ranges.get(l);
int[] vr = ranges.get(r);
if (vr[1] < left) {
ranges.add(r + 1, new int[]{left, right});
} else {
for (int k = 0; k < r - l; k++) ranges.remove(l + 1);
if (vl[1] < left) {
ranges.add(l + 1, new int[]{left, Math.max(right, vr[1])});
} else {
ranges.remove(l);
ranges.add(l, new int[] {vl[0], Math.max(right, vr[1])});
}
}
}
public boolean queryRange(int left, int right) {
int l = searchFloor(left);
int[] r = ranges.get(l);
return (r[1] >= right);
}
public void removeRange(int left, int right) {
int l = searchFloor(left);
int r = searchFloor(right);
int[] vl = ranges.get(l);
int[] vr = ranges.get(r);
if (vr[1] <= left) return;
for (int k = 0; k < r - l; k++) ranges.remove(l + 1);
if (vr[1] > right) {
ranges.add(l + 1, new int[]{right, vr[1]});
}
if (vl[1] > left) {
ranges.remove(l);
if (vl[0] < left) {
ranges.add(l, new int[]{vl[0], left});
}
}
}
// search nearest internal starts at or before key and return the index
private int searchFloor(int key) {
int l = 0, h = ranges.size();
while (l + 1 < h) {
int m = l + (h - l) / 2;
int v = ranges.get(m)[0];
if (v < key) {
l = m;
} else if (v == key) {
l = m;
break;
} else {
h = m;
}
}
return l;
}
}
'''
|
mluo613/osf.io | addons/s3/routes.py | Python | apache-2.0 | 1,947 | 0 | from framework.routing import Rule, json_renderer
from addons.s3 import views
api_routes = {
'rules': [
Rule(
[
'/settings/s3/accounts/',
],
'post',
views.s3_add_user_account,
json_renderer,
),
Rule(
[
'/settings/s3/accounts/',
],
'get',
views.s3_account_list,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/settings/',
'/project/<pid>/node/<nid>/s3/settings/',
],
'put',
views.s3_set_config,
json_renderer,
),
Rule(
[
'/project/< | pid>/s3/s | ettings/',
'/project/<pid>/node/<nid>/s3/settings/',
],
'get',
views.s3_get_config,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/user-auth/',
'/project/<pid>/node/<nid>/s3/user-auth/',
],
'put',
views.s3_import_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/user-auth/',
'/project/<pid>/node/<nid>/s3/user-auth/',
],
'delete',
views.s3_deauthorize_node,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/buckets/',
'/project/<pid>/node/<nid>/s3/buckets/',
],
'get',
views.s3_folder_list,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/newbucket/',
'/project/<pid>/node/<nid>/s3/newbucket/',
],
'post',
views.create_bucket,
json_renderer
),
],
'prefix': '/api/v1',
}
|
endlessm/chromium-browser | tools/android/asan/third_party/with_asan.py | Python | bsd-3-clause | 3,894 | 0.012327 | #!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import logging
import os
import subprocess
import sys
_SRC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
sys.path.append(os.path.join(_SRC_ROOT, 'third_party', 'catapult', 'devil'))
from devil import base_error
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.utils import logging_common
sys.path.append(os.path.join(_SRC_ROOT, 'build', 'android'))
import devil_chromium
_SCRIPT_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'asan_device_setup.sh'))
@contextlib.contextmanager
def _LogDevicesOnFailure(msg):
try:
yield
except base_error.BaseError:
logging.exception(msg)
logging.error('Devices visible to adb:')
for entry in adb_wrapper.AdbWrapper.Devices(desired_stat | e=None,
long_list=True):
logging.error(' %s: %s',
entry[0].GetDeviceSerial(),
' '.join(entry[1:]))
raise
@contextlib.contextmanager
def Asan(args):
env = os.environ.copy()
env['ADB'] = args.adb
try:
with _LogDevicesOnFailure('Failed to set up the device.'):
| device = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.device)[0]
disable_verity = device.build_version_sdk >= version_codes.MARSHMALLOW
if disable_verity:
device.EnableRoot()
# TODO(crbug.com/790202): Stop logging output after diagnosing
# issues on android-asan.
verity_output = device.adb.DisableVerity()
if verity_output:
logging.info('disable-verity output:')
for line in verity_output.splitlines():
logging.info(' %s', line)
device.Reboot()
# Call EnableRoot prior to asan_device_setup.sh to ensure it doesn't
# get tripped up by the root timeout.
device.EnableRoot()
setup_cmd = [_SCRIPT_PATH, '--lib', args.lib]
if args.device:
setup_cmd += ['--device', args.device]
subprocess.check_call(setup_cmd, env=env)
yield
finally:
with _LogDevicesOnFailure('Failed to tear down the device.'):
device.EnableRoot()
teardown_cmd = [_SCRIPT_PATH, '--revert']
if args.device:
teardown_cmd += ['--device', args.device]
subprocess.check_call(teardown_cmd, env=env)
if disable_verity:
# TODO(crbug.com/790202): Stop logging output after diagnosing
# issues on android-asan.
verity_output = device.adb.EnableVerity()
if verity_output:
logging.info('enable-verity output:')
for line in verity_output.splitlines():
logging.info(' %s', line)
device.Reboot()
def main(raw_args):
parser = argparse.ArgumentParser()
logging_common.AddLoggingArguments(parser)
parser.add_argument(
'--adb', type=os.path.realpath, required=True,
help='Path to adb binary.')
parser.add_argument(
'--device',
help='Device serial.')
parser.add_argument(
'--lib', type=os.path.realpath, required=True,
help='Path to asan library.')
parser.add_argument(
'command', nargs='*',
help='Command to run with ASAN installed.')
args = parser.parse_args()
# TODO(crbug.com/790202): Remove this after diagnosing issues
# with android-asan.
if not args.quiet:
args.verbose += 1
logging_common.InitializeLogging(args)
devil_chromium.Initialize(adb_path=args.adb)
with Asan(args):
if args.command:
return subprocess.call(args.command)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
hgsoft/hgsoft-addons | custom_account_payment_v11/__manifest__.py | Python | gpl-3.0 | 1,867 | 0.008043 | # -*- coding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2018 HGSOFT - www.hgsoft.com.br #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is d | istributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without e | ven the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
{
'name': "Custom Account Payment",
'summary': """Account Payment custom field in view, and logic.""",
'description': """Custom view of account payment, with new logic when registering payment.""",
'author': "HGSoft - Soluções Criativas e Inteligentes",
'website': "http://www.hgsoft.com.br/",
'category': 'Account',
'version': '11.0.0',
'depends': ['account'],
'data': [
'views/account_payment_view.xml',
],
} |
niwinz/Green-Mine | src/greenmine/core/utils/slug.py | Python | bsd-3-clause | 1,106 | 0.001808 | # -*- coding: utf-8 -*-
from django.utils import baseconv
from django.template.defaultfilters import | slugify
import time
def slugify_uniquely(value, model, slugfield="slu | g"):
"""
Returns a slug on a name which is unique within a model's table
"""
suffix = 0
potential = base = slugify(value)
if len(potential) == 0:
potential = 'null'
while True:
if suffix:
potential = "-".join([base, str(suffix)])
if not model.objects.filter(**{slugfield: potential}).count():
return potential
suffix += 1
def ref_uniquely(project, model, field='ref'):
"""
Returns a unique reference code based on base64 and time.
"""
# this prevents concurrent and inconsistent references.
time.sleep(0.001)
new_timestamp = lambda: int("".join(str(time.time()).split(".")))
while True:
potential = baseconv.base62.encode(new_timestamp())
params = {field: potential, 'project': project}
if not model.objects.filter(**params).exists():
return potential
time.sleep(0.0002)
|
timfreund/PhotoFUSE | setup.py | Python | mit | 588 | 0.003401 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
| setup(
name='PhotoFUSE',
version='1.0',
description="PhotoFUSE: Show photos based on ratings and tags",
author='Tim Freund',
author_email='tim@freunds.net',
license = 'MIT License',
url='http://github.com/timfreund/photofuse',
install_requires=[
'fusepy',
# PIL
],
packages=['photofuse'],
include_package_data=True,
entry_points="""
[console_scripts]
photofuse-ls = pho | tofuse.cli:ls
photofuse = photofuse.cli:photofuse
""",
)
|
anushbmx/kitsune | kitsune/forums/models.py | Python | bsd-3-clause | 13,732 | 0.000073 | import datetime
import time
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Q
from django.db.models.signals import pre_save
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.auth.models import User
from tidings.models import NotificationsMixin
from kitsune import forums
from kitsune.access.utils import has_perm, perm_is_defined_on
from kitsune.flagit.models import FlaggedObject
from kitsune.sumo.templatetags.jinja_helpers import urlparams, wiki_to_html
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.models import ModelBase
from kitsune.search.models import (
SearchMappingType, SearchMixin, register_for_indexing,
register_mapping_type)
def _last_post_from(posts, exclude_post=None):
"""Return the most recent post in the given set, excluding the given post.
If there are none, return None.
"""
if exclude_post:
posts = posts.exclude(id=exclude_post.id)
posts = posts.order_by('-created')
try:
return posts[0]
except IndexError:
return None
class ThreadLockedError(Exception):
"""Trying to create a post in a locked thread."""
class Forum(NotificationsMixin, ModelBase):
name = models.CharField(max_length=50, unique=True)
slug = models.SlugField(unique=True)
description = models.TextField(null=True)
last_post = models.ForeignKey('Post', related_name='last_post_in_forum',
null=True, on_delete=models.SET_NULL)
# Dictates the order in which forums are displayed in the forum list.
display_order = models.IntegerField(default=1, db_index=True)
# Whether or not this forum is visible in the forum list.
is_listed = models.BooleanField(default=True, db_index=True)
class Meta(object):
ordering = ['display_order', 'id']
permissions = (
('view_in_forum', 'Can view restricted forums'),
('post_in_forum', 'Can post in restricted forums'))
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('forums.threads', kwargs={'forum_slug': self.slug})
def allows_viewing_by(self, user):
"""Return whether a user can view me, my threads, and their posts."""
return (self._allows_public_viewing() or
has_perm(user, 'forums_forum.view_in_forum', self))
def _allows_public_viewing(self):
"""Return whether I am a world-readable forum.
If a django-authority permission relates to me, I am considered non-
public. (We assume that you attached a permission to me in order to
assign it to some users or groups.) Considered adding a Public flag to
this model, but we didn't want it to show up on form and thus be
accidentally flippable by readers of the Admin forum, who are all
privileged enough to do so.
"""
return not perm_is_defined_on('forums_forum.view_in_forum', self)
def allows_posting_by(self, user):
"""Return whether a user can make threads and posts in me."""
return (self._allows_public_posting() or
has_perm(user, 'forums_forum.post_in_forum', self))
def _allows_public_posting(self):
"""Return whether I am a world-writable forum."""
return not perm_is_defined_on('forums_forum.post_in_forum', self)
def update_last_post(self, exclude_thread=None, exclude_post=None):
"""Set my last post to the newest, excluding given thread and post."""
posts = Post.objects.filter(thread__forum=self)
if exclude_thread:
posts = posts.exclude(thread=exclude_thread)
self.last_post = _last_post_from(posts, exclude_post=exclude_post)
@classmethod
def authorized_forums_for_user(cls, user):
"""Returns the forums this user is authorized to view"""
return [f for f in Forum.objects.all() if f.allows_viewing_by(user)]
class Thread(NotificationsMixin, ModelBase, SearchMixin):
title = models.CharField(max_length=255)
forum = models.ForeignKey('Forum')
created = models.DateTimeField(default=datetime.datetime.now,
db_index=True)
creator = models.ForeignKey(User)
last_post = models.ForeignKey('Post', related_name='last_post_in',
null=True, on_delete=models.SET_NULL)
replies = models.IntegerField(default=0)
is_locked = models.BooleanField(default=False)
is_sticky = models.BooleanField(default=False, db_index=True)
class Meta:
ordering = ['-is_sticky', '-last_post__created']
def __setattr__(self, attr, val):
"""Notice when the forum field changes.
A property won't do here, because it usurps the "forum" name and
prevents us from using lookups like Thread.objects.filter(forum=f).
"""
if attr == 'forum' and not hasattr(self, '_old_forum'):
try:
self._old_forum = self.forum
except ObjectDoesNotExist:
pass
super(Thread, self).__setattr__(attr, val)
@property
def last_page(self):
"""Returns the page number for the last post."""
return self.replies / forums.POSTS_PER_PAGE + 1
def __unicode__(self):
return self.title
def delete(self, *args, **kwargs):
"""Override delete method to update parent forum info."""
forum = Forum.objects.get(pk=self.forum.id)
if forum.last_post and forum.last_post.thread_id == self.id:
forum.update_last_post(exclude_thread=self)
forum.save()
super(Thread, self).delete(*args, **kwargs)
def new_post(self, author, content):
"""Create a new post, if the thread is unlocked."""
if self.is_locked:
raise ThreadLockedError
return self.post_set.create(author=author, content=content)
def get_absolute_url(self):
return reverse('forums.posts', args=[self.forum.slug, self.id])
def get_last_post_url(s | elf):
query = {'last': self.last_post_id}
page = self.last_page
if page > 1:
query['page'] = page
url = reverse('forums.posts', args=[self.forum.slug, self.id])
return urlparams(url, hash='post-%s' % self.last_post_id, | **query)
def save(self, *args, **kwargs):
super(Thread, self).save(*args, **kwargs)
old_forum = getattr(self, '_old_forum', None)
new_forum = self.forum
if old_forum and old_forum != new_forum:
old_forum.update_last_post(exclude_thread=self)
old_forum.save()
new_forum.update_last_post()
new_forum.save()
del self._old_forum
def update_last_post(self, exclude_post=None):
"""Set my last post to the newest, excluding the given post."""
last = _last_post_from(self.post_set, exclude_post=exclude_post)
self.last_post = last
# If self.last_post is None, and this was called from Post.delete,
# then Post.delete will erase the thread, as well.
@classmethod
def get_mapping_type(cls):
return ThreadMappingType
@register_mapping_type
class ThreadMappingType(SearchMappingType):
seconds_ago_filter = 'last_post__created__gte'
@classmethod
def search(cls):
return super(ThreadMappingType, cls).search().order_by('created')
@classmethod
def get_model(cls):
return Thread
@classmethod
def get_query_fields(cls):
return ['post_title', 'post_content']
@classmethod
def get_mapping(cls):
return {
'properties': {
'id': {'type': 'long'},
'model': {'type': 'string', 'index': 'not_analyzed'},
'url': {'type': 'string', 'index': 'not_analyzed'},
'indexed_on': {'type': 'integer'},
'created': {'type': 'integer'},
'updated': {'type': 'integer'},
'post_forum_id': {'type': 'integer'},
'post_title': {'type': 'string', 'analyzer': 'snowball'},
|
mattvonrocketstein/smash | smashlib/ipy3x/utils/tests/test_io.py | Python | mit | 6,837 | 0.000878 | # encoding: utf-8
"""Tests for io.py"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
from __future__ import absolute_import
import io as stdlib_io
import os.path
import stat
import sys
from subprocess import Popen, PIPE
import unittest
import nose.tools as nt
from IPython.testing.decorators import skipif
from IPython.utils.io import (Tee, capture_output, unicode_std_stream,
atomic_writing,
)
from IPython.utils.py3compat import doctest_refactor_print, PY3
from IPython.utils.tempdir import TemporaryDirectory
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_tee_simple():
"Very simple check with stdout only"
chan = StringIO()
text = 'Hello'
tee = Tee(chan, channel='stdout')
print(text, file=chan)
nt.assert_equal(chan.getvalue(), text + "\n")
class TeeTestCase(unittest.TestCase):
def tchan(self, channel, check='close'):
trap = StringIO()
chan = StringIO()
text = 'Hello'
std_ori = getattr(sys, channel)
| setattr(sys, channel, trap)
tee = Tee(chan, channel=channel)
print(text, end='', file=chan)
setattr(sys, channel, std_ori)
trap_val = trap.getvalue()
nt.assert_equal(chan.getvalue(), text)
if check == 'close':
tee.close()
else:
del tee
def test(self):
for chan in ['stdout', 'stderr']:
for check in ['close', 'del']:
self.tchan(chan, check)
def test_io_init() | :
"""Test that io.stdin/out/err exist at startup"""
for name in ('stdin', 'stdout', 'stderr'):
cmd = doctest_refactor_print(
"from IPython.utils import io;print io.%s.__class__" % name)
p = Popen([sys.executable, '-c', cmd],
stdout=PIPE)
p.wait()
classname = p.stdout.read().strip().decode('ascii')
# __class__ is a reference to the class object in Python 3, so we can't
# just test for string equality.
assert 'IPython.utils.io.IOStream' in classname, classname
def test_capture_output():
"""capture_output() context works"""
with capture_output() as io:
print('hi, stdout')
print('hi, stderr', file=sys.stderr)
nt.assert_equal(io.stdout, 'hi, stdout\n')
nt.assert_equal(io.stderr, 'hi, stderr\n')
def test_UnicodeStdStream():
# Test wrapping a bytes-level stdout
if PY3:
stdoutb = stdlib_io.BytesIO()
stdout = stdlib_io.TextIOWrapper(stdoutb, encoding='ascii')
else:
stdout = stdoutb = stdlib_io.BytesIO()
orig_stdout = sys.stdout
sys.stdout = stdout
try:
sample = u"@łe¶ŧ←"
unicode_std_stream().write(sample)
output = stdoutb.getvalue().decode('utf-8')
nt.assert_equal(output, sample)
assert not stdout.closed
finally:
sys.stdout = orig_stdout
@skipif(not PY3, "Not applicable on Python 2")
def test_UnicodeStdStream_nowrap():
# If we replace stdout with a StringIO, it shouldn't get wrapped.
orig_stdout = sys.stdout
sys.stdout = StringIO()
try:
nt.assert_is(unicode_std_stream(), sys.stdout)
assert not sys.stdout.closed
finally:
sys.stdout = orig_stdout
def test_atomic_writing():
class CustomExc(Exception):
pass
with TemporaryDirectory() as td:
f1 = os.path.join(td, 'penguin')
with stdlib_io.open(f1, 'w') as f:
f.write(u'Before')
if os.name != 'nt':
os.chmod(f1, 0o701)
orig_mode = stat.S_IMODE(os.stat(f1).st_mode)
f2 = os.path.join(td, 'flamingo')
try:
os.symlink(f1, f2)
have_symlink = True
except (AttributeError, NotImplementedError, OSError):
# AttributeError: Python doesn't support it
# NotImplementedError: The system doesn't support it
# OSError: The user lacks the privilege (Windows)
have_symlink = False
with nt.assert_raises(CustomExc):
with atomic_writing(f1) as f:
f.write(u'Failing write')
raise CustomExc
# Because of the exception, the file should not have been modified
with stdlib_io.open(f1, 'r') as f:
nt.assert_equal(f.read(), u'Before')
with atomic_writing(f1) as f:
f.write(u'Overwritten')
with stdlib_io.open(f1, 'r') as f:
nt.assert_equal(f.read(), u'Overwritten')
if os.name != 'nt':
mode = stat.S_IMODE(os.stat(f1).st_mode)
nt.assert_equal(mode, orig_mode)
if have_symlink:
# Check that writing over a file preserves a symlink
with atomic_writing(f2) as f:
f.write(u'written from symlink')
with stdlib_io.open(f1, 'r') as f:
nt.assert_equal(f.read(), u'written from symlink')
def test_atomic_writing_newlines():
    with TemporaryDirectory() as td:
        path = os.path.join(td, 'testfile')

        lf = u'a\nb\nc\n'
        plat = lf.replace(u'\n', os.linesep)
        crlf = lf.replace(u'\n', u'\r\n')

        def read_raw():
            # Read back with newline translation disabled.
            with stdlib_io.open(path, 'r', newline='') as f:
                return f.read()

        # Default mode translates \n to the platform line separator.
        with stdlib_io.open(path, 'w') as f:
            f.write(lf)
        nt.assert_equal(read_raw(), plat)

        # newline='\n' keeps LF verbatim.
        with stdlib_io.open(path, 'w', newline='\n') as f:
            f.write(lf)
        nt.assert_equal(read_raw(), lf)

        # newline='\r\n' converts LF to CRLF.
        with atomic_writing(path, newline='\r\n') as f:
            f.write(lf)
        nt.assert_equal(read_raw(), crlf)

        # newline='' disables any conversion.
        text = u'crlf\r\ncr\rlf\n'
        with atomic_writing(path, newline='') as f:
            f.write(text)
        nt.assert_equal(read_raw(), text)
|
chaehni/scion-coord | python/local_gen.py | Python | apache-2.0 | 6,540 | 0.002141 | # Copyright 2017 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`local_gen` --- Local config generation tool for a SCIONLab AS
===================================================================
"""
# Standard library
import argparse
import base64
import json
import os
import time
# External packages
from Crypto import Random
# SCION
from lib.crypto.asymcrypto import (
generate_sign_keypair,
generate_enc_keypair,
)
from lib.crypto.certificate import Certificate
from lib.crypto.certificate_chain import CertificateChain
from lib.packet.scion_addr import ISD_AS
from lib.util import read_file
from topology.generator import (
INITIAL_CERT_VERSION,
INITIAL_TRC_VERSION,
TopoID
)
# SCION-Utilities
from local_config_util import (
ASCredential,
generate_sciond_config,
generate_zk_config,
get_elem_dir,
prep_supervisord_conf,
write_as_conf_and_path_policy,
write_certs_trc_keys,
write_dispatcher_config,
write_supervisord_config,
write_topology_file,
write_zlog_file,
TYPES_TO_EXECUTABLES,
TYPES_TO_KEYS,
)
# Directory structure and credential files
# Root of the scion-coord checkout (two directory levels above this file).
SCION_COORD_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Where generated AS configuration packages are written by default.
DEFAULT_PACKAGE_PATH = os.path.expanduser("~/scionLabConfigs")
# Default core AS credentials (ISD 1) shipped in the repository.
DEFAULT_CORE_CERT_FILE = os.path.join(SCION_COORD_PATH, "credentials", "ISD1.crt")
DEFAULT_CORE_SIG_KEY = os.path.join(SCION_COORD_PATH, "credentials", "ISD1.key")
DEFAULT_TRC_FILE = os.path.join(SCION_COORD_PATH, "credentials", "ISD1.trc")
def create_scionlab_as_local_gen(args, tp):
    """
    Creates the usual gen folder structure for an ISD/AS under
    <package_path>/<user_id>/gen, ready for Ansible deployment.

    :param args: parsed command-line arguments; uses joining_ia, core_ia,
                 package_path, user_id and the credential file paths.
    :param dict tp: the topology parameter file as a dict of dicts
    """
    new_ia = ISD_AS(args.joining_ia)
    core_ia = ISD_AS(args.core_ia)
    local_gen_path = os.path.join(args.package_path, args.user_id, 'gen')
    # Issue the new AS's certificate/keys, signed by the core AS.
    as_obj = generate_certificate(
        new_ia, core_ia, args.core_sign_priv_key_file, args.core_cert_file, args.trc_file)
    write_dispatcher_config(local_gen_path)
    # One config directory per service instance (BS, CS, PS, ...).
    for service_type, type_key in TYPES_TO_KEYS.items():
        executable_name = TYPES_TO_EXECUTABLES[service_type]
        for instance_name in tp[type_key].keys():
            config = prep_supervisord_conf(tp[type_key][instance_name], executable_name,
                                           service_type, instance_name, new_ia)
            instance_path = get_elem_dir(local_gen_path, new_ia, instance_name)
            # TODO(ercanucan): pass the TRC file as a parameter
            write_certs_trc_keys(new_ia, as_obj, instance_path)
            write_as_conf_and_path_policy(new_ia, as_obj, instance_path)
            write_supervisord_config(config, instance_path)
            write_topology_file(tp, type_key, instance_path)
            write_zlog_file(service_type, instance_name, instance_path)
    generate_zk_config(tp, new_ia, local_gen_path, simple_conf_mode=True)
    generate_sciond_config(TopoID(args.joining_ia), as_obj, tp, local_gen_path)
    # We don't initiate the prometheus service for user ASes.
    # generate_prom_config(ia, tp, gen_path)
def generate_certificate(joining_ia, core_ia, core_sign_priv_key_file, core_cert_file, trc_file):
    """Create the credentials for a new (joining) AS.

    Generates fresh signing and encryption keypairs for the joining AS and
    issues an AS certificate signed by the core AS's signing key.

    :param ISD_AS joining_ia: ISD-AS of the new AS.
    :param ISD_AS core_ia: ISD-AS of the signing core AS.
    :param str core_sign_priv_key_file: path to the core AS signing key (base64).
    :param str core_cert_file: path to the core AS certificate chain.
    :param str trc_file: path to the TRC of the ISD.
    :returns: the full credential set of the new AS.
    :rtype: ASCredential
    """
    core_ia_chain = CertificateChain.from_raw(read_file(core_cert_file))
    # AS cert is always expired one second before the expiration of the Core AS cert
    validity = core_ia_chain.core_as_cert.expiration_time - int(time.time()) - 1
    comment = "AS Certificate"
    core_ia_sig_priv_key = base64.b64decode(read_file(core_sign_priv_key_file))
    public_key_sign, private_key_sign = generate_sign_keypair()
    public_key_encr, private_key_encr = generate_enc_keypair()
    cert = Certificate.from_values(
        str(joining_ia), str(core_ia), INITIAL_TRC_VERSION, INITIAL_CERT_VERSION, comment,
        False, validity, public_key_encr, public_key_sign, core_ia_sig_priv_key)
    sig_priv_key = base64.b64encode(private_key_sign).decode()
    enc_priv_key = base64.b64encode(private_key_encr).decode()
    joining_ia_chain = CertificateChain([cert, core_ia_chain.core_as_cert]).to_json()
    # Use a context manager so the TRC file handle is closed deterministically
    # (the original open(...).read() leaked the handle).
    with open(trc_file) as f:
        trc = f.read()
    master_as_key = base64.b64encode(Random.new().read(16)).decode('utf-8')
    as_obj = ASCredential(sig_priv_key, enc_priv_key, joining_ia_chain, trc, master_as_key)
    return as_obj
def main():
    """
    Parse the command-line arguments and run the local config generation utility.
    """
    # TODO(mlegner): Add option specifying already existing keys and certificates
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--joining_ia",
                            help='ISD-AS for which the configuration is generated.')
    arg_parser.add_argument("--core_ia", default='1-1',
                            help='Signing Core ISD-AS')
    arg_parser.add_argument("--core_sign_priv_key_file", default=DEFAULT_CORE_SIG_KEY,
                            help='Signing private key of the core AS')
    arg_parser.add_argument("--core_cert_file", default=DEFAULT_CORE_CERT_FILE,
                            help='Certificate file of the signing core AS')
    arg_parser.add_argument("--trc_file", default=DEFAULT_TRC_FILE,
                            help='Trusted Root Configuration file')
    arg_parser.add_argument("--topo_file",
                            help='Topology file to be used for config generation.')
    arg_parser.add_argument("--package_path", default=DEFAULT_PACKAGE_PATH,
                            help='Path to generate and store AS configurations.')
    arg_parser.add_argument("--user_id",
                            help='User Identifier (email + IA)')
    parsed_args = arg_parser.parse_args()
    # The topology file drives which service instances get generated.
    with open(parsed_args.topo_file) as topo_fh:
        topology = json.load(topo_fh)
    create_scionlab_as_local_gen(parsed_args, topology)
# Script entry point.
if __name__ == '__main__':
    main()
|
foxtrot94/EchelonPlanner | src/app/admin.py | Python | mit | 296 | 0.003378 | # Register your mod | els here.
from app.subsystem.usermanagement.professor import *
from app.subsystem.usermanagement.programdirector import *
from app.subsystem.usermanagement.student import *
# admin.site.register(Professor)
# admin.site.register(ProgramDirector)
# admin.site.register(Student) |
msullivan/advent-of-code | 2017/17b.py | Python | mit | 358 | 0.00838 | #!/usr/bin/env | python3
import sys
def main(args):
#cnt = 2017
cnt = 50000000
n = 3
n = 345
size = 0
buf = [0]
pos = 0
at_1 = None
for i in range(cnt):
pos = (pos + n) % (i+1)
if pos == 0:
at_1 = i+1
pos += 1
print(at_1)
# Script entry point (reconstructed: the '__main__' literal was garbled).
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
uw-it-aca/grading-standard-lti | grading_standard/models.py | Python | apache-2.0 | 3,569 | 0 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.db import models
from django.core.exceptions import ValidationError
import json
class GradingStandardManager(models.Manager):
    """Query helpers for GradingStandard."""

    def find_by_login(self, login_id, id=None, name=None):
        """Return the non-deleted standards created by *login_id*.

        Optional *id* and *name* narrow the result further.
        """
        filters = {'created_by': login_id, 'is_deleted__isnull': True}
        if id is not None:
            filters['id'] = id
        if name is not None:
            filters['name'] = name
        queryset = super(GradingStandardManager, self).get_queryset()
        return queryset.filter(**filters).order_by('created_date')
class GradingStandard(models.Model):
    """ Represents a grading standard.
    """
    UNDERGRADUATE_SCALE = "ug"
    GRADUATE_SCALE = "gr"
    SCALE_CHOICES = (
        (UNDERGRADUATE_SCALE, "Undergraduate Scale (4.0-0.7)"),
        (GRADUATE_SCALE, "Graduate Scale (4.0-1.7)"),
    )

    name = models.CharField(max_length=80)
    scale = models.CharField(max_length=5, choices=SCALE_CHOICES)
    # JSON-encoded list describing the grading scheme.
    scheme = models.TextField()
    created_by = models.CharField(max_length=32)
    created_date = models.DateTimeField(auto_now_add=True)
    provisioned_date = models.DateTimeField(null=True)
    is_deleted = models.NullBooleanField()
    deleted_date = models.DateTimeField(null=True)

    objects = GradingStandardManager()

    def json_data(self):
        """Return a JSON-serializable dict representation of this standard."""
        try:
            scheme_data = json.loads(self.scheme)
        except Exception:
            # Malformed scheme JSON degrades to an empty list rather than
            # breaking consumers of the serialized form.
            scheme_data = []
        course_ids = list(GradingStandardCourse.objects.filter(
            standard=self).values_list("course_id", flat=True))
        return {"id": self.pk,
                "name": self.name,
                "scale": self.scale,
                "scheme": scheme_data,
                "course_ids": course_ids,
                "created_by": self.created_by,
                "created_date": self.created_date.isoformat(),
                "provisioned_date": self.provisioned_date.isoformat() if (
                    self.provisioned_date is not None) else None,
                "is_deleted": self.is_deleted,
                "deleted_date": self.deleted_date.isoformat() if (
                    self.deleted_date is not None) else None,
                }

    @staticmethod
    def valid_scheme_name(name):
        """Return the stripped name, or raise ValidationError if empty/None."""
        if name is not None:
            name = name.strip()
            if len(name):
                return name
        raise ValidationError('Name is required')

    @staticmethod
    def valid_scale(scale):
        """Return the lower-cased scale if it is a known choice, else raise."""
        if scale is not None:
            scale = scale.lower()
            for choice in GradingStandard.SCALE_CHOICES:
                if scale == choice[0]:
                    return scale
        raise ValidationError('Invalid scale: {}'.format(scale))

    @staticmethod
    def valid_grading_scheme(scheme):
        """Return the scheme if it is a non-empty list, else raise."""
        if type(scheme) is list and len(scheme):
            return scheme
        raise ValidationError('Scheme is required')

    @staticmethod
    def valid_course_id(sis_course_id):
        """Return the stripped course SIS ID, or raise if empty/None."""
        if sis_course_id is not None:
            sis_course_id = sis_course_id.strip()
            if len(sis_course_id):
                return sis_course_id
        raise ValidationError('Course SIS ID is required')
class GradingStandardCourse(models.Model):
    """ Associates a GradingStandard with a specific course.
    """
    # The grading standard applied to the course.
    standard = models.ForeignKey(GradingStandard, on_delete=models.CASCADE)
    # SIS ID of the course the standard is applied to.
    course_id = models.CharField(max_length=80)
    # External grading standard ID recorded at provisioning time, if any —
    # presumably assigned by the LMS; verify against the provisioning code.
    grading_standard_id = models.CharField(max_length=30, null=True)
    provisioned_date = models.DateTimeField(auto_now_add=True)
|
jumpstarter-io/cinder | cinder/volume/drivers/coraid.py | Python | apache-2.0 | 21,417 | 0 | # Copyright 2012 Alyseo.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Desc : Driver to store volumes on Coraid Appliances.
Require : Coraid EtherCloud ESM, Coraid VSX and Coraid SRX.
Author : Jean-Baptiste RANSY <openstack@alyseo.com>
Author : Alex Zasimov <azasimov@mirantis.com>
Author : Nikolay Sobolevsky <nsobolevsky@mirantis.com>
Contrib : Larry Matter <support@coraid.com>
"""
import cookielib
import math
import urllib
import urllib2
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import jsonutils
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume import driver
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
# Driver configuration options, registered below so they can be set in
# the service configuration file.
coraid_opts = [
    cfg.StrOpt('coraid_esm_address',
               default='',
               help='IP address of Coraid ESM'),
    cfg.StrOpt('coraid_user',
               default='admin',
               help='User name to connect to Coraid ESM'),
    cfg.StrOpt('coraid_group',
               default='admin',
               help='Name of group on Coraid ESM to which coraid_user belongs'
                    ' (must have admin privilege)'),
    cfg.StrOpt('coraid_password',
               default='password',
               help='Password to connect to Coraid ESM'),
    cfg.StrOpt('coraid_repository_key',
               default='coraid_repository',
               help='Volume Type key name to store ESM Repository Name'),
    cfg.StrOpt('coraid_default_repository',
               help='ESM Repository Name to use if not specified in '
                    'Volume Type keys'),
]

CONF = cfg.CONF
CONF.register_opts(coraid_opts)

# ESM reply states indicating the login session has expired and must be
# re-established before retrying the request.
ESM_SESSION_EXPIRED_STATES = ['GeneralAdminFailure',
                              'passwordInactivityTimeout',
                              'passwordAbsoluteTimeout']
class CoraidRESTClient(object):
    """Executes REST RPC requests on Coraid ESM EtherCloud Appliance."""
    def __init__(self, esm_url):
        """Validate *esm_url* and prepare a cookie-aware URL opener."""
        self._check_esm_url(esm_url)
        self._esm_url = esm_url
        # The cookie jar preserves the ESM session cookie across requests.
        self._cookie_jar = cookielib.CookieJar()
        self._url_opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self._cookie_jar))
    def _check_esm_url(self, esm_url):
        """Raise ValueError unless *esm_url* uses the https scheme."""
        splitted = urlparse.urlsplit(esm_url)
        if splitted.scheme != 'https':
            raise ValueError(
                _('Invalid ESM url scheme "%s". Supported https only.') %
                splitted.scheme)
    # The interprocess lock serializes ESM RPCs, so concurrent cinder
    # operations do not interleave on the shared session/cookie state.
    @lockutils.synchronized('coraid_rpc', 'cinder-', False)
    def rpc(self, handle, url_params, data, allow_empty_response=False):
        return self._rpc(handle, url_params, data, allow_empty_response)
    def _rpc(self, handle, url_params, data, allow_empty_response):
        """Execute REST RPC using url <esm_url>/handle?url_params.

        Send JSON encoded data in body of POST request.

        Exceptions:
            urllib2.URLError
                1. Name or service not found (e.reason is socket.gaierror)
                2. Socket blocking operation timeout (e.reason is
                   socket.timeout)
                3. Network IO error (e.reason is socket.error)
            urllib2.HTTPError
                1. HTTP 404, HTTP 500 etc.
            CoraidJsonEncodeFailure - bad REST response
        """
        # Handle must be simple path, for example:
        #   /configure
        if '?' in handle or '&' in handle:
            raise ValueError(_('Invalid REST handle name. Expected path.'))
        # Request url includes base ESM url, handle path and optional
        # URL params.
        rest_url = urlparse.urljoin(self._esm_url, handle)
        encoded_url_params = urllib.urlencode(url_params)
        if encoded_url_params:
            rest_url += '?' + encoded_url_params
        # data=None yields a GET; otherwise the JSON body makes this a POST.
        if data is None:
            json_request = None
        else:
            json_request = jsonutils.dumps(data)
        request = urllib2.Request(rest_url, json_request)
        response = self._url_opener.open(request).read()
        try:
            if not response and allow_empty_response:
                reply = {}
            else:
                reply = jsonutils.loads(response)
        except (TypeError, ValueError) as exc:
            msg = (_('Call to json.loads() failed: %(ex)s.'
                     ' Response: %(resp)s') %
                   {'ex': exc, 'resp': response})
            raise exception.CoraidJsonEncodeFailure(msg)
        return reply
def to_coraid_kb(gb):
    """Convert a size in gigabytes to the kilobyte count used by the ESM.

    The ESM counts a kilobyte as 1000 bytes, hence the division by 1000
    after expanding to bytes via units.Gi; the result is rounded up.
    """
    size_in_bytes = float(gb) * units.Gi
    return math.ceil(size_in_bytes / 1000)
def coraid_volume_size(gb):
    """Format a gigabyte size as the volume-size string expected by the ESM."""
    kb = to_coraid_kb(gb)
    return '{0}K'.format(kb)
class CoraidAppliance(object):
def __init__(self, rest_client, username, password, group):
self._rest_client = rest_client
self._username = username
self._password = password
self._group = group
self._logined = False
def _login(self):
"""Login into ESM.
Perform login request and return available groups.
:returns: dict -- map with group_name to group_id
"""
ADMIN_GROUP_PREFIX = 'admin group:'
url_params = {'op': 'login',
'username': self._username,
'password': self._password}
reply = self._rest_client.rpc('admin', url_params, 'Login')
if reply['state'] != 'adminSucceed':
raise exception.CoraidESMBadCredentials()
# Read groups map from login reply.
groups_map = {}
for group_info in reply.get('values', []):
full_group_name = group_info['fullPath']
if full_group_name.startswith(ADMIN_GROUP_PREFIX):
group_name = full_group_name[len(ADMIN_GROUP_PREFIX):]
| groups_map[group_name] = group_info['groupId']
return groups_map
def _set_effective_group(self, groups_map, group):
"""Set effective group.
Use groups_map returned from _login method.
"""
try:
group_id = groups_map[group]
except KeyError:
raise exception.CoraidESMBadGroup(group_name=group)
url_params = {'op': 'setRbacGroup',
'groupId': group_id}
rep | ly = self._rest_client.rpc('admin', url_params, 'Group')
if reply['state'] != 'adminSucceed':
raise exception.CoraidESMBadCredentials()
self._logined = True
def _ensure_session(self):
if not self._logined:
groups_map = self._login()
self._set_effective_group(groups_map, self._group)
def _relogin(self):
self._logined = False
self._ensure_session()
def rpc(self, handle, url_params, data, allow_empty_response=False):
self._ensure_session()
relogin_attempts = 3
# Do action, relogin if needed and repeat action.
while True:
reply = self._rest_client.rpc(handle, url_params, data,
allow_empty_response)
if self._is_session_expired(reply):
relogin_attempts -= 1
if relogin_attempts <= 0:
raise exception.CoraidESMReloginFailed()
LOG.debug('Session is expired. Relogin on ESM.')
self._relogin()
else:
return reply
def _is_session_expired(self, reply):
return ('state' in reply and
reply['state'] in ESM_SESSION_EXPIRED_STATES and
reply['metaCROp'] == 'reboot |
jfcherng/sublime-TypeShort | functions.py | Python | mit | 401 | 0 | import re
def snake_to_camel(snake, upper_first=False):
    """Convert a snake_case identifier to camelCase.

    :param snake: the snake_case string to convert.
    :param upper_first: if True, produce PascalCase (keep the first word
                        capitalized as well).
    :returns: the converted identifier.
    """
    # title-cased words (reconstructed: 'snake' was garbled in extraction)
    words = [word.title() for word in snake.split('_')]
    if words and not upper_first:
        words[0] = words[0].lower()
    return ''.join(words)
def camel_to_snake(camel):
    """Convert a camelCase/PascalCase identifier to snake_case.

    :param camel: the camelCase string to convert; must be non-empty
                  (camel[0] raises IndexError on an empty string).
    :returns: the converted identifier.
    """
    # upper-case the first character so the regex captures the first word too
    # (reconstructed: 'camel' was garbled in extraction)
    camel = camel[0].upper() + camel[1:]
    return '_'.join(re.findall(r'[A-Z][^A-Z]*', camel)).lower()
|
cpennington/edx-platform | common/djangoapps/entitlements/models.py | Python | agpl-3.0 | 21,559 | 0.004267 | """Entitlement Models"""
import logging
import uuid as uuid_tools
from datetime import timedelta
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import IntegrityError, models, transaction
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from model_utils import Choices
from model_utils.models import TimeStampedModel
from simple_history.models import HistoricalRecords
from course_modes.models import CourseMode
from entitlements.utils import is_course_run_entitlement_fulfillable
from lms.djangoapps.certificates.models import GeneratedCertificate
from lms.djangoapps.commerce.utils import refund_entitlement
from openedx.core.djangoapps.catalog.utils import get_course_uuid_for_course
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student.models import CourseEnrollment, CourseEnrollmentException
from util.date_utils import strftime_localized
log = logging.getLogger("common.entitlements.models")
@python_2_unicode_compatible
class CourseEntitlementPolicy(models.Model):
    """
    Represents the Entitlement's policy for expiration, refunds, and regaining a used certificate

    .. no_pii:
    """
    DEFAULT_EXPIRATION_PERIOD_DAYS = 730
    DEFAULT_REFUND_PERIOD_DAYS = 60
    DEFAULT_REGAIN_PERIOD_DAYS = 14
    MODES = Choices((None, u'---------'), CourseMode.VERIFIED, CourseMode.PROFESSIONAL)

    # Use a DurationField to calculate time as it returns a timedelta, useful in performing operations with datetimes
    expiration_period = models.DurationField(
        default=timedelta(days=DEFAULT_EXPIRATION_PERIOD_DAYS),
        help_text=u"Duration in days from when an entitlement is created until when it is expired.",
        null=False
    )
    refund_period = models.DurationField(
        default=timedelta(days=DEFAULT_REFUND_PERIOD_DAYS),
        help_text=u"Duration in days from when an entitlement is created until when it is no longer refundable",
        null=False
    )
    regain_period = models.DurationField(
        default=timedelta(days=DEFAULT_REGAIN_PERIOD_DAYS),
        help_text=(u"Duration in days from when an entitlement is redeemed for a course run until "
                   u"it is no longer able to be regained by a user."),
        null=False
    )
    # NULL site/mode mean the policy is not restricted to a particular one.
    site = models.ForeignKey(Site, null=True, on_delete=models.CASCADE)
    mode = models.CharField(max_length=32, choices=MODES, null=True)

    def get_days_until_expiration(self, entitlement):
        """
        Returns an integer of number of days until the entitlement expires.
        Includes the logic for regaining an entitlement.
        """
        now_timestamp = now()
        expiry_date = entitlement.created + self.expiration_period
        days_until_expiry = (expiry_date - now_timestamp).days
        if not entitlement.enrollment_course_run:
            # Unredeemed entitlements follow only the base expiration clock.
            return days_until_expiry
        course_overview = CourseOverview.get_from_id(entitlement.enrollment_course_run.course_id)
        # Compute the days left for the regain
        days_since_course_start = (now_timestamp - course_overview.start).days
        days_since_enrollment = (now_timestamp - entitlement.enrollment_course_run.created).days
        days_since_entitlement_created = (now_timestamp - entitlement.created).days
        # We want to return whichever days value is less since it is then the more recent one
        days_until_regain_ends = (self.regain_period.days -  # pylint: disable=no-member
                                  min(days_since_course_start, days_since_enrollment, days_since_entitlement_created))
        # If the base days until expiration is less than the days until the regain period ends, use that instead
        if days_until_expiry < days_until_regain_ends:
            return days_until_expiry
        return days_until_regain_ends  # pylint: disable=no-member

    def is_entitlement_regainable(self, entitlement):
        """
        Determines from the policy if an entitlement can still be regained by the user, if they choose
        to by leaving and regaining their entitlement within policy.regain_period days from start date of
        the course or their redemption, whichever comes later, and the expiration period hasn't passed yet
        """
        if entitlement.expired_at:
            return False
        if entitlement.enrollment_course_run:
            # An earned certificate locks in the run; the entitlement cannot
            # be regained once the learner has a certificate for it.
            if GeneratedCertificate.certificate_for_student(
                    entitlement.user_id, entitlement.enrollment_course_run.course_id) is not None:
                return False
            # This is >= because a days_until_expiration 0 means that the expiration day has not fully passed yet
            # and that the entitlement should not be expired as there is still time
            return self.get_days_until_expiration(entitlement) >= 0
        return False

    def is_entitlement_refundable(self, entitlement):
        """
        Determines from the policy if an entitlement can still be refunded, if the entitlement has not
        yet been redeemed (enrollment_course_run is NULL) and policy.refund_period has not yet passed, or if
        the entitlement has been redeemed, but the regain period hasn't passed yet.
        """
        # If the Entitlement is expired already it is not refundable
        if entitlement.expired_at:
            return False
        # If there's no order number, it cannot be refunded
        if entitlement.order_number is None:
            return False
        # This is > because a get_days_since_created of refund_period means that that many days have passed,
        # which should then make the entitlement no longer refundable
        if entitlement.get_days_since_created() > self.refund_period.days:  # pylint: disable=no-member
            return False
        if entitlement.enrollment_course_run:
            # Once redeemed, refundability follows regainability.
            return self.is_entitlement_regainable(entitlement)
        return True

    def is_entitlement_redeemable(self, entitlement):
        """
        Determines from the policy if an entitlement can be redeemed, if it has not passed the
        expiration period of policy.expiration_period, and has not already been redeemed
        """
        # This is < because a get_days_since_created of expiration_period means that that many days have passed,
        # which should then expire the entitlement
        return (entitlement.get_days_since_created() < self.expiration_period.days  # pylint: disable=no-member
                and not entitlement.enrollment_course_run
                and not entitlement.expired_at)

    def __str__(self):
        return u'Course Entitlement Policy: expiration_period: {}, refund_period: {}, regain_period: {}, mode: {}'\
            .format(
                self.expiration_period,
                self.refund_period,
                self.regain_period,
                self.mode
            )
class CourseEntitlement(TimeStamped | Model):
"""
Represents a Student's Entitlement to a Course Run for a given Course.
.. no_pii:
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
uuid = models.UUIDField(d | efault=uuid_tools.uuid4, editable=False, unique=True)
course_uuid = models.UUIDField(help_text=u'UUID for the Course, not the Course Run')
expired_at = models.DateTimeField(
null=True,
help_text=u'The date that an entitlement expired, if NULL the entitlement has not expired.',
blank=True
)
mode = models.CharField(max_length=100, help_text=u'The mode of the Course that will be applied on enroll.')
enrollment_course_run = models.ForeignKey(
'student.CourseEnrollment',
null=True,
help_text=u'The current Course enrollment for this entitlement. If NULL the Learner has not enrolled.',
blank=True,
on_delete=models.CASCADE,
)
order_number = models.CharField(max_length=128, default=None, null=True)
refund_locked = models.BooleanField(default=False)
_policy = models.ForeignKey(CourseEntitlementPolicy, null=True, blank=True, on_delete=models.CASCADE)
history = HistoricalRec |
jamesblunt/kaggle-galaxies | predict_augmented_npy_8433n_maxout2048_pysex.py | Python | bsd-3-clause | 9,590 | 0.005214 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size

# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_pysex.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset

# Output files are named after the analysis pickle they derive from.
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)

print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)

# Two input representations: a 69x69 crop and the same crop rotated by 45 degrees.
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)

# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]

print "Build model"
# Two rotated input branches are sliced/merged (including flips) through
# shared convolutional layers, then maxout-pooled dense layers on top.
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=8, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=4, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)

# The shared variables hold one data chunk on the GPU; compute_output
# evaluates one batch (selected by idx) with dropout disabled.
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
    l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
    l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)

print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])

print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
    for angle in np.linspace(0, 360, 10, endpoint=False):
        augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
        augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)

augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)

approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int( | np.ceil(chunk_length / float(BATCH_SIZE)))
# make predictions, don't forget to cute off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = p | redictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
|
rsalmaso/django-allauth | allauth/decorators.py | Python | mit | 683 | 0.001464 | from functools import wraps
from allauth import ratelimit
def rate_limit(*, action, **rl_kwargs):
    """Decorate a view so that requests are rate limited for ``action``.

    The configured rate for ``action`` is looked up in
    ``allauth.account.app_settings.RATE_LIMITS`` at decoration time and is
    used to fill in ``duration``/``amount`` defaults for the limiter.
    Explicit keyword arguments passed to this decorator take precedence.
    """
    from allauth.account import app_settings

    rate = app_settings.RATE_LIMITS.get(action)
    if rate:
        rate = ratelimit.parse(rate)
        rl_kwargs.setdefault("duration", rate.duration)
        rl_kwargs.setdefault("amount", rate.amount)

    def decorator(function):
        @wraps(function)
        def wrap(request, *args, **kwargs):
            # consume_or_429() returns a 429 response when the limit is
            # exceeded, or a falsy value when the request may proceed.
            resp = ratelimit.consume_or_429(request, action=action, **rl_kwargs)
            if not resp:
                resp = function(request, *args, **kwargs)
            return resp

        return wrap

    return decorator
|
ivesbai/server | generator/sources/python/KalturaClient/tests/utils.py | Python | agpl-3.0 | 2,114 | 0.014664 | import os, sys, inspect
import unittest
import ConfigParser
from KalturaClient import KalturaClient, KalturaConfiguration
from KalturaClient.Base import KalturaObjectFactory, KalturaEnumsFactory
from KalturaClient.Base import IKalturaLogger
from KalturaClient.Plugins.Core import KalturaSessionType
from KalturaClient.Plugins.Core import KalturaMediaType
# Test settings are read from config.ini located next to this module.
dir = os.path.dirname(__file__)
filename = os.path.join(dir, 'config.ini')
config = ConfigParser.ConfigParser()
config.read(filename)
# Credentials and endpoint of the Kaltura deployment used by the tests.
PARTNER_ID = config.getint("Test", "partnerId")
SERVICE_URL = config.get("Test", "serviceUrl")
ADMIN_SECRET = config.get("Test", "adminSecret")
USER_NAME = config.get("Test", "userName")
import logging
# Verbose logging to stdout so client/API traffic is visible during runs.
logging.basicConfig(level = logging.DEBUG,
                    format = '%(asctime)s %(levelname)s %(message)s',
                    stream = sys.stdout)
class KalturaLogger(IKalturaLogger):
    # Adapter that routes Kaltura client log output to the stdlib logging module.
    def log(self, msg):
        logging.info(msg)
def GetConfig():
    """Return a KalturaConfiguration pointing at the test SERVICE_URL with logging attached."""
    config = KalturaConfiguration()
    config.serviceUrl = SERVICE_URL
    config.setLogger(KalturaLogger())
    return config
def getTestFile(filename, mode='rb'):
    """Open *filename* located in the same directory as this module.

    :param filename: name of the fixture file to open
    :param mode: mode passed through to ``open`` (defaults to binary read)
    :return: the opened file object (caller is responsible for closing it)
    """
    testFileDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    # open() instead of the Python-2-only file() builtin, and os.path.join
    # instead of manual '/' concatenation; behavior is unchanged on Python 2
    # and this keeps the helper forward compatible.
    return open(os.path.join(testFileDir, filename), mode)
class KalturaBaseTest(unittest.TestCase):
    """Base class for all Kaltura tests.

    setUp creates an admin-level client session against the configured
    deployment; tearDown runs registered cleanups first (they may rely on
    the client) and then drops the session objects.
    """
    # TODO: create a client factory so as to avoid thrashing Kaltura with logins
    def setUp(self):
        # (client session is enough when we do operations in a users scope)
        self.config = GetConfig()
        self.client = KalturaClient(self.config)
        self.ks = self.client.generateSession(ADMIN_SECRET, USER_NAME,
                                              KalturaSessionType.ADMIN, PARTNER_ID,
                                              86400, "")
        self.client.setKs(self.ks)

    def tearDown(self):
        # do cleanup first, probably relies on self.client
        self.doCleanups()
        del(self.ks)
        del(self.client)
        del(self.config)
|
NifTK/NiftyNet | niftynet/engine/handler_console.py | Python | apache-2.0 | 1,161 | 0 | # -*- coding: utf-8 -*-
"""
This module implements a console output writer.
"""
import tensorflow as tf
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.signal import ITER_STARTED, ITER_FINISHED
class ConsoleLogger(object):
    """
    This class handles iteration events to print output to the console.
    """

    def __init__(self, **_unused):
        # Subscribe to the engine's iteration signals.
        ITER_STARTED.connect(self.read_console_vars)
        ITER_FINISHED.connect(self.print_console_vars)

    def read_console_vars(self, sender, **msg):
        """
        Event handler to add all console output ops to the iteration message

        :param sender: a niftynet.application instance
        :param msg: an iteration message instance
        :return:
        """
        msg['iter_msg'].ops_to_run[CONSOLE] = \
            sender.outputs_collector.variables(CONSOLE)

    def print_console_vars(self, _sender, **msg):
        """
        Printing iteration message with ``tf.logging`` interface.

        :param _sender:
        :param msg: an iteration message instance
        :return:
        """
        tf.logging.info(msg['iter_msg'].to_console_string())
|
MarioVilas/secondlife-experiments | SimProxy/extract_xml.py | Python | gpl-2.0 | 1,050 | 0.018095 | import os
import types
from sllib.LLSD import LLSD
try:
os.make | dirs('./httpcap')
except:
pass
data = open('httpcap.txt','r').read()
c = 0
btag = '<llsd>'
etag = '</llsd>'
##mbtag = '<key>message</key><string>'
##metag = '</string>'
b = data.find(btag)
mnames = {}
while b >= 0:
e = data.find(etag, b) + len(etag)
xml = data[b:e]
## bm = xml.rfind(mbtag)
## em = xml.find(metag, bm)
## if bm >= 0 and e | m >= 0 and em >= bm:
## bm = bm + len(mbtag)
## m = xml[bm:em]
## mnames[m] = None
## else:
## m = 'Unknown'
ll = LLSD.fromstring(xml)
m = 'DATA'
if type(ll) == types.DictType and ll.has_key('events'):
## print ll
for msg in ll['events']:
m = msg['message']
## print m
mnames[m] = None
name = './httpcap/%s_%d.xml' % (m,c)
try:
open(name, 'w+').write(xml)
except:
print xml
raise
c += 1
b = data.find(btag, e)
print mnames.keys()
|
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/telepathy/_generated/Channel_Type_Room_List.py | Python | gpl-3.0 | 76 | 0.013158 | ../../ | ../../../share/pyshared/telepathy/_generated/Channel_Type_Room | _List.py |
s20121035/rk3288_android5.1_repo | external/chromium_org/chrome/common/extensions/docs/server2/fake_fetchers.py | Python | gpl-3.0 | 6,215 | 0.00901 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# These are fake fetchers that are used for testing and the preview server.
# They return canned responses for URLs. appengine_wrappers.py uses the fake
# fetchers if the App Engine imports fail.
import base64
import json
import os
import re
import appengine_wrappers
from extensions_paths import SERVER2
from path_util import IsDirectory
from test_util import ReadFile, ChromiumPath
import url_constants
# TODO(kalman): Investigate why logging in this class implies that the server
# isn't properly caching some fetched files; often it fetches the same file
# 10+ times. This may be a test anomaly.
def _ReadTestData(*path, **read_args):
  # Read a file from the server2 test_data tree, e.g.
  # _ReadTestData('branch_utility', 'first.json').
  return ReadFile(SERVER2, 'test_data', *path, **read_args)
class _FakeFetcher(object):
def _ListDir(self, path):
return os.listdir(path)
def _IsDir(self, path):
return os.path.isdir(path)
def _Stat(self, path):
return int(os.stat(path).st_mtime)
class _FakeOmahaProxy(_FakeFetcher):
  # Serves canned branch data for the omaha proxy URL.
  def fetch(self, url):
    return _ReadTestData('branch_utility', 'first.json')
class _FakeOmahaHistory(_FakeFetcher):
  # Serves canned history data for the omaha history URL.
  def fetch(self, url):
    return _ReadTestData('branch_utility', 'second.json')
_SVN_URL_TO_PATH_PATTERN = re.compile(
r'^.*chrome/.*(trunk|branches/.*)/src/?([^?]*).*?')
def _ExtractPathFromSvnUrl(url):
return _SVN_URL_TO_PATH_PATTERN.match(url).group(2)
class _FakeSubversionServer(_FakeFetcher):
  '''Serves files from the local checkout as if they came from SVN.'''

  def fetch(self, url):
    path = _ExtractPathFromSvnUrl(url)
    if IsDirectory(path):
      # Render a minimal HTML directory listing, skipping hidden files.
      html = ['<html>Revision 000000']
      try:
        for f in self._ListDir(ChromiumPath(path)):
          if f.startswith('.'):
            continue
          if self._IsDir(ChromiumPath(path, f)):
            html.append('<a>' + f + '/</a>')
          else:
            html.append('<a>' + f + '</a>')
        html.append('</html>')
        return '\n'.join(html)
      except OSError:
        # Unknown directory: report "not found" with None.
        # (The exception object was previously bound but never used.)
        return None
    try:
      return ReadFile(path)
    except IOError:
      return None
# Escaped URL prefixes for the Gitiles source root and its branches path.
_GITILES_BASE_RE = re.escape('%s/%s' %
    (url_constants.GITILES_BASE, url_constants.GITILES_SRC_ROOT))
_GITILES_BRANCH_BASE_RE = re.escape('%s/%s/%s' %
    (url_constants.GITILES_BASE,
     url_constants.GITILES_SRC_ROOT,
     url_constants.GITILES_BRANCHES_PATH))
# NOTE: _GITILES_BRANCH_BASE_RE must be first, because _GITILES_BASE_RE is
# a more general pattern.
_GITILES_URL_RE = r'(%s|%s)/' % (_GITILES_BRANCH_BASE_RE, _GITILES_BASE_RE)
_GITILES_URL_TO_COMMIT_PATTERN = re.compile(r'%s[^/]+$' % _GITILES_URL_RE)
_GITILES_URL_TO_PATH_PATTERN = re.compile(r'%s.+?/(.*)' % _GITILES_URL_RE)


def _ExtractPathFromGitilesUrl(url):
  # Group 1 is the base prefix; group 2 is the path after the revision.
  return _GITILES_URL_TO_PATH_PATTERN.match(url).group(2)
class _FakeGitilesServer(_FakeFetcher):
  # Fakes the Gitiles JSON API: commit queries, directory listings as JSON,
  # and base64-encoded file contents from the local checkout.
  def fetch(self, url):
    if _GITILES_URL_TO_COMMIT_PATTERN.match(url) is not None:
      # Commit queries get a fixed fake sha.
      return json.dumps({'commit': '1' * 40})
    path = _ExtractPathFromGitilesUrl(url)
    chromium_path = ChromiumPath(path)
    if self._IsDir(chromium_path):
      jsn = {}
      dir_stat = self._Stat(chromium_path)
      jsn['id'] = dir_stat
      jsn['entries'] = []
      for f in self._ListDir(chromium_path):
        if f.startswith('.'):
          continue
        f_path = os.path.join(chromium_path, f)
        jsn['entries'].append({
            'id': self._Stat(f_path),
            'name': f,
            'type': 'tree' if self._IsDir(f_path) else 'blob'
        })
      return json.dumps(jsn)
    try:
      # Gitiles returns file bodies base64 encoded.
      return base64.b64encode(ReadFile(path))
    except IOError:
      return None
class _FakeViewvcServer(_FakeFetcher):
  # Fakes ViewVC: directory listings rendered as HTML revision tables,
  # file contents served verbatim from the local checkout.
  def fetch(self, url):
    path = ChromiumPath(_ExtractPathFromSvnUrl(url))
    if self._IsDir(path):
      html = ['<table><tbody><tr>...</tr>']
      # The version of the directory.
      dir_stat = self._Stat(path)
      html.append('<tr>')
      html.append('<td>Directory revision:</td>')
      html.append('<td><a>%s</a><a></a></td>' % dir_stat)
      html.append('</tr>')
      # The version of each file.
      for f in self._ListDir(path):
        if f.startswith('.'):
          continue
        html.append('<tr>')
        html.append(' <td><a>%s%s</a></td>' % (
            f, '/' if self._IsDir(os.path.join(path, f)) else ''))
        html.append(' <td><a><strong>%s</strong></a></td>' %
                    self._Stat(os.path.join(path, f)))
        html.append('<td></td><td></td><td></td>')
        html.append('</tr>')
      html.append('</tbody></table>')
      return '\n'.join(html)
    try:
      return ReadFile(path)
    except IOError:
      return None
class _FakeGithubStat(_FakeFetcher):
  '''Fake for GitHub commit-stat requests.'''

  def fetch(self, url):
    # Canned JSON body; only the presence of a sha matters to callers.
    canned = '{ "sha": 0 }'
    return canned
class _FakeGithubZip(_FakeFetcher):
  # Serves a canned zipball for the GitHub samples repository.
  def fetch(self, url):
    return _ReadTestData('github_file_system', 'apps_samples.zip', mode='rb')
class _FakeRietveldAPI(_FakeFetcher):
  def __init__(self):
    # Captures the api/... tail of the request URL.
    self._base_pattern = re.compile(r'.*/(api/.*)')

  def fetch(self, url):
    return _ReadTestData(
        'rietveld_patcher', self._base_pattern.match(url).group(1), 'json')
class _FakeRietveldTarball(_FakeFetcher):
  def __init__(self):
    # Captures the tarball/<issue>/<patchset> tail of the request URL.
    self._base_pattern = re.compile(r'.*/(tarball/\d+/\d+)')

  def fetch(self, url):
    return _ReadTestData(
        'rietveld_patcher', self._base_pattern.match(url).group(1) + '.tar.bz2',
        mode='rb')
def ConfigureFakeFetchers():
  '''Configure the fake fetcher paths relative to the docs directory.
  '''
  # Map URL patterns to the fake servers defined above. NOTE: keep the
  # more specific gitiles branch pattern alongside the base pattern.
  appengine_wrappers.ConfigureFakeUrlFetch({
    url_constants.OMAHA_HISTORY: _FakeOmahaHistory(),
    url_constants.OMAHA_PROXY_URL: _FakeOmahaProxy(),
    '%s/.*' % url_constants.SVN_URL: _FakeSubversionServer(),
    '%s/.*' % url_constants.VIEWVC_URL: _FakeViewvcServer(),
    '%s/.*/commits/.*' % url_constants.GITHUB_REPOS: _FakeGithubStat(),
    '%s/.*/zipball' % url_constants.GITHUB_REPOS: _FakeGithubZip(),
    '%s/api/.*' % url_constants.CODEREVIEW_SERVER: _FakeRietveldAPI(),
    '%s/tarball/.*' % url_constants.CODEREVIEW_SERVER: _FakeRietveldTarball(),
    '%s/.*' % _GITILES_BASE_RE: _FakeGitilesServer(),
    '%s/.*' % _GITILES_BRANCH_BASE_RE: _FakeGitilesServer()
  })
|
class NoResultScraped(Exception):
    """Raised when scraping produced no result."""
    pass


class NotCompleteParse(Exception):
    """Raised when parsing could not be completed."""
    pass


class CouldNotAuthorize(Exception):
    """Raised when authorization failed."""
    pass
|
h4ck3rm1k3/pywikibot-core | scripts/catall.py | Python | mit | 4,132 | 0.000242 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script shows the categories on each page and lets you change them.
For each page in the target wiki:
* If the page contains no categories, you can specify a list of categories to
add to the page.
* If the page already contains one or more categories, you can specify a new
list of categories to replace the current list of categories of the page.
Usage:
python pwb.py catall [start]
If no starting name is provided, the bot starts at 'A'.
Options:
-onlynew : Only run on pages that do not yet have a category.
"""
#
# (C) Rob W.W. Hooft, Andre Engels, 2004
# (C) Pywikibot team, 2004-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n, textlib
from | pywikibot.bot import QuitKeyboardInterrupt
def choosecats(pagetext):
    """Interactively ask the user for the new list of categories.

    Returns a list of category names, None when the user asked to remove
    all categories ('xx'), or raises QuitKeyboardInterrupt on 'q'.
    """
    chosen = []
    done = False
    length = 1000
    # TODO: → input_choice
    pywikibot.output("""Give the new categories, one per line.
Empty line: if the first, don't change. Otherwise: Ready.
-: I made a mistake, let me start over.
?: Give the text of the page with GUI.
??: Give the text of the page in console.
xx: if the first, remove all categories and add no new.
q: quit.""")
    while not done:
        choice = pywikibot.input(u"?")
        if choice == "":
            done = True
        elif choice == "-":
            # Start over from scratch.
            chosen = choosecats(pagetext)
            done = True
        elif choice == "?":
            from pywikibot import editor as editarticle
            editor = editarticle.TextEditor()
            editor.edit(pagetext)
        elif choice == "??":
            # Show progressively larger chunks of the page text.
            pywikibot.output(pagetext[0:length])
            length = length + 500
        elif choice == "xx" and chosen == []:
            chosen = None
            done = True
        elif choice == "q":
            raise QuitKeyboardInterrupt
        else:
            chosen.append(choice)
    return chosen
def make_categories(page, list, site=None):
    """Make categories."""
    # NOTE(review): the parameter name ``list`` shadows the builtin; kept
    # as-is for backward compatibility with keyword callers.
    if site is None:
        site = pywikibot.Site()
    pllist = []
    for p in list:
        cattitle = "%s:%s" % (site.namespaces.CATEGORY, p)
        pllist.append(pywikibot.Page(site, cattitle))
    # Replace the page's category links and save asynchronously with a
    # localized edit summary.
    page.put_async(textlib.replaceCategoryLinks(page.get(), pllist,
                                                site=page.site),
                   summary=i18n.twtranslate(site, 'catall-changing'))
def main(*args):
    """
    Process command line arguments and perform task.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    docorrections = True
    start = 'A'
    local_args = pywikibot.handle_args(args)
    for arg in local_args:
        if arg == '-onlynew':
            docorrections = False
        else:
            # Any non-option argument is the starting page title.
            start = arg
    mysite = pywikibot.Site()
    for p in mysite.allpages(start=start):
        try:
            text = p.get()
            cats = p.categories()
            if not cats:
                # Page has no categories yet: always ask for a list.
                pywikibot.output(u"========== %s ==========" % p.title())
                pywikibot.output('No categories')
                pywikibot.output('-' * 40)
                newcats = choosecats(text)
                if newcats != [] and newcats is not None:
                    make_categories(p, newcats, mysite)
            elif docorrections:
                # Page already categorized: show the current list and ask
                # for a replacement (skipped when -onlynew was given).
                pywikibot.output(u"========== %s ==========" % p.title())
                for c in cats:
                    pywikibot.output(c.title())
                pywikibot.output('-' * 40)
                newcats = choosecats(text)
                if newcats is None:
                    # 'xx' answer: strip all categories.
                    make_categories(p, [], mysite)
                elif newcats != []:
                    make_categories(p, newcats, mysite)
        except pywikibot.IsRedirectPage:
            pywikibot.output(u'%s is a redirect' % p.title())
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean exit on Ctrl-C.
        pywikibot.output('\nQuitting program...')
|
FibercorpLabs/FibercorpDevops | vmware/vcenter/AddDisk2VM.py | Python | gpl-3.0 | 4,831 | 0.005589 | #AddDisk2VM.py
from VMWConfigFile import *
from pyVim import connect
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim, vmodl
import atexit
import os
import sys
import ssl
import requests
import argparse
import time
import getpass
# Disabling urllib3 ssl warnings
requests.packages.urllib3.disable_warnings()
# Disabling SSL certificate verification
# NOTE(review): TLSv1 with CERT_NONE skips all certificate validation --
# presumably for lab vCenters with self-signed certs; confirm this is
# intended before production use.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
def get_obj(content, vimtype, name):
    """
    Return the first vSphere managed object of the given type whose name
    matches ``name``, or None when no such object exists.
    """
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    for candidate in container.view:
        if candidate.name == name:
            return candidate
    return None
def wait_for_task(task, actionName='job', hideResult=False):
# """
# Waits and provides updates on a vSphere task
# """
while task.info.state == vim.TaskInfo.State.running:
time.sleep(2)
if task.info.state == vim.TaskInfo.State.success:
if task.info.result is not None and not hideResult:
out = '%s completed successfully, result: %s' % (actionName, task.info.result)
print out
else:
out = '%s completed successfully.' % actionName
print out
else:
out = '%s did not complete successfully: %s' % (actionName, task.info.error)
raise task.info.error
print out
return task.info.result
def get_args():
    """ Get arguments from CLI """
    parser = argparse.ArgumentParser(description='Add disk to existing VM')
    parser.add_argument('--disk-type', required=False, action='store',
                        default='thin', choices=['thick', 'thin'],
                        help='thick or thin')
    parser.add_argument('--disk-size', required=True, action='store',
                        help='disk size, in GB, to add to the VM')
    parser.add_argument('-u', '--user', help='VC User', required=True)
    parser.add_argument('-p', '--passw', help='VC User Pass', required=False)
    parser.add_argument('-v', '--vm-name', required=True, help='Name of the VM')
    parsed = parser.parse_args()
    # Prompt interactively when no password was supplied on the command line.
    if not parsed.passw:
        parsed.passw = getpass.getpass(
            prompt='Enter password')
    return parsed
def add_ | disk(vm, si, disk_size, disk_type):
spec = vim.vm.ConfigSpec()
# get all disks on a VM, set unit_number to the next available
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
| unit_number = int(dev.unitNumber) + 1
# unit_number 7 reserved for scsi controller
if unit_number == 7:
unit_number += 1
if unit_number >= 16:
print "ERROR: we don't support this many disks"
return
if isinstance(dev, vim.vm.device.VirtualSCSIController):
controller = dev
# add disk here
dev_changes = []
new_disk_kb = int(disk_size) * 1024 * 1024
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = \
vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if disk_type == 'thin':
disk_spec.device.backing.thinProvisioned = True
disk_spec.device.backing.diskMode = 'persistent'
disk_spec.device.unitNumber = unit_number
disk_spec.device.capacityInKB = new_disk_kb
disk_spec.device.controllerKey = controller.key
dev_changes.append(disk_spec)
spec.deviceChange = dev_changes
vm.ReconfigVM_Task(spec=spec)
print "%sGB disk added to %s" % (disk_size, vm.config.name)
def main():
    # Connect to vCenter, locate the target VM by name and attach the disk.
    args = get_args()
    try:
        si = None
        try:
            #si = Service Instance of vCenter
            si = connect.SmartConnect(host=vc_settings["vcenter"],
                                      user=args.user,
                                      pwd=args.passw,
                                      port=443,
                                      sslContext=context)
        except IOError, e:
            # NOTE(review): connection errors are silently ignored here and
            # surface later as an attribute error on si -- confirm intended.
            pass
        atexit.register(Disconnect, si)
        content = si.RetrieveContent()
        vm = get_obj(content, [vim.VirtualMachine], args.vm_name)
        if vm:
            add_disk(vm, si, args.disk_size, args.disk_type)
        else:
            print "ERROR: VM not found"
    except vmodl.MethodFault, e:
        print "ERROR: Caught vmodl fault: %s" % e.msg
        return 1
    except Exception, e:
        print "ERROR: Caught exception: %s" % str(e)
        return 1
if __name__ == "__main__":
    main()
|
cloudera/hue | desktop/core/ext-py/Babel-2.5.1/tests/conftest.py | Python | apache-2.0 | 396 | 0 | impo | rt os
import pytest
@pytest.fixture
def os_environ(monkeypatch):
    # Fixture that replaces os.environ with a mutable copy for the duration
    # of the test, so tests can modify environment variables freely; the
    # copy is returned for direct manipulation.
    mock_environ = dict(os.environ)
    monkeypatch.setattr(os, 'environ', mock_environ)
    return mock_environ
def pytest_generate_tests(metafunc):
    # Parametrize tests marked with ``all_locales`` over every locale known
    # to babel (import deferred so collection works without locale data).
    if hasattr(metafunc.function, "all_locales"):
        from babel.localedata import locale_identifiers
        metafunc.parametrize("locale", list(locale_identifiers()))
|
Y--/root | documentation/doxygen/converttonotebook.py | Python | lgpl-2.1 | 35,405 | 0.006157 | #!/usr/bin/env python
# Author: Pau Miquel i Mir <pau.miquel.mir@cern.ch> <pmm1g15@soton.ac.uk>>
# Date: July, 2016
#
# DISCLAIMER: This script is a prototype and a work in progress. Indeed, it is possible that
# it may not work for certain tutorials, and that it, or the tutorial, might need to be
# tweaked slightly to ensure full functionality. Please do not hesistate to email the author
# with any questions or with examples that do not work.
#
# HELP IT DOESN'T WORK: Two possible solutions:
# 1. Check that all the types returned by the tutorial are in the gTypesList. If they aren't,
# simply add them.
# 2. If the tutorial takes a long time to execute (more than 90 seconds), add the name of the
# tutorial to the list of long tutorials listLongTutorials, in the function findTimeout.
#
# REQUIREMENTS: This script needs jupyter to be properly installed, as it uses the python
# package nbformat and calls the shell commands `jupyter nbconvert` and `jupyter trust`. The
# rest of the packages used should be included in a standard installation of python. The script
# is intended to be run on a UNIX based system.
#
#
# FUNCTIONING:
# -----------
# The converttonotebook script creates Jupyter notebooks from raw C++ or python files.
# Particulary, it is indicated to convert the ROOT tutorials found in the ROOT
# repository.
#
# The script should be called from bash with the following format:
# python /path/to/script/converttonotebook.py /path/to/<macro>.C /path/to/outdir
#
# Indeed the script takes two arguments, the path to the macro and the path to the directory
# where the notebooks will be created
#
# The script's general functioning is as follows. The macro to be converted is imported as a string.
# A series of modifications are made to this string, for instance delimiting where markdown and
# code cells begin and end. Then, this string is converted into ipynb format using a function
# in the nbconvert package. Finally, the notebook is executed and output.
#
# For converting python tutorials it is fairly straightforward. It extracts the description and
# author information from the header and then removes it. It also converts any comment at the
# beginning of a line into a Markdown cell.
#
# For C++ files the process is slightly more complex. The script separates the functions from the
# main code. The main function is identified as it has the smae name as the macro file. The other
# functions are considered functions. The main function is "extracted" and presented as main code.
# The helper functions are placed in their own code cell with the %%cpp -d magic to enable function
# defintion. Finally, as with Python macros, relevant information is extracted from the header, and
# newline comments are converted into Markdown cells (unless they are in helper functions).
#
# The script creates an .ipynb version of the macro, with the full output included.
# The files are named:
# <macro>.<C or py>.nbconvert.ipynb
#
# It is called by filter.cxx, which in turn is called by doxygen when processing any file
# in the ROOT repository. filter.cxx only calls convertonotebook.py when the string \notebook
# is found in the header of the turorial, but this script checks for its presence as well.
import re
import os
import sys
import json
import time
import doctest
import textwrap
import subprocess
from nbformat import v3, v4
from datetime import datetime, date
# List of types that will be considered when looking for a C++ function. If a macro returns a
# type not included on the list, the regular expression will not match it, and thus the function
# will not be properly defined. Thus, any other type returned by function must be added to this list
# for the script to work correctly.
gTypesList = ["void", "int", "Int_t", "TF1", "string", "bool", "double", "float", "char",
"TCanvas", "TTree", "TString", "TSeqCollection", "Double_t", "TFile", "Long64_t", "Bool_t", "TH1",
"RooDataSet", "RooWorkspace" , "HypoTestInverterResult" , "TVectorD" , "TArrayF", "UInt_t"]
# -------------------------------------
# -------- Fuction definitions---------
# -------------------------------------
def unindenter(string, spaces = 3):
    """
    Strip ``spaces`` leading spaces from every line of ``string``.

    Lines that are not indented by at least that many spaces are kept
    unchanged. Every line in the result is newline terminated.
    """
    prefix = ' ' * spaces
    unindented = []
    for line in string.splitlines():
        if line.startswith(prefix):
            unindented.append(line[spaces:])
        else:
            unindented.append(line)
    return ''.join(line + '\n' for line in unindented)
def readHeaderPython(text):
    """
    Parse and strip the doxygen header of a python tutorial.

    Returns a tuple (newtext, description, author, isNotebook, isJsroot,
    nodraw, needsHeaderFile): newtext is the tutorial with the header
    removed, description and author come from the header, isNotebook is
    True when a \\notebook line is present, and the remaining booleans
    report the -js, -nodraw and -header options of that line.
    """
    description = ''
    author = ''
    isNotebook = False
    isJsroot = False
    nodraw = False
    needsHeaderFile = False
    lines = text.splitlines()
    for i, line in enumerate(lines):
        if line.startswith("## \\aut"):
            author = line[11:]
        elif line.startswith("## \\note"):
            isNotebook = True
            isJsroot = isJsroot or "-js" in line
            nodraw = nodraw or "-nodraw" in line
            needsHeaderFile = needsHeaderFile or "-header" in line
        elif line.startswith("##"):
            # Plain header comments feed the description once \notebook has
            # been seen; other \-directives are ignored.
            if not line.startswith("## \\") and isNotebook:
                description += line[3:] + '\n'
        else:
            # First non-comment line: the header is over.
            break
    newtext = ''.join(body_line + '\n' for body_line in lines[i:])
    return newtext, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile
def pythonComments(text):
"""
Converts comments delimited by # or ## and on a new line into a markdown cell.
For python files only
>>> pythonComments('''## This is a
... ## multiline comment
... def function()''')
'# <markdowncell>\\n## This is a\\n## multiline comment\\n# <codecell>\\ndef function()\\n'
>>> pythonComments('''def function():
... variable = 5 # Comment not in cell
... # Comment also not in cell''')
'def function():\\n variable = 5 # Comment not in cell\\n # Comment also not in cell\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for i, line in enumerate(text):
if line.startswith("#") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
newtext += (line + "\n")
elif inComment and not line.startswith("#"): # True if first line after comment
i |
yamstudio/Codeforces | 100/148A - Insomnia cure.py | Python | gpl-3.0 | 146 | 0.013699 | x = | []
d = 0
for _ in range(4):
x.append(int(raw_input()))
for i in range(input()):
if any(((i + 1) % j == 0) for j in x):
d += | 1
print d
|
swwendel/ZipUploadPublishWebmapShare | ZipUploadPublishWebMapShare.py | Python | apache-2.0 | 3,872 | 0.012655 | '''
Title: ZipUploadPublishWebMapShare.py
Author: Stephanie Wendel
Date: 4/18/2014
Updated: 10/15/2015
Description:
NOTE: This script uses the request module to make http requests. The script
should be able to access this module in order for it to work correctly. If
it is not installed, one place it can be downloaded from is Github:
https://github.com/kennethreitz/requests
'''
import requests, arcpy, os, zipfile, json, socket
hostname = "http://" + socket.getfqdn()
username = ""
password = ""
shp = "CH_POI.shp"
zip = "CH_POI.zip"
filename = shp[:-4]
outputFolder = r"C:\Temp"
arcpy.env.workspace = outputFolder
serviceName = "CH_POI_test"
# Create Token
token_params ={'username': username,
'password': password,
'referer': hostname,
'f':'json'}
tokenResponse = requests.post("https://www.arcgis.com/sharing/generateToken",\
params=token_params)
token = json.loads(tokenResponse.text)['token']
# Make zip file
zf = zipfile.ZipFile(os.path.join(outputFolder, zip), "w")
for shpfile_part in arcpy.ListFiles(filename+"*"):
zf.write(os.path.join(outputFolder, shpfile_part), shpfile_part,
zipfile.ZIP_DEFLATED)
zf.close()
# Start upload of zip file
add_zip_url = "http://www.arcgis.com/sharing/rest/content/users/{0}/addItem".format(username)
zip_params = {'title': "{}_zip".format(filename), "type": "Shapefile",
'f': 'json', 'token':token}
filesup = {'file':open(os.path.join(outputFolder, zip), 'rb')}
zip_response = requests.post(add_zip_url, params=zip_params, files=filesup)
zipitemid = json.loads(zip_response.text)['id']
# Analyze zip file to get layer info and record count
analyze_url = "http://www.arcgis.com/sharing/rest/content/features/analyze"
analyze_params = {'f':'json', 'itemId': zipitemid, 'file': zip, 'type':'shapefile', 'token':token}
analyze_response = requests.post(analyze_url, params=analyze_params)
response_publishParameters = json.loads(analyze_response.text)['publishParameters']
layerInfo = response_publishParameters['layerInfo']
maxRecordCount = response_publishParameters['maxRecordCount']
# Publish zip file
publish_url = "http://www.arcgis.com/sharing/rest/content/users/{0}/publish".format(username)
publishParameters = json.dumps({'name': serviceName})
publish_parameters = {'itemID': zipitemid, 'token': token, 'filetype':'shapefile','f': 'json','publishParameters': publishParameters}
publish_response = requests.post(publish_url, params=publish_parameters)
services = json.loads(publish_response.text)['services']
for item in services:
OP_serviceurl = item['serviceurl'] + "/0"
OP_serviceItemId = item['serviceItemId']
# create webmap with zip file service as operational layer
webmap_name = "{} Webmap".format(filename)
webmap_url = "http://www.arcgis.com/sharing/rest/content/users/{0}/addItem".format(username)
text = json.dumps({'operationalLayers':[{'url': OP_serviceurl, 'visibility':'true' | ,"opacity":1, 'title': filename}],"baseMap": {'baseMapLayers':[{'id':"World_Imagery_1068",'opacity':1,'visibility':'true','url':'http://services.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer'}],'title':'Imagery'},'version':'1.9.1'})
webmap_params = {'title': webmap_name, 'type':'Web Map', 'text':te | xt, 'f': 'json','token': token}
webmap_response = requests.post(webmap_url, params=webmap_params)
print json.loads(webmap_response.text)
webmap_id = json.loads(webmap_response.text)['id']
# Share the web map with the organziation
share_webmap_url = "http://www.arcgis.com/sharing/rest/content/users/{0}/items/{1}/share".format(username, webmap_id)
share_webmap_params = {'everyone': 'false', 'org':'true', 'f':'json', 'token':token}
share_webmap_response = requests.post(share_webmap_url, params=share_webmap_params)
print json.loads(share_webmap_response.text)
|
knoppo/pi3bar | pi3bar/plugins/disk.py | Python | mit | 3,744 | 0.00187 | import os
from pi3bar.plugins.base import Plugin
from pi3bar.utils import humanize_size_bytes
class Disk(Plugin):
    """
    :class:`pi3bar.app.Pi3Bar` plugin to disk usage.

    Available format replacements (``*_p`` = percentage):

    * ``%(size)s`` E.g. '100GB'
    * ``%(free)s`` E.g. '70GB'
    * ``%(free_p)f`` E.g. 70.0
    * ``%(available)s`` E.g. '65GB'
    * ``%(available_p)f`` E.g. 65.0
    * ``%(usage)s`` E.g. '30GB'
    * ``%(usage_p)f`` E.g. 30.0

    :param full_format: :class:`str` - Format string (default: '%(usage_p).2f%% (%(size)s)')
    :param short_format: :class:`str` - Short format string (default: '%(usage_p).2f%%')
    :param warning_usage: :class:`int` - Warning breakpoint (default: 90)
    :param warning_color: :class:`str` - Warning color (default: '#ffff00')
    :param warning_background: :class:`str` - Warning background color (default: None)
    :param critical_usage: :class:`int` - Critical breakpoint (default: 95)
    :param critical_color: :class:`str` - Critical color (default: None)
    :param critical_background: :class:`str` - Critical background color (default: '#ff0000')

    Examples:

    .. code-block:: python

        # root
        Disk('/')

        # custom format (escape '%' with '%')
        Disk('/', full_format='%(usage)s / %(size)s', short_format='%(free_p)f%%')

        # adjust warning/critical switches
        Disk('/mnt', warning_usage=80, critical_usage=90)
    """

    def __init__(self, mount_path, **kwargs):
        # The mount path doubles as the i3bar instance identifier.
        self.instance = mount_path
        self.mount_path = mount_path
        self.full_format = kwargs.pop('full_format', '%(usage_p).2f%% (%(size)s)')
        self.short_format = kwargs.pop('short_format', '%(usage_p).2f%%')
        self.warning_usage = kwargs.pop('warning_usage', 90)
        self.warning_color = kwargs.pop('warning_color', '#ffff00')
        self.warning_background = kwargs.pop('warning_background', None)
        self.critical_usage = kwargs.pop('critical_usage', 95)
        self.critical_color = kwargs.pop('critical_color', None)
        self.critical_background = kwargs.pop('critical_background', '#ff0000')
        super(Disk, self).__init__(**kwargs)

    def get_stats(self):
        # Query the filesystem via statvfs; all sizes computed in bytes.
        statvfs = os.statvfs(self.mount_path)
        size_bytes = statvfs.f_frsize * statvfs.f_blocks
        free_bytes = statvfs.f_frsize * statvfs.f_bfree  # with reserved space
        free_percent = 100.0 / size_bytes * free_bytes
        available_bytes = statvfs.f_frsize * statvfs.f_bavail  # without reserved space
        available_percent = 100.0 / size_bytes * available_bytes
        usage_bytes = size_bytes - free_bytes
        usage_percent = 100.0 / size_bytes * usage_bytes
        return {
            'size': humanize_size_bytes(size_bytes),  # 100GB
            'free': humanize_size_bytes(free_bytes),  # 70GB
            'available': humanize_size_bytes(available_bytes),  # 65GB
            'usage': humanize_size_bytes(usage_bytes),  # 30GB
            'free_p': free_percent,  # 70.0
            'available_p': available_percent,  # 65.0
            'usage_p': usage_percent,  # 30.0
        }

    def cycle(self):
        # Called on each refresh: build display texts and pick the color
        # scheme according to the warning/critical usage breakpoints.
        stats = self.get_stats()
        prefix = '%s ' % self.mount_path
        self.full_text = prefix + self.full_format % stats
        self.short_text = prefix + self.short_format % stats
        if float(stats['usage_p']) > self.critical_usage:
            self.color = self.critical_color
            self.background = self.critical_background
        elif float(stats['usage_p']) > self.warning_usage:
            self.color = self.warning_color
            self.background = self.warning_background
        else:
            self.color = None
            self.background = None
|
mF2C/COMPSs | compss/programming_model/bindings/python/src/pycompss/worker/piper/commons/__init__.py | Python | apache-2.0 | 670 | 0 | #!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
|
colour-science/colour | colour/temperature/tests/test_hernandez1999.py | Python | bsd-3-clause | 4,583 | 0.000218 | """Defines the unit tests for the :mod:`colour.temperature.hernandez1999` module."""
import numpy as np
import unittest
from itertools import permutations
from colour.temperature import xy_to_CCT_Hernandez1999, CCT_to_xy_Hernandez1999
from colour.utilities import ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"Testxy_to_CCT_Hernandez1999",
"TestCCT_to_xy_Hernandez1999",
]
class Testxy_to_CCT_Hernandez1999(unittest.TestCase):
    """
    Define :func:`colour.temperature.hernandez1999.xy_to_CCT_Hernandez1999`
    definition unit tests methods.
    """

    def test_xy_to_CCT_Hernandez1999(self):
        """
        Test :func:`colour.temperature.hernandez1999.xy_to_CCT_McCamy1992`
        definition.
        """
        # Known chromaticity -> CCT reference values.
        cases = (
            (np.array([0.31270, 0.32900]), 6500.74204318),
            (np.array([0.44757, 0.40745]), 2790.64222533),
            (np.array([0.244162248213914, 0.240333674758318]), 64448.11092565),
        )
        for chromaticity, expected in cases:
            self.assertAlmostEqual(
                xy_to_CCT_Hernandez1999(chromaticity), expected, places=7
            )

    def test_n_dimensional_xy_to_CCT_Hernandez1999(self):
        """
        Test :func:`colour.temperature.hernandez1999.xy_to_CCT_Hernandez1999`
        definition n-dimensional arrays support.
        """
        single = np.array([0.31270, 0.32900])
        expected = xy_to_CCT_Hernandez1999(single)

        # Tiled (6, 2) input must yield a length-6 vector of CCTs.
        tiled = np.tile(single, (6, 1))
        expected_tiled = np.tile(expected, 6)
        np.testing.assert_almost_equal(
            xy_to_CCT_Hernandez1999(tiled), expected_tiled, decimal=7
        )

        # Reshaped (2, 3, 2) input must yield a (2, 3) array of CCTs.
        reshaped = np.reshape(tiled, (2, 3, 2))
        expected_reshaped = np.reshape(expected_tiled, (2, 3))
        np.testing.assert_almost_equal(
            xy_to_CCT_Hernandez1999(reshaped), expected_reshaped, decimal=7
        )

    @ignore_numpy_errors
    def test_nan_xy_to_CCT_Hernandez1999(self):
        """
        Test :func:`colour.temperature.hernandez1999.xy_to_CCT_Hernandez1999`
        definition nan support.
        """
        values = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(values * 3, r=2)):
            xy_to_CCT_Hernandez1999(case)
class TestCCT_to_xy_Hernandez1999(unittest.TestCase):
    """
    Define :func:`colour.temperature.hernandez1999.CCT_to_xy_Hernandez1999`
    definition unit tests methods.
    """

    def test_CCT_to_xy_Hernandez1999(self):
        """
        Test :func:`colour.temperature.hernandez1999.CCT_to_xy_Hernandez1999`
        definition.
        """
        # Known CCT -> chromaticity reference values (Nelder-Mead solver).
        cases = (
            (6500.74204318, np.array([0.31269943, 0.32900373])),
            (2790.64222533, np.array([0.42864308, 0.36754776])),
            (64448.11092565, np.array([0.08269106, 0.36612620])),
        )
        for cct, expected in cases:
            np.testing.assert_almost_equal(
                CCT_to_xy_Hernandez1999(cct, {"method": "Nelder-Mead"}),
                expected,
                decimal=7,
            )

    def test_n_dimensional_CCT_to_xy_Hernandez1999(self):
        """
        Test :func:`colour.temperature.hernandez1999.CCT_to_xy_Hernandez1999`
        definition n-dimensional arrays support.
        """
        single = 6500.74204318
        expected = CCT_to_xy_Hernandez1999(single)

        # A length-6 vector of CCTs must yield a (6, 2) array of xy pairs.
        tiled = np.tile(single, 6)
        expected_tiled = np.tile(expected, (6, 1))
        np.testing.assert_almost_equal(
            CCT_to_xy_Hernandez1999(tiled), expected_tiled, decimal=7
        )

        # A (2, 3) grid of CCTs must yield a (2, 3, 2) array of xy pairs.
        grid = np.reshape(tiled, (2, 3))
        expected_grid = np.reshape(expected_tiled, (2, 3, 2))
        np.testing.assert_almost_equal(
            CCT_to_xy_Hernandez1999(grid), expected_grid, decimal=7
        )

    @ignore_numpy_errors
    def test_nan_CCT_to_xy_Hernandez1999(self):
        """
        Test :func:`colour.temperature.hernandez1999.CCT_to_xy_Hernandez1999`
        definition nan support.
        """
        values = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(values * 3, r=2)):
            CCT_to_xy_Hernandez1999(case)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
ngageoint/geoq | geoq/agents/admin.py | Python | mit | 710 | 0.008451 | # -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from reversion.admin import VersionAdmin
from django.contrib.gis import admin
from .models import Feedback,Topic
@admin.register(Feedback)
class FeedbackAdmin(VersionAdmin, admin.ModelAdmin):
    """Reversion-tracked admin for feedback entries, ordered by topic."""
    model = Feedback
    save_as = True
    ordering = ['topic']
    list_display = ['name', 'email', 'topic', 'message']
@admin.register(Topic)
class TopicAdmin(VersionAdmin, admin.ModelAdmin):
    """Reversion-tracked admin for feedback topics."""
    model = Topic
    save_as = True
    ordering = ['name']
    list_display = ['name']
|
andrewsmedina/django-admin2 | djadmin2/tests/test_auth_admin.py | Python | bsd-3-clause | 2,211 | 0 | from django import forms
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from djadmin2.site import djadmin2_site
from ..admin2 import UserAdmin2
class UserAdminTest(TestCase):
    """Check that the User admin renders floppyforms widgets for both the
    create and the update views."""

    def setUp(self):
        # A superuser is required so the admin views grant access.
        self.factory = RequestFactory()
        self.user = User(
            username='admin',
            is_staff=True,
            is_superuser=True)
        self.user.set_password('admin')
        self.user.save()

    def test_create_form_uses_floppyform_widgets(self):
        # The bare form class already uses the floppyforms TextInput.
        unbound_form = UserAdmin2.create_form_class()
        self.assertTrue(
            isinstance(unbound_form.fields['username'].widget,
                       forms.TextInput))

        # The same holds for the form produced by the rendered create view.
        request = self.factory.get(reverse('admin2:auth_user_create'))
        request.user = self.user
        admin_instance = UserAdmin2(User, djadmin2_site)
        create_view = admin_instance.create_view.view.as_view(
            **admin_instance.get_create_kwargs())
        response = create_view(request)
        bound_form = response.context_data['form']
        self.assertTrue(
            isinstance(bound_form.fields['username'].widget,
                       forms.TextInput))

    def test_update_form_uses_floppyform_widgets(self):
        # The bare update form uses floppyforms text and datetime widgets.
        unbound_form = UserAdmin2.update_form_class()
        self.assertTrue(
            isinstance(unbound_form.fields['username'].widget,
                       forms.TextInput))
        self.assertTrue(
            isinstance(unbound_form.fields['date_joined'].widget,
                       forms.DateTimeInput))

        # The same holds for the form produced by the rendered update view.
        request = self.factory.get(
            reverse('admin2:auth_user_update', args=(self.user.pk,)))
        request.user = self.user
        admin_instance = UserAdmin2(User, djadmin2_site)
        update_view = admin_instance.update_view.view.as_view(
            **admin_instance.get_update_kwargs())
        response = update_view(request, pk=self.user.pk)
        bound_form = response.context_data['form']
        self.assertTrue(
            isinstance(bound_form.fields['username'].widget,
                       forms.TextInput))
        self.assertTrue(
            isinstance(bound_form.fields['date_joined'].widget,
                       forms.DateTimeInput))
|
peret/visualize-bovw | datamanagers/ClefManager.py | Python | gpl-2.0 | 3,399 | 0.00912 | from DataManager import DataManager, NoSuchCategoryException
import string
class CLEFManager(DataManager):
    """Data manager for the ImageCLEF dataset.

    Knows the on-disk layout of the dataset and how to resolve category
    names to the numeric ids used by the ground-truth annotation files.
    """

    def __init__(self, *args, **kwargs):
        super(CLEFManager, self).__init__(*args, **kwargs)
        # Fixed on-disk layout; every path lives below BASE.
        self.PATHS = {
            "BASE" : "/home/peter/thesis/data_ImageCLEF",
            "KEYPOINTS" : "/home/peter/thesis/data_ImageCLEF/features",
            "BOW" : "/home/peter/thesis/data_ImageCLEF/bow",
            "IMG" : "/home/peter/thesis/data_ImageCLEF/images",
            "CLASSIFIER" : "/home/peter/thesis/data_ImageCLEF/classifiers",
            "RESULTS" : "/home/peter/thesis/data_ImageCLEF/results",
            "LOGS" : "/home/peter/thesis/data_ImageCLEF/logs",
            "METADATA" : "/home/peter/thesis/data_ImageCLEF/metadata",
            "CATEGORY_LIST" : "/home/peter/thesis/data_ImageCLEF/metadata/concepts_2011.txt",
            "TRAIN_CATEGORIES" : "/home/peter/thesis/data_ImageCLEF/metadata/trainset_gt_annotations_corrected.txt",
            "TEST_CATEGORIES" : "/home/peter/thesis/data_ImageCLEF/metadata/testset_GT_annotations.txt",
        }

    # TODO: to super class?
    def _get_positive_samples(self, dataset, category):
        """Collect file names annotated positive for *category*.

        *dataset* selects the annotation files: 'train', 'test' or 'all'.
        """
        filenames = []
        if dataset in ["train", "all"]:
            filenames += self._extract_positive_samples(category, self.PATHS["TRAIN_CATEGORIES"])
        if dataset in ["test", "all"]:
            filenames += self._extract_positive_samples(category, self.PATHS["TEST_CATEGORIES"])
        return filenames

    def _extract_positive_samples(self, category, file_list):
        """Return image names from *file_list* flagged "1" for *category*.

        Column 0 of each line is the image name; column (category_id + 1)
        holds the 0/1 annotation flag.
        """
        category_id = self._get_category_number(category)
        with open(file_list, "r") as f:
            return [line.split()[0] for line in f if line.split()[category_id + 1] == "1"]

    def _get_category_number(self, category_name):
        """Case-insensitively resolve *category_name* to its numeric id.

        :raises NoSuchCategoryException: if the name is unknown.
        """
        with open(self.PATHS["CATEGORY_LIST"], "r") as f:
            for line in f:
                nr, name = line.split()
                # str.lower() replaces the string.lower() module function,
                # which was removed in Python 3; behaviour is identical.
                if category_name.lower() == name.lower():
                    return int(nr)
        raise NoSuchCategoryException("There is no category %s in the ImageCLEF dataset."
                                      % category_name)

    def get_image_names(self, dataset, category = None):
        """Return all image names of *dataset* ('train', 'test' or 'all').

        *category* is accepted for interface compatibility but unused: the
        ImageCLEF lists are not split per category.
        """
        img_names = []
        if dataset in ["train", "all"]:
            img_names += self._get_image_names(self.PATHS["TRAIN_CATEGORIES"])
        if dataset in ["test", "all"]:
            img_names += self._get_image_names(self.PATHS["TEST_CATEGORIES"])
        return img_names

    def _get_image_names(self, file_list):
        """Read the first column (image name) of every line in *file_list*."""
        with open(file_list, "r") as f:
            return [line.split()[0].strip() for line in f]

    # Overwrite parent method to make category optional
    def build_sample_matrix(self, dataset, category = None):
        # TODO: Cache results?
        return super(CLEFManager, self).build_sample_matrix(dataset, category)

    def _build_sample_matrix(self, dataset, category = None, files = None):
        """Stack the bag-of-words vectors of *files* (default: all images
        of *dataset*) into a single sample matrix."""
        if not files:
            files = self.get_image_names(dataset)
        bow_files = self.get_bow_filenames(files)
        return self._stack_bow_vectors(bow_files)

    def _build_class_vector(self, dataset, category):
        """Build the per-image class label vector of *dataset* for
        *category* (positives looked up via the base-class helper)."""
        bow_files = self.get_bow_filenames(self.get_image_names(dataset))
        positives = self.get_positive_samples(dataset, category)
        return self._generate_class_vector(category, bow_files, positives)
google/llvm-propeller | lldb/source/Interpreter/embedded_interpreter.py | Python | apache-2.0 | 4,415 | 0.001586 | import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
import code
import lldb
import traceback
# Readline support is optional: the module may be missing entirely
# (ImportError) or be the stub used to suppress it on some Linux setups
# (AttributeError raised while rlcompleter inspects it).
try:
    import readline
    import rlcompleter
except ImportError:
    have_readline = False
except AttributeError:
    # This exception gets hit by the rlcompleter when Linux is using
    # the readline suppression import.
    have_readline = False
else:
    have_readline = True
    if 'libedit' in readline.__doc__:
        # BSD libedit (e.g. on macOS) uses a different binding syntax.
        readline.parse_and_bind('bind ^I rl_complete')
    else:
        readline.parse_and_bind('tab: complete')

# Set to True by LLDBQuitter so a SystemExit caused by quit()/exit() can be
# told apart from an ordinary sys.exit(n) call.
g_builtin_override_called = False
class LLDBQuitter(object):
    """Callable stand-in for the quit/exit builtins that records the call."""

    def __init__(self, name):
        # Which builtin ('quit' or 'exit') this object replaces.
        self.name = name

    def __repr__(self):
        # Evaluating the bare name (typing ``quit`` without parentheses)
        # behaves exactly like calling it.
        self()

    def __call__(self, code=None):
        global g_builtin_override_called
        # Flag the override so the SystemExit below can be attributed,
        # then unwind the interpreter loop.
        g_builtin_override_called = True
        raise SystemExit(-1)
def setquit():
    '''Replace the builtin quit()/exit() with LLDBQuitter instances.'''
    # Reset the flag before each interactive loop or single line so a later
    # SystemExit can be attributed: True means quit()/exit() was invoked,
    # False means something like sys.exit(123).
    global g_builtin_override_called
    g_builtin_override_called = False
    builtins.quit = LLDBQuitter('quit')
    builtins.exit = LLDBQuitter('exit')
# When running one line, the caller may stash the code to execute in this
# module-level string instead of passing it as an argument -- useful when
# the string's contents would be hard to escape correctly.
g_run_one_line_str = None
def get_terminal_size(fd):
    """Return (rows, cols) of the terminal on *fd*, or (0, 0) if unknown."""
    try:
        import fcntl
        import struct
        import termios
        # TIOCGWINSZ fills a struct winsize; we only need the first two
        # shorts (rows, cols).
        packed = fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
        hw = struct.unpack('hh', packed)
    except:
        # Not a tty (or ioctl unsupported) -- report an unknown size.
        hw = (0, 0)
    return hw
def readfunc_stdio(prompt):
    """Write *prompt* to stdout and return one stripped line from stdin."""
    sys.stdout.write(prompt)
    sys.stdout.flush()
    raw = sys.stdin.readline()
    # readline() yields '' only at end of stream, so an empty value means
    # the input is exhausted.
    if not raw:
        raise EOFError
    return raw.rstrip()
def run_python_interpreter(local_dict):
    """Run an interactive Python session with *local_dict* as its namespace."""
    # Pass in the dictionary, for continuity from one session to the next.
    setquit()
    try:
        fd = sys.stdin.fileno()
        interacted = False
        # A reported column count of 0 means stdin is not a real terminal
        # (e.g. lldb is driving us through a non-pty channel).
        if get_terminal_size(fd)[1] == 0:
            try:
                import termios
                old = termios.tcgetattr(fd)
                if old[3] & termios.ECHO:
                    # Need to turn off echoing and restore
                    new = termios.tcgetattr(fd)
                    new[3] = new[3] & ~termios.ECHO
                    try:
                        termios.tcsetattr(fd, termios.TCSADRAIN, new)
                        interacted = True
                        code.interact(
                            banner="Python Interactive Interpreter. To exit, type 'quit()', 'exit()'.",
                            readfunc=readfunc_stdio,
                            local=local_dict)
                    finally:
                        # Always restore the original terminal attributes.
                        termios.tcsetattr(fd, termios.TCSADRAIN, old)
            except:
                # Any termios failure falls through to the echo-on path below.
                pass
            # Don't need to turn off echoing
            if not interacted:
                code.interact(
                    banner="Python Interactive Interpreter. To exit, type 'quit()', 'exit()' or Ctrl-D.",
                    readfunc=readfunc_stdio,
                    local=local_dict)
        else:
            # We have a real interactive terminal
            code.interact(
                banner="Python Interactive Interpreter. To exit, type 'quit()', 'exit()' or Ctrl-D.",
                local=local_dict)
    except SystemExit as e:
        global g_builtin_override_called
        # Only report exits not triggered by the quit()/exit() overrides.
        if not g_builtin_override_called:
            print('Script exited with %s' % (e))
def run_one_line(local_dict, input_string):
    """Execute one line of code (or the stashed g_run_one_line_str) inside
    *local_dict*."""
    global g_run_one_line_str
    setquit()
    try:
        console = code.InteractiveConsole(local_dict)
        # Prefer the explicit argument; fall back to the module-level stash.
        if input_string:
            console.runsource(input_string)
        elif g_run_one_line_str:
            console.runsource(g_run_one_line_str)
    except SystemExit as e:
        global g_builtin_override_called
        # Only report exits not triggered by the quit()/exit() overrides.
        if not g_builtin_override_called:
            print('Script exited with %s' % (e))
|
neizod/coding-analysis | framework/github/extract/compatibility.py | Python | mit | 4,835 | 0 | import os
import json
import logging
from framework._utils import FunctionHook
class GitHubExtractCompatibility(FunctionHook):
    ''' test compatibility over language versions. '''

    @staticmethod
    def handle_python(directory):
        ''' returns number of Python files in a repository that is syntactic
            compatible in Python 2/3. this method use standard Python program
            with module compileall to check. '''
        from subprocess import getoutput
        finder = 'find {} -name "{}" | wc -l'
        # compileall prints a "File ..." line per failing file, so the grep
        # counts are the number of *incompatible* files per interpreter;
        # the last element is the total number of Python files.
        return (int(getoutput(('python2 -m compileall {} 2> /dev/null | '
                               'grep "File" | wc -l').format(directory))),
                int(getoutput(('python3 -m compileall {} 2> /dev/null | '
                               'grep "File" | wc -l').format(directory))),
                int(getoutput(finder.format(directory, '*.py'))))

    @staticmethod
    def handle_php(directory):
        ''' returns number of PHP files in a repository that is syntactic
            compatible in PHP 5.3~5.6. this method use standard PHP program
            (`php -l`), selection between PHP versions is done by phpbrew. '''
        from subprocess import getoutput
        # One blank line per failing `php -l` lint, counted by wc.
        check_php = (r'for file in $(find {} -name "*.php");'
                     r'do php -l "$file" &> /dev/null;'
                     r'[ $? != 0 ] && echo; done | wc -l').format(directory)
        args = '/bin/bash -c', '. ~/.phpbrew/bashrc', 'phpbrew use', check_php
        finder = 'find {} -name "{}" | wc -l'
        return (int(getoutput('{} \'{}; {} 5.3.29; {}\''.format(*args))),
                int(getoutput('{} \'{}; {} 5.4.38; {}\''.format(*args))),
                int(getoutput('{} \'{}; {} 5.5.22; {}\''.format(*args))),
                int(getoutput('{} \'{}; {} 5.6.6; {}\''.format(*args))),
                int(getoutput(finder.format(directory, '*.php'))))

    @staticmethod
    def git_snapshot(repo_obj, commit):
        ''' checkout repository snapshot at specific commit. '''
        # Drop untracked files and local modifications before switching.
        if repo_obj.is_dirty():
            repo_obj.git.clean('-df')
            repo_obj.git.checkout('--')
        repo_obj.git.checkout(commit, '-f')
        return repo_obj

    def check_compat(self, lang, directory, done, limits=2):
        ''' returns syntactic compatibility of each repository over versions
            of the language over commit history.

            Yields one result dict per commit, sampling at most *limits*
            commits not already present in *done*, and restores the
            repository to its original HEAD afterwards. '''
        import git
        import random
        repo_obj = git.Repo(directory)
        latest_commit = repo_obj.head.reference
        # Only visit commits whose hash has not been processed yet.
        do_only = [commit for commit in repo_obj.iter_commits()
                   if commit.hexsha not in {row['hash'] for row in done}]
        random.shuffle(do_only)
        for commit in do_only[:limits]:
            repo_obj = self.git_snapshot(repo_obj, commit)
            if lang == 'Python':
                py2, py3, files = self.handle_python(directory)
                yield {'date': commit.committed_date, 'hash': commit.hexsha,
                       'py2': py2, 'py3': py3, 'files': files}
            if lang == 'PHP':
                php53, php54, php55, php56, files = self.handle_php(directory)
                yield {'date': commit.committed_date, 'hash': commit.hexsha,
                       'php53': php53, 'php54': php54, 'php55': php55,
                       'php56': php56, 'files': files}
        # Put the working tree back where we found it.
        repo_obj = self.git_snapshot(repo_obj, latest_commit)

    def main(self, only_repo=None, only_lang=None, count=1, **_):
        """Extract compatibility data for the selected repositories and merge
        it into data/github/extract/compatibility.json."""
        from framework._utils import write
        from framework._utils.misc import datapath
        from framework.github._helper import iter_repos
        os.makedirs(datapath('github', 'extract'), exist_ok=True)
        filepath = datapath('github', 'extract', 'compatibility.json')
        if os.path.isfile(filepath):
            # Close the handle deterministically (the original leaked it).
            with open(filepath) as data_file:
                extracted_data = json.load(data_file)
        else:
            extracted_data = {}
        for lang, repo in iter_repos():
            if only_lang is not None and lang.lower() != only_lang.lower():
                continue
            if only_repo is not None and repo['name'] != only_repo:
                continue
            directory = datapath('github', 'repos', repo['name'])
            logging.info('extract: %s', repo['name'])
            if repo['name'] not in extracted_data:
                extracted_data[repo['name']] = []
            done = extracted_data[repo['name']]
            done += self.check_compat(lang, directory, done, limits=count)
            done.sort(key=lambda row: row['date'])
            # Persist after every repository so partial runs are not lost.
            # NOTE(review): write.json is handed an open file object;
            # presumably it closes it -- confirm in framework._utils.write.
            write.json(extracted_data, open(filepath, 'w'))

    def modify_parser(self):
        # TODO(review): main() also accepts only_lang, but no matching
        # command-line option is registered here.
        self.parser.add_argument(
            '-r', '--only-repo', help='''only this repository.''')
        self.parser.add_argument(
            '-c', '--count', type=int, default=1,
            help='''limit checkout count.''')
|
mindw/grin | examples/grinpython.py | Python | bsd-3-clause | 4,086 | 0.002203 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
""" Transform Python code by omitting strings, comments, and/or code.
"""
from six.moves import StringIO
import os
import shlex
import string
import sys
import tokenize
import grin
__version__ = '1.2'
class Transformer(object):
    """ Transform Python files to remove certain features.

    Each token class (code, comments, strings) can independently be kept or
    blanked out with spaces, preserving the original layout.
    """

    def __init__(self, python_code, comments, strings):
        # Which token classes survive the transformation.
        self.python_code = python_code
        self.comments = comments
        self.strings = strings

        # translate() table that maps every non-whitespace character to a
        # space; whitespace maps to itself so line structure is preserved.
        chars = [' '] * 256
        for ws in string.whitespace:
            chars[ord(ws)] = ws
        self.space_table = ''.join(chars)

    def keep_token(self, kind):
        """ Return True if we should keep the token in the output. """
        if kind in (tokenize.NL, tokenize.NEWLINE):
            return True
        if kind == tokenize.COMMENT:
            return self.comments
        if kind == tokenize.STRING:
            return self.strings
        return self.python_code

    def replace_with_spaces(self, s):
        """ Blank out *s*, keeping only its whitespace characters. """
        return s.translate(self.space_table)

    def __call__(self, filename, mode='rb'):
        """ Open a file and return a StringIO with transformed contents. """
        out = StringIO()
        src = open(filename, mode)
        try:
            old_end = (1, 0)
            for kind, token, start, end, line in tokenize.generate_tokens(src.readline):
                # Re-insert the whitespace the tokenizer skipped between the
                # previous token and this one.
                if old_end[0] == start[0]:
                    gap = start[1] - old_end[1]
                else:
                    gap = start[1]
                out.write(' ' * gap)
                old_end = end
                if not self.keep_token(kind):
                    token = self.replace_with_spaces(token)
                out.write(token)
        finally:
            src.close()
        # Rewind so callers can read from the beginning.
        out.seek(0, 0)
        return out
def get_grinpython_arg_parser(parser=None):
    """ Create the command-line parser. """
    # Start from grin's stock parser, then specialize it for Python files.
    parser = grin.get_grin_arg_parser(parser)
    parser.set_defaults(include='*.py')
    parser.description = ("Search Python code with strings, comments, and/or "
                          "code removed.")
    # Rebrand the --version output of the inherited action.
    for action in parser._actions:
        if hasattr(action, 'version'):
            action.version = 'grinpython %s' % __version__

    xform_group = parser.add_argument_group('Code Transformation')
    xform_group.add_argument('-p', '--python-code', action='store_true',
                             help="Keep non-string, non-comment Python code.")
    xform_group.add_argument('-c', '--comments', action='store_true',
                             help="Keep Python comments.")
    xform_group.add_argument('-t', '--strings', action='store_true',
                             help="Keep Python strings, especially docstrings.")
    return parser
def grinpython_main(argv=None):
    """Entry point: parse options, then grep transformed Python files."""
    if argv is None:
        # Look at the GRIN_ARGS environment variable for more arguments.
        extra_args = shlex.split(os.getenv('GRIN_ARGS', ''))
        argv = [sys.argv[0]] + extra_args + sys.argv[1:]
    args = get_grinpython_arg_parser().parse_args(argv[1:])
    if args.context is not None:
        args.before_context = args.context
        args.after_context = args.context
    args.use_color = args.force_color or (not args.no_color and
                                          sys.stdout.isatty() and
                                          (os.environ.get('TERM') != 'dumb'))

    xform = Transformer(args.python_code, args.comments, args.strings)
    searcher = grin.GrepText(grin.get_regex(args), args)
    for filename, kind in grin.get_filenames(args):
        if kind == 'text':
            # Ignore gzipped files.
            sys.stdout.write(searcher.grep_a_file(filename, opener=xform))
# Allow use as a standalone command-line tool.
if __name__ == '__main__':
    grinpython_main()
|
#### DB SETUP ####
from pocket_change.database import SQLAlchemyWrapperProxy
sqlalchemy_db = SQLAlchemyWrapperProxy()
from sneeze.database.models import add_models as sneeze_models
from pocket.database import add_models as pocket_models
model_adders = [sneeze_models, pocket_models]


#### APP CONFIG ####
from flask import Flask, render_template, request, session


class AppContextClass(Flask.app_ctx_globals_class):
    """App-context globals with a lazily created, cached JIRA client."""

    @property
    def jira(self):
        # Create the client on first access; reuse it for the context's life.
        try:
            return self._jira
        except AttributeError:
            self._jira = JiraClient({'server' : app.config['JIRA_HOST']})
            return self._jira


Flask.app_ctx_globals_class = AppContextClass
app = Flask(__name__.split()[0])
#app.config.from_envvar('POCKET_CHANGE_CONFIG')
# NOTE(review): hard-coded configuration below (database path, JIRA host
# and credentials, debug mode, secret key) should come from the
# POCKET_CHANGE_CONFIG file referenced above before deployment.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////Users/smoke/sneeze//kaichu.sqlite'
app.config['JIRA_HOST'] = 'http://localhost:48080'
app.config['JIRA_AUTH'] = 'qa_reporter;password'
app.debug = True
app.secret_key = 'why would I tell you my secret key?'


##### KAICHU ####
from pocket_change.jira_lib import Client as JiraClient
try:
    from kaichu.models import add_models as kaichu_models
except ImportError:
    kaichu_models = None
# Kaichu integration is enabled only when the package imports cleanly and
# all required JIRA settings are present.
if (JiraClient and kaichu_models
        and app.config.get('JIRA_HOST', False)
        and app.config.get('JIRA_APP_KEY', False)
        and app.config.get('JIRA_RSA_KEY_FILE', False)):
    app.config['KAICHU_ENABLED'] = True
    model_adders.append(kaichu_models)
else:
    app.config['KAICHU_ENABLED'] = False


#### DB INIT ####
sqlalchemy_db.make(app, *model_adders)

#### AUTH ####
from pocket_change.auth import login_manager
login_manager.init_app(app)

#### REST ####
from pocket_change.rest import api
from pocket_change.rest.components import load as load_rest_components
load_rest_components(app)
app.register_blueprint(api, url_prefix='/rest')

#### GUI ####
from pocket_change.ui import load as load_ui, core as core_ui
load_ui(app)
app.register_blueprint(core_ui)
if app.config['KAICHU_ENABLED']:
    from pocket_change.ui import kaichu as kaichu_ui
    app.register_blueprint(kaichu_ui)


if __name__ == '__main__':
    app.run(host='0.0.0.0')
|
# Read two integers x and y from stdin and print floor(y / x).
x, y = [int(token) for token in input().split()]
print(y // x)
ATNF/askapsdp | Tests/SelavySourcefindingService/clients/coast.py | Python | gpl-2.0 | 2,777 | 0.006122 | #!/usr/bin/env python
import sys
import os
import hashlib
import uuid
from ftplib import FTP
def usage(progname):
print "Usage:"
print progname + " upload <filename.fits>"
print "or"
print progname + " submit <filename.job>"
print "or"
print progname + " download <jobid>"
sys.exit(1)
def ftpconnect():
    """Open the module-global FTP connection to the CSIRO server.

    NOTE(review): credentials are hard-coded here; consider loading them
    from configuration instead of source.
    """
    global ftp
    ftp = FTP('ftp.atnf.csiro.au')
    ftp.login('duchamp001', 'es0Gaith')
def sendfile(localname, remotename):
    """Upload *localname* to the FTP 'incoming' area as *remotename*."""
    ftpconnect()
    ftp.cwd('incoming')
    # with-block fixes the original resource leak: the local handle was
    # opened but never closed.
    with open(localname, "rb") as src:
        ftp.storbinary("STOR " + remotename, src)
    ftp.quit()
def upload(filename):
if os.path.splitext(filename)[1] != ".fits":
print "Error: File should have fits extension"
sys.exit(1)
print "Calculating MD5 hash of file..."
# Calculate the MD5 digest for the file
m = hashlib.md5()
f = open(filename, 'rb')
while True:
t = f.read(1024)
if len(t) == 0:
break # end of file
m.update(t)
digest = m.hexdigest()
f.close
# Upload file
print "Uploading file " + filename + "..."
remotename = digest + ".fits"
sendfile(filename, remotename)
# Upload the complet | ion file (indicates completion of the first upload)
sendfile("/dev/null", remotename + ".md5")
print "Upload completed. File ID: " + remotename
def submit(filename):
f=open(filename,' | r')
lines=f.read().split('\n')
f.close()
if(len([elem for elem in lines if (len(elem.split())==3 and elem[0]!='#' and elem.split()[0]=='Selavy.email')])==0):
print "Error: No valid email address provided. Not submitting job."
sys.exit(1)
print "Submitting job " + filename + "..."
job_uuid = uuid.uuid4()
remotename = str(job_uuid) + ".job"
sendfile(filename, remotename)
# Upload the completion file (indicates completion of the first upload)
sendfile("/dev/null", remotename + ".md5")
print "Job submission complete. Job ID: " + str(job_uuid)
def download(jobid):
print "Checking for results to download..."
ftpconnect()
ftp.cwd('outgoing')
files = []
# Find all files matching the pattern
ftp.retrlines("NLST " + jobid + ".*", files.append)
if len(files) == 0:
print "No files to download for jobid " + jobid
ftp.quit()
return
for filename in files:
print "Downloading file " + filename
file = open(filename, 'wb')
ftp.retrbinary("RETR " + filename, file.write)
file.close
ftp.quit()
# main(): dispatch the requested sub-command or show usage.
if len(sys.argv) != 3:
    usage(sys.argv[0])

_commands = {'upload': upload, 'submit': submit, 'download': download}
if sys.argv[1] in _commands:
    _commands[sys.argv[1]](sys.argv[2])
else:
    usage(sys.argv[0])
|
Synerty/vortexpy | vortex/PayloadPriority.py | Python | mit | 213 | 0 | """
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT licens | e applies | .
*
* Website : http://www.synerty.com
* Support : support@synerty.com
"""
RPC_PRIORITY = 10
DEFAULT_PRIORITY = 100
|
PaulEG/Various-Projects | Sudu.py | Python | artistic-2.0 | 347 | 0.011527 | MARV 2 21 7710 36 0 70
MARV 5 01 x 0 0 0
MARV 5 02 7710 66 420 40
MARV 5 14 5710 33 315 70
MARV 5 67 7710 66 500 70
MARV 6 09 + 0 5 x 20
MARV 6 16 + 310 0 0 0
MARV 6 46 + 0 66 660 70
MARV 6 49 + x 66 0 70
MARV 6 82a 7710 66 x x
MARV 6 82b 0 0 x 70
MARV 7 27 x x 600 0
MARV 7 63 x 7 70 x
MARV 9 01 | x x 0
MARV 9 06 x 0 0 0
MARV 9 12 | 7710 66 660 70
|
ruleant/buildtime-trend | get_read_key.py | Python | gpl-3.0 | 1,578 | 0.000634 | #!/usr/bin/env python
# vim: set expandtab sw=4 ts=4:
'''
Generate a read key for Keen.io API
Usage : get_read_key.py [project_name]
When argument project_name is used, the read key will be
generated to access this project.
If no argument is given, the default project name will be used.
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from buildtimetrend.keenio import keen_io_generate_read_key
from buildtimetrend.settings import Settings
import sys
def get_read_key():
'''
Generate a read key for the project and print that key
'''
settings = Settings()
settings.load_config_file("config.yml")
# get project name from argument
if len(sys.argv) > 1:
settings.set_project_name(sys.argv[1])
# generate a read key
print keen_io_generate_read_key(settings.get_project_name())
if __name__ == "__main__":
get_read_key()
|
Lekanich/intellij-community | python/testData/console/indent10.after.py | Python | apache-2.0 | 363 | 0.00551 | x = '''Multiline starts;
next line with indent;
next line with indent;
multiline ends'''
x = '''Multiline sta | rts;
| first
second
third
fourth
fifth
multiline ends'''
x = '''Multiline starts;
#line
#line
multiline ends''' |
jborlik/AdventOfCode2015 | day24.py | Python | mit | 1,524 | 0.015748 | # -*- coding: utf-8 -*-
import itertools
def evaluateGroup(packages, grouping):
    """Score one 0/1 assignment of packages to (first group, rest).

    Returns (balanced, count, qe): *balanced* is True when the rest weighs
    exactly three times the first group, *count* is how many packages are
    in the first group, and *qe* is their product (quantum entanglement).
    """
    group_count = 0
    entanglement = 1
    totals = [0, 0]
    for i in range(len(packages)):
        side = grouping[i]
        totals[side] += packages[i]
        if side == 0:
            group_count += 1
            entanglement *= packages[i]
    return (3 * totals[0] == totals[1], group_count, entanglement)
if __name__ == "__main__":
    """Day24: Balancing"""
    with open('day24.dat') as datafile:
        packages = [int(line) for line in datafile]
    # another way of doing this:
    #packages = map(int, [line.strip("\n") for line in open('day24.dat')])
    # from: https://www.reddit.com/r/adventofcode/comments/3y1s7f/day_24_solutions/

    best_count = 10000
    best_qe = 100000000000
    best_group = None
    tested = 0
    # Brute force every assignment of each package to group 0 or group 1.
    for candidate in itertools.product(range(0, 2), repeat=len(packages)):
        tested += 1
        balanced, first_count, qe = evaluateGroup(packages, candidate)
        if balanced:
            print("{}. Equal weights. firstcount={}, qe={}".format(tested, first_count, qe))
            # Prefer fewer packages up front; break ties on the smaller QE.
            if first_count <= best_count:
                best_count = first_count
                if qe < best_qe:
                    best_qe = qe
                    best_group = candidate
    print("BEST QE={} with {} in first area. Group={}".format(best_qe, best_count, best_group))
|
bq/horus | src/horus/gui/workbench/calibration/main.py | Python | gpl-2.0 | 9,466 | 0.004543 | # -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <jesus.arroyo@bq.com>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
from horus.util import profile
from horus.gui.engine import driver, pattern, calibration_data, image_capture, image_detection, \
laser_segmentation
from horus.gui.util.video_view import VideoView
from horus.gui.workbench.workbench import Workbench
from horus.gui.workbench.calibration.panels import PatternSettings, CameraIntrinsics, \
ScannerAutocheck, LaserTriangulation, PlatformExtrinsics, VideoSettings
from horus.gui.workbench.calibration.pages.camera_intrinsics import CameraIntrinsicsPages
from horus.gui.workbench.calibration.pages.scanner_autocheck import ScannerAutocheckPages
from horus.gui.workbench.calibration.pages.laser_triangulation import LaserTriangulationPages
from horus.gui.workbench.calibration.pages.platform_extrinsics import PlatformExtrinsicsPages
class CalibrationWorkbench(Workbench):
def __init__(self, parent):
Workbench.__init__(self, parent, name=_('Calibration workbench'))
def add_panels(self):
self.add_panel(
'pattern_settings', PatternSettings, self.on_pattern_settings_selected)
self.add_panel(
'scanner_autocheck', ScannerAutocheck, self.on_scanner_autocheck_selected)
self.add_panel(
'laser_triangulation', LaserTriangulation, self.on_laser_triangulation_selected)
self.add_panel(
'platform_extrinsics', PlatformExtrinsics, self.on_platform_extrinsics_selected)
self.add_panel(
'video_settings', VideoSettings, self.on_video_settings_selected)
self.add_panel(
'camera_intrinsics', CameraIntrinsics, self.on_camera_intrinsics_selected)
def add_pages(self):
self.add_page('video_view', VideoView(self, self.get_image))
self.add_page('camera_intrinsics_pages', CameraIntrinsicsPages(
self, start_callback=self.disable_panels, exit_callback=self.update_panels))
self.add_page('scanner_autocheck_pages', ScannerAutocheckPages(
self, start_callback=self.disable_panels, exit_callback=self.update_panels))
self.add_page('laser_triangulation_pages', LaserTriangulationPages(
self, start_callback=self.disable_panels, exit_callback=self.update_panels))
self.add_page('platform_extrinsics_pages', PlatformExtrinsicsPages(
self, start_callback=self.disable_panels, exit_callback=self.update_panels))
self.pages_collection['camera_intrinsics_pages'].Hide()
self.pages_collection['scanner_autocheck_pages'].Hide()
self.pages_collection['laser_triangulation_pages'].Hide()
self.pages_collection['platform_extrinsics_pages'].Hide()
self.pages_collection['camera_intrinsics_pages'].Disable()
self.pages_collection['scanner_autocheck_pages'].Disable()
self.pages_collection['laser_triangulation_pages'].Disable()
self.pages_collection['platform_extrinsics_pages'].Disable()
if not profile.settings['view_mode_advanced']:
self.panels_collection.expandable_panels['video_settings'].Hide()
self.panels_collection.expandable_panels['camera_intrinsics'].Hide()
if profile.settings['current_panel_calibration'] == 'video_settings' or \
profile.settings['current_panel_calibration'] == 'camera_intrinsics':
self.on_pattern_settings_selected()
self.panels_collection.expandable_panels[
profile.settings['current_panel_calibration']].on_title_clicked(None)
def get_image(self):
image = image_capture.capture_pattern()
return image_detection.detect_pattern(image)
def on_open(self):
if driver.is_connected:
self.pages_collection['camera_intrinsics_pages'].Enable()
self.pages_collection['scanner_autocheck_pages'].Enable()
self.pages_collection['laser_triangulation_pages'].Enable()
self.pages_collection['platform_extrinsics_pages'].Enable()
else:
for page in self.pages_collection:
self.pages_collection[page].stop()
self.pages_collection['camera_intrinsics_pages'].Disable()
self.pages_collection['scanner_autocheck_pages'].Disable()
self.pages_collection['laser_triangulation_pages'].Disable()
self.pages_collection['platform_extrinsics_pages'].Disable()
self.panels_collection.expandable_panels[
profile.settings['current_panel_calibration']].on_title_clicked(None)
def on_close(self):
try:
for page in self.pages_collection:
self.pages_collection[page].stop()
except:
pass
def reset(self):
for page in self.pages_collection:
self.pages_collection[page].reset()
def setup_engine(self):
driver.camera.set_frame_rate(int(profile.settings['frame_rate']))
driver.camera.set_resolution(
profile.settings['camera_width'], profile.settings['camera_height'])
driver.camera.set_rotate(profile.settings['camera_rotate'])
driver.camera.set_hflip(profile.settings['camera_hflip'])
driver.camera.set_vflip(profile.settings['camera_vflip'])
driver.camera.set_luminosity(profile.settings['luminosity'])
image | _capture.set_mode_pattern()
pattern_mode = image_capture.pattern_mode
pattern_mode.set_brightness(profile.settings['brightness_pattern_calibration'])
pattern_mode.set_contrast(profile | .settings['contrast_pattern_calibration'])
pattern_mode.set_saturation(profile.settings['saturation_pattern_calibration'])
pattern_mode.set_exposure(profile.settings['exposure_pattern_calibration'])
laser_mode = image_capture.laser_mode
laser_mode.brightness = profile.settings['brightness_laser_calibration']
laser_mode.contrast = profile.settings['contrast_laser_calibration']
laser_mode.saturation = profile.settings['saturation_laser_calibration']
laser_mode.exposure = profile.settings['exposure_laser_calibration']
image_capture.set_use_distortion(profile.settings['use_distortion'])
image_capture.set_remove_background(profile.settings['remove_background_calibration'])
laser_segmentation.red_channel = profile.settings['red_channel_calibration']
laser_segmentation.threshold_enable = profile.settings['threshold_enable_calibration']
laser_segmentation.threshold_value = profile.settings['threshold_value_calibration']
laser_segmentation.blur_enable = profile.settings['blur_enable_calibration']
laser_segmentation.set_blur_value(profile.settings['blur_value_calibration'])
laser_segmentation.window_enable = profile.settings['window_enable_calibration']
laser_segmentation.window_value = profile.settings['window_value_calibration']
laser_segmentation.refinement_method = profile.settings['refinement_calibration']
pattern.rows = profile.settings['pattern_rows']
pattern.columns = profile.settings['pattern_columns']
pattern.square_width = profile.settings['pattern_square_width']
pattern.origin_distance = profile.settings['pattern_origin_distance']
width, height = driver.camera.get_resolution()
calibration_data.set_resolution(width, height)
calibration_data.camera_matrix = profile.settings['camera_matrix']
calibration_data.distortion_vector = profile.settings['distortion_vector']
def on_pattern_settings_selected(self):
profile.settings['current_panel_calibration'] = 'pattern_settings'
self._on_panel_selected(self.pages_collection['video_view'])
def on_video_settings_selected(self):
profile.settings['current_panel_calibration'] = 'video_settings'
self._on_panel_selected(self.pages_collection['video_view'])
def on_camera_intrinsics_selected(self):
profile.settings |
ad-m/taravel | taravel/orders/migrations/0001_initial.py | Python | mit | 1,251 | 0.003997 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 23:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = | True
dependencies = [
('trips', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models | .AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Creation date')),
('paid', models.DateField(blank=True, null=True, verbose_name='Date of payment')),
('trip', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.Trip', verbose_name='Trip')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Order',
'verbose_name_plural': 'Orders',
},
),
]
|
kantel/processingpy | sketches/modes/PythonMode/examples/Topics/Simulate/MultipleParticleSystems/particle_system.py | Python | mit | 1,129 | 0.000886 | # A list is used to manage the list of Particles.
from particle import Particle
from crazy_particle import CrazyParticle
class ParticleSystem(object):
def __init__(self, num, v):
self.particles = [] # Initialize the list.
self.origin = v.get() # Store the origin point.
for i in range(num):
# Add "num" amount of particles to the list.
self.particles.append(Particle(self.origin))
def run(self):
| # Cycle through the list backwards, because we are deleting while
# iterating.
| for i in reversed(range(len(self.particles))):
p = self.particles[i]
p.run()
if p.isDead():
del self.particles[i]
def addParticle(self):
p = None
# Add either a Particle or CrazyParticle to the system.
if int(random(0, 2)) == 0:
p = Particle(self.origin)
else:
p = CrazyParticle(self.origin)
self.particles.append(p)
# A method to test if the particle system still has particles.
def dead(self):
return self.particles.isEmpty()
|
anhstudios/swganh | data/scripts/templates/object/tangible/loot/loot_schematic/shared_spear_rack_schematic.py | Python | mit | 479 | 0.045929 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS | MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/loot_schematic/shared_spear_rack_schematic.iff"
result.attribute_template_id = -1
result.stfName("craft_item_ingredients_n","spear_rack")
#### | BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
mikeshultz/whatthediff | whatthediff/forms.py | Python | gpl-2.0 | 818 | 0.013447 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import WhatTheUser
class RegistrationForm(UserCreationForm):
#first_name = forms.CharField()
#last_name = forms.CharField | ()
#email = forms.EmailField()
#password1 = forms.CharField()
#password2 = forms.CharField()
class Meta:
model = WhatTh | eUser
fields = ['email', 'first_name', 'last_name', ]
#fields = [first_name, last_name, email, password1, password2]
"""def clean(self):
cleaned_data = super(RegistrationtForm, self).clean()
if cleaned_data.get('password1') is None \
or cleaned_data.get('password1') == cleaned_data.get('password2'):
raise forms.ValidationError('Passwords must be provided and they must match.')
""" |
mwkang/zeppelin | flink/src/main/resources/python/zeppelin_pyflink.py | Python | apache-2.0 | 2,045 | 0.004401 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License f | or the specific language governing permissions and
# limitat | ions under the License.
#
from pyflink.common import *
from pyflink.dataset import *
from pyflink.datastream import *
from pyflink.table import *
from pyflink.table.catalog import *
from pyflink.table.descriptors import *
from pyflink.table.window import *
import pyflink
from py4j.java_gateway import java_import
intp = gateway.entry_point
pyflink.java_gateway._gateway = gateway
pyflink.java_gateway.import_flink_view(gateway)
pyflink.java_gateway.install_exception_handler()
b_env = pyflink.dataset.ExecutionEnvironment(intp.getJavaExecutionEnvironment())
bt_env = BatchTableEnvironment.create(b_env)
s_env = StreamExecutionEnvironment(intp.getJavaStreamExecutionEnvironment())
st_env = StreamTableEnvironment.create(s_env)
from zeppelin_context import PyZeppelinContext
#TODO(zjffdu) merge it with IPyFlinkZeppelinContext
class PyFlinkZeppelinContext(PyZeppelinContext):
def __init__(self, z, gateway):
super(PyFlinkZeppelinContext, self).__init__(z, gateway)
def show(self, obj):
from pyflink.table import Table
if isinstance(obj, Table):
print(self.z.showData(obj._j_table))
else:
super(PyFlinkZeppelinContext, self).show(obj)
z = __zeppelin__ = PyFlinkZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
|
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/job_queue/job_queue_manager.py | Python | gpl-3.0 | 2,799 | 0.01965 | # Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'job_queue_manager.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module implementing a queue for managing external jobs.
"""
import ctypes
from types import StringType, IntType
import time
from ert.cwrap import BaseCClass, CWrapper
from ert.job_queue import JOB_QUEUE_LIB, Job, JobStatusType
class JobQueueManager(BaseCClass):
def __init__(self, queue):
c_ptr = JobQueueManager.cNamespace().alloc(queue)
super(JobQueueManager, self).__init__(c_ptr)
def startQueue(self , total_size , verbose = False):
JobQueueManager.cNamespace().start_queue( self , total_size , verbose )
def getNumRunning(self):
return JobQueueManager.cNamespace().get_num_running( self )
def getNumComplete(self):
return JobQueueManager.cNamespace().get_num_complete( self )
def isRunning(self):
re | turn JobQueueManager.cNamespace().is_running( self )
def free(self):
JobQueueManager.cNamespace().free(self)
def jobComplete(self , job_index):
return JobQueueManager.cNamespace().job_complete( self , job_index )
#################################################################
cwrapper = CWrapper(JOB_QUEUE_LIB)
cwrapper.registerObjec | tType("job_queue_manager", JobQueueManager)
JobQueueManager.cNamespace().alloc = cwrapper.prototype("c_void_p job_queue_manager_alloc( job_queue) ")
JobQueueManager.cNamespace().free = cwrapper.prototype("void job_queue_manager_free( job_queue_manager )")
JobQueueManager.cNamespace().start_queue = cwrapper.prototype("void job_queue_manager_start_queue( job_queue_manager , int , bool)")
JobQueueManager.cNamespace().get_num_running = cwrapper.prototype("int job_queue_manager_get_num_running( job_queue_manager )")
JobQueueManager.cNamespace().get_num_complete = cwrapper.prototype("int job_queue_manager_get_num_complete( job_queue_manager )")
JobQueueManager.cNamespace().is_running = cwrapper.prototype("bool job_queue_manager_is_running( job_queue_manager )")
JobQueueManager.cNamespace().job_complete = cwrapper.prototype("bool job_queue_manager_job_complete( job_queue_manager , int)")
|
edx/edx-analytics-dashboard | analytics_dashboard/learner_analytics_api/v0/views.py | Python | agpl-3.0 | 6,267 | 0.002074 | from requests.exceptions import ConnectTimeout
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .clients import LearnerAPIClient
from .permissions import HasCourseAccessPermission
from .renderers import TextRenderer
# TODO: Consider caching responses from the data api when working on AN-6157
class BaseLearnerApiView(RetrieveAPIView):
permission_classes = (IsAuthenticated, HasCourseAccessPermission,)
# Serialize the the Learner Analytics API response to JSON, by default.
serializer_type = 'json'
# Do not return the HTTP headers from the Data API, by default.
# This will be further investigated in AN-6928.
include_headers = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = LearnerAPIClient(serializer_type=self.serializer_type)
def get_queryset(self):
"""
DRF requires that we override this method. Since we don't actually use
querysets/django models in this API, this method doesn't have to return
anything.
"""
@property
def course_id(self):
"""
Gets the course_id either from the URL or the querystring parameters.
"""
course_id = getattr(self.request, 'course_id')
if not course_id:
course_id = self.request.query_params.get('course_id')
return course_id
def get(self, request, *args, **kwargs):
"""
Return the response from the Data API.
"""
api_response = self.get_api_response(request, *args, **kwargs)
response_kwargs = dict(
data=api_response.serialized_content,
status=api_response.status_code,
)
if self.include_headers:
response_kwargs['headers'] = api_response.headers
return Response(**response_kwargs)
def get_api_response(self, request, *args, **kwargs):
"""
Fetch the response from the API.
Must be implemented by subclasses.
"""
raise NotImplementedError('Override this method to return the Learner Analytics API response for this view.')
def handle_exception(self, exc):
"""
Handles timeouts raised by the API client by returning an HTTP
504.
"""
if isinstance(exc, ConnectTimeout):
return Response(
data={'developer_message': 'Learner Analytics API timed out.', 'error_code': 'analytics_api_timeout'},
status=504
)
return super().handle_exception(exc)
class DownloadLearnerApiViewMixin:
"""
Requests text/csv data from the Learner Analytics API, and ensures that the REST framework returns it unparsed,
including the response headers.
"""
include_headers = True
content_type = 'text/csv'
serializer_type = 'text'
def get_api_response(self, request, **kwargs):
"""
Sets the HTTP_ACCEPT header on the request to tell the Learner Analytics API which format to return its data in.
And tells the REST framework to render as text. NB: parent class must also define get_api_response()
"""
request.META['Accept'] = self.content_type
request.accepted_renderer = TextRenderer()
return super().get_api_response(request, **kwargs)
class NotFoundLearnerApiViewMixin:
"""
Returns 404s rather than 403s when PermissionDenied exceptions are raised.
"""
@property
def not_found_developer_message(self):
raise NotImplementedError('Override this attribute t | o define the developer message returned with 404s.')
@property
def not_found_error_code(self):
raise NotImplementedError('Override this attribute to define the error_code string returned with 404s.')
def handle_exception(self, exc):
if isinstance(exc, PermissionDenied):
return Response(
data={'developer_message': self.not_found_developer_message, 'error_code': self.not_found_error_code},
status=404
)
| return super().handle_exception(exc)
class LearnerDetailView(NotFoundLearnerApiViewMixin, BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Learner Detail endpoint.
"""
not_found_error_code = 'no_learner_for_course'
@property
def not_found_developer_message(self):
message = 'Learner {} not found'.format(self.kwargs.get('username', ''))
message += f'for course {self.course_id}.' if self.course_id else '.'
return message
def get_api_response(self, request, username, **kwargs):
return self.client.learners(username).get(**request.query_params)
class LearnerListView(BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Learner List endpoint.
"""
def get_api_response(self, request, **kwargs):
return self.client.learners.get(**request.query_params)
class LearnerListCSV(DownloadLearnerApiViewMixin, LearnerListView):
"""
Forwards text/csv requests to the Learner Analytics API's Learner List endpoint,
and returns a simple text response.
"""
class EngagementTimelinesView(NotFoundLearnerApiViewMixin, BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Engagement Timeline
endpoint.
"""
not_found_error_code = 'no_learner_engagement_timeline'
@property
def not_found_developer_message(self):
message = 'Learner {} engagement timeline not found'.format(self.kwargs.get('username', ''))
message += f'for course {self.course_id}.' if self.course_id else '.'
return message
def get_api_response(self, request, username, **kwargs):
return self.client.engagement_timelines(username).get(**request.query_params)
class CourseLearnerMetadataView(BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Course Metadata endpoint.
"""
def get_api_response(self, request, course_id, **kwargs):
return self.client.course_learner_metadata(course_id).get(**request.query_params)
|
Kronuz/pyXapiand | xapiand/server/base.py | Python | gpl-2.0 | 11,482 | 0.001742 | from __future__ import unicode_literals, absolute_import, print_function
import time
import logging
import weakref
import threading
from hashlib import md5
from functools import wraps
from gevent import socket
from gevent.server import StreamServer
from gevent.threadpool import ThreadPool
from ..utils import format_time, sendall, readline
class QuitCommand(Exception) | :
pass
class DeadException(Exception):
def __init__(self, command):
self.command = command
class AliveCommand(object):
| """
Raises DeadException if the object's cmd_id id is not the same
as it was when the object was created.
"""
cmds_duration = 0
cmds_start = 0
cmds_count = 0
def __init__(self, parent, cmd, origin):
parent.cmd_id = getattr(parent, 'cmd_id', 0) + 1
self.parent = parent
self.cmd_id = parent.cmd_id
self.cmd = cmd
self.origin = origin
self.log = parent.log
self.start = time.time()
def __nonzero__(self):
if self.cmd_id == self.parent.cmd_id:
return False
raise DeadException(self)
def executed(self, results, message="Executed command %d", logger=None):
if logger is None:
logger = self.log.debug
now = time.time()
cmd_duration = now - self.start
AliveCommand.cmds_duration += cmd_duration
AliveCommand.cmds_count += 1
logger(
"%s %s%s by %s ~%s (%0.3f cps)",
message % self.cmd_id,
self.cmd,
" -> %s" % results if results is not None else "",
self.origin,
format_time(cmd_duration),
AliveCommand.cmds_count / AliveCommand.cmds_duration,
)
if now - AliveCommand.cmds_start > 2 or AliveCommand.cmds_count >= 10000:
AliveCommand.cmds_start = now
AliveCommand.cmds_duration = 0
AliveCommand.cmds_count = 0
def cancelled(self):
self.executed(None, message="Command %d cancelled", logger=self.log.warning)
def error(self, e):
self.executed(e, message="Command %d ERROR", logger=self.log.error)
def command(threaded=False, **kwargs):
def _command(func):
func.command = func.__name__
func.threaded = threaded
for attr, value in kwargs.items():
setattr(func, attr, value)
if func.threaded:
@wraps(func)
def wrapped(self, command, client_socket, *args, **kwargs):
current_thread = threading.current_thread()
tid = current_thread.name.rsplit('-', 1)[-1]
current_thread.name = '%s-%s-%s' % (self.client_id[:14], command.cmd, tid)
# Create a gevent socket for this thread from the other tread's socket
# (using the raw underlying socket, '_sock'):
self.client_socket = socket.socket(_sock=client_socket._sock)
try:
command.executed(func(self, *args, **kwargs))
except (IOError, RuntimeError, socket.error) as e:
command.error(e)
except DeadException:
command.cancelled()
return wrapped
else:
return func
if callable(threaded):
func, threaded = threaded, False
return _command(func)
return _command
class ClientReceiver(object):
delimiter = b'\r\n'
def __init__(self, server, client_socket, address, log=logging,
encoding='utf-8', encoding_errors='strict'):
self.log = log
self._server = weakref.ref(server)
self.address = address
self.local = threading.local()
self.client_socket = client_socket
self.closed = False
self.encoding = encoding
self.encoding_errors = encoding_errors
self.cmd_id = 0
self.activity = time.time()
self.client_id = ("Client-%s" % md5('%s:%s' % (address[0], address[1])).hexdigest())
current_thread = threading.current_thread()
tid = current_thread.name.rsplit('-', 1)[-1]
current_thread.name = '%s-%s' % (self.client_id[:14], tid)
@property
def server(self):
return self._server()
def close(self):
self.closed = True
@property
def client_socket(self):
return self.local.client_socket
@client_socket.setter
def client_socket(self, value):
self.local.client_socket = value
def handle(self):
for line in readline(self.client_socket, encoding=self.encoding, encoding_errors=self.encoding_errors):
if not line or self.closed:
break
try:
self.lineReceived(line)
except QuitCommand:
break
def dispatch(self, func, line, command):
if func.threaded:
commands_pool = self.server.pool
pool_size = self.server.pool_size
pool_size_warning = self.server.pool_size_warning
commands_pool.spawn(func, command, self.client_socket, line, command)
pool_used = len(commands_pool)
if not (pool_size_warning - pool_used) % 10:
self.log.warning("Commands pool is close to be full (%s/%s)", pool_used, pool_size)
elif pool_used == pool_size:
self.log.error("Commands poll is full! (%s/%s)", pool_used, pool_size)
else:
try:
command.executed(func(line))
except (IOError, RuntimeError, socket.error) as e:
command.error(e)
except DeadException:
command.cancelled()
def connectionMade(self, client):
pass
def connectionLost(self, client):
pass
def sendLine(self, line):
line += self.delimiter
if line[0] not in ("#", " "):
line = "%s. %s" % (self.cmd_id, line)
sendall(self.client_socket, line, encoding=self.encoding, encoding_errors=self.encoding_errors)
def lineReceived(self, line):
self.activity = time.time()
class CommandServer(StreamServer):
receiver_class = ClientReceiver
pool_size = 10
def __init__(self, *args, **kwargs):
self.log = kwargs.pop('log', logging)
self.pool_size = kwargs.pop('pool_size', self.pool_size)
super(CommandServer, self).__init__(*args, **kwargs)
self.pool_size_warning = int(self.pool_size / 3.0 * 2.0)
self.pool = ThreadPool(self.pool_size)
self.clients = set()
def build_client(self, client_socket, address):
return self.receiver_class(self, client_socket, address, log=self.log)
def handle(self, client_socket, address):
client = self.build_client(client_socket, address)
self.clients.add(client)
client.connectionMade(client)
try:
client.handle()
finally:
self.clients.discard(client)
client.connectionLost(client)
def close(self, max_age=None):
if self.closed:
if max_age is None:
self.log.error("Forcing server shutdown (%s clients)...", len(self.clients))
else:
if max_age is None:
max_age = 10
self.log.warning("Hitting Ctrl+C again will terminate all running tasks!")
super(CommandServer, self).close()
now = time.time()
clean = []
for client in self.clients:
if max_age is None or client._weak or now - client.activity > max_age:
try:
client.client_socket._sock.close()
except AttributeError:
pass
clean.append(client)
for client in clean:
self.clients.discard(client)
return not bool(self.clients)
class CommandReceiver(ClientReceiver):
welcome = "# Welcome to the server! Type quit to exit."
def __init__(self, *args, **kwargs):
self._weak = False
super(CommandReceiver, self).__init__(*args, **kwargs)
def connectionMade(self, client):
self.log.info("New connection from %s: %s:%d (%d open |
ministryofjustice/cla_backend | cla_backend/apps/core/routers.py | Python | mit | 1,336 | 0 | from rest_framework | .routers import DefaultRouter, Route
class SingletonRouter(DefaultRouter):
"""
Use this router instead of the DRF DefaultRouter if you have
only one resource accessible from an endpoint.
This gives you the following urls:
* prefix/
GET: returns 404 or the object
POST: creates the object if it doesn't exist
PUT: updates the obj | ect
PATCH: updates the object partially
DELETE: deletes the object
* prefix/<method>/
used for @action and @link methods (NOTE: not tested yet)
"""
routes = [
# List route.
Route(
url=r"^{prefix}{trailing_slash}$",
mapping={
"get": "retrieve",
"post": "create",
"put": "update",
"patch": "partial_update",
"delete": "destroy",
},
name="{basename}-detail",
initkwargs={"suffix": "Instance"},
),
# Dynamically generated routes.
# Generated using @action or @link decorators on methods of the viewset
Route(
url=r"^{prefix}/{methodname}{trailing_slash}$",
mapping={"{httpmethod}": "{methodname}"},
name="{basename}-{methodnamehyphen}",
initkwargs={},
),
]
|
andybond13/fontina | fontina.py | Python | gpl-2.0 | 7,414 | 0.054222 | #!/usr/bin/python
import csv
import datetime
import os
import copy
class Trip:
dols = []
dists = []
gals = []
octs = []
eths = []
drivers = []
tires = []
miles = 0
gallons = 0
actualGals = 0
days = 0
octane = 0
snowtires = | 0
make = 0
model = 0
year = 0
engineIV = 0
enginecyl = 0
engineL = 0
ethanol = 0
driver = 0
avgMileage = 0
beginDate = 0
hybrid = 0
def write(self):
out = [self.miles,self.gallons,self.actualGals,self.dollars,self.days,sel | f.octane,self.snowtires,self.make,self.model,self.year,self.engineIV,self.enginecyl,self.engineL,self.ethanol,self.driver,self.avgMileage,self.beginDate,self.hybrid]
return out
def clear(self):
self.dols[:] = []
self.dists[:] = []
self.gals[:] = []
self.octs[:] = []
self.eths[:] = []
self.drivers[:] = []
self.tires[:] = []
self.miles = 0
self.gallons = 0
self.actualGals = 0
self.days = 0
self.octane = 0
self.snowtires = 0
self.make = 0
self.model = 0
self.year = 0
self.engineIV = 0
self.enginecyl = 0
self.engineL = 0
self.ethanol = 0
self.driver = ""
self.avgMileage = 0
self.beginDate = 0
self.hybrid = 0
def wavg(series, weight):
avg = 0
if (weight[0] <= 0):
weight = weight[1:]
assert(len(series) == len(weight))
for i in range(len(weight)):
avg += float(series[i])*float(weight[i])/float(sum(weight))
return avg
def octaneCode(inOct):
if (inOct == 1):
return 87;
elif (inOct == 2):
return 89;
elif (inOct == 3):
return 93;
else:
print "Unknown octane code", inOct
assert(1 == 0)
def driverCode(driver):
if (driver == "Mark"):
return 0
elif (driver == "Mary"):
return 1
elif (driver == "Andy"):
return 2
elif (driver == "Jeff"):
return 3
else:
print "Unknown driver: ", driver
assert(1 == 0)
def makeCode(make):
if (make == "Chevrolet"):
return 0
elif (make == "Buick"):
return 1
elif (make == "Oldsmobile"):
return 2
elif (make == "Mercury"):
return 3
elif (make == "Plymouth"):
return 4
elif (make == "Volkswagen"):
return 5
elif (make == "Toyota"):
return 6
elif (make == "Honda"):
return 7
else:
print "Unknown make: ", make
assert(1 == 0)
def modelCode(model):
if (model == "Concourse"):
return 0
elif (model == "Vega"):
return 1
elif (model == "Century"):
return 2
elif (model == "Cierra"):
return 3
elif (model == "Sable"):
return 4
elif (model == "Voyager"):
return 5
elif (model == "Highlander"):
return 6
elif (model == "CRV"):
return 7
elif (model == "Jetta"):
return 8
else:
print "Unknown model: ", model
assert(1 == 0)
def gasTank(model,year):
if (model == "Concourse"):
return 21.0
elif (model == "Vega"):
return 16.0
elif (model == "Century"):
return 15.0
elif (model == "Cierra"):
return 15.7
elif (model == "Sable"):
return 16.0
elif (model == "Voyager"):
return 20.0
elif (model == "Highlander"):
if (year == 2003):
return 19.8
elif (year == 2008):
return 17.2
elif (model == "CRV"):
return 15.3
elif (model == "Jetta"):
return 14.5
else:
print "Unknown model: ", model
assert(1 == 0)
def dateMaker(date):
start = 0
while date.find("/",start) > -1:
start = date.find("/",start) + 1
year = date[start:]
if len(year) == 2:
if (int(year) > 50):
year = 1900 + int(year)
if (int(year) <= 50):
year = 2000 + int(year)
return date[0:start] + str(year)
def check(fill,gastype,driver,snowtires,ethanol,hybrid):
assert(fill == 0 or fill == 1)
assert(gastype == 1 or gastype == 2 or gastype == 3)
assert(driver == "Andy" or driver == "Mark" or driver == "Mary" or driver == "Jeff")
assert(snowtires == 0 or snowtires == 1)
assert(ethanol == 0 or ethanol == 1)
assert(hybrid == 0 or hybrid == 1)
#ethanol
def checkTrip(a):
a.miles = sum(a.dists)
a.dollars = sum(a.dols)
a.actualGals = sum(i for i in a.gals if i > 0)
a.gallons = sum(a.gals)
a.octane = wavg(a.octs,a.gals)
print "octane",a.octane
a.ethanol = wavg(a.eths,a.gals)
print "ethanol",a.ethanol
a.snowtires = wavg(a.tires,a.dists)
a.driver = sorted(a.drivers)[len(a.drivers)/2]
print a.beginDate
assert(min(a.dists) > 0)
assert(min(a.dols) > 0)
assert(a.days > 0)
assert(a.miles > 0)
assert(a.dollars > 0)
assert(a.gallons > 0)
def checkInterTrip(a,b):
print a.beginDate
print "mpg: ", a.miles/a.gallons, b.miles/b.gallons
print "price: ", a.dollars/a.actualGals, b.dollars/b.actualGals
if(abs((a.miles/a.gallons)/(b.miles/b.gallons) - 1) > 0.5):
status = raw_input("Press Enter to continue... (mpg)")
if(abs((a.dollars/a.actualGals)/(b.dollars/b.actualGals) - 1) > 0.2):
status = raw_input("Press Enter to continue... (price)")
print ""
def main(dir,outfile):
    # Read every .csv fuel log in `dir`, split the rows into trips
    # delimited by fill-up records, validate each trip, and write the
    # aggregated trips to `outfile`.
    trips = []
    for file in os.listdir(dir):
        if not file.endswith('.csv'):
            continue
        print file
        # 'rU' = universal-newline read mode (Python 2 idiom).
        f = open(dir+file,'rU')
        datareader = csv.reader(f, dialect = csv.excel_tab)
        lineNum = 0
        beginMiles = 0
        beginDate = 0
        for row in datareader:
            lineNum += 1
            # Each tab-dialect row comes back as a one-element list; strip
            # the repr brackets/quotes and split the payload on commas.
            line = str(row)
            line = line[2:-2].split(',')
            if (line[0] == "Date"):
                # Header row.
                continue
            date = dateMaker(str(line[0]))
            odometer = int(line[1])
            # fill: -1 marks a trip start, 1 a full fill-up (trip end);
            # other values are partial fills -- TODO confirm against data.
            fill = int(line[2])
            gastype = int(line[3])
            gallons = float(line[4])
            dollars = float(line[5])
            driver = str(line[6])
            snowtires = int(line[7])
            ethanol = int(line[8])
            make = str(line[9])
            model = str(line[10])
            year = int(line[11])
            engineL = float(line[12])
            enginecyl = int(line[13])
            engineIV = int(line[14])
            hybrid = int(line[15])
            if (fill == -1):
                #begin trip
                #make trip opject
                # NOTE(review): `a` is only created here; a file whose
                # first data row is not fill == -1 would hit a NameError
                # below -- verify the input format guarantees this.
                a = Trip()
                beginMiles = odometer
                beginDate = date
                # Baseline tank assumed to be 87 octane, ethanol-free
                # unless the car is 1994 or newer.
                beginOctane = 87
                beginEthanol = 0
                if (year >= 1994):
                    beginEthanol = 1
                a.gals.append(gallons)
            else:
                #check and add to trip
                a.dols.append(dollars)
                a.gals.append(gallons)
                a.dists.append(odometer - beginMiles)
                a.octs.append(beginOctane)
                a.eths.append(beginEthanol)
                a.drivers.append(driverCode(driver))
                a.tires.append(snowtires)
                check(fill,gastype,driver,snowtires,ethanol,hybrid)
                beginMiles = odometer
                #update gas contents
                # Mix the new fuel into what remained in the tank to track
                # the running octane/ethanol blend.
                tank = gasTank(model, year)
                beginOctane = (gallons * octaneCode(gastype) + (tank - gallons) * beginOctane) / tank
                beginEthanol = (gallons * ethanol + (tank - gallons) * beginEthanol) / tank
            if (fill == 1):
                #end trip
                tripMiles = sum(a.dists)
                dateobj1 = datetime.datetime.strptime(beginDate,'%m/%d/%Y').date()
                dateobj2 = datetime.datetime.strptime(date,'%m/%d/%Y').date()
                tripDate = dateobj2 - dateobj1
                tripDays = tripDate.days
                if (tripDays == 0):
                    # A same-day trip still counts as one day.
                    tripDays += 1
                a.days = tripDays
                a.make = makeCode(make)
                a.model = modelCode(model)
                a.year = year
                a.engineIV = engineIV
                a.enginecyl = enginecyl
                a.engineL = engineL
                a.beginDate = beginDate
                a.hybrid = hybrid
                # Odometer reading at the midpoint of the trip.
                a.avgMileage = odometer - 0.5*tripMiles
                #check and save trip
                checkTrip(a)
                if (len(trips) > 0):
                    checkInterTrip(a,trips[-1])
                trips.append(copy.deepcopy(a))
                #reset dollars and gallons
                #make trip opject
                a.clear()
                beginDate = date
                beginMiles = odometer
    # Write one CSV row per trip.  NOTE(review): neither `f` nor `fo` is
    # ever closed explicitly.
    fo = open(outfile,'wb')
    datareader = csv.writer(fo, delimiter=',')
    #print trips
    for thisTrip in trips:
        out = thisTrip.write()
        datareader.writerow(out)
if __name__ == "__main__":
    # Guard added: the module previously ran main() (which reads ./raw/
    # and writes ./car_data.csv) as a side effect of being imported.
    # Default locations: raw per-car CSV logs and the aggregated output.
    dir = './raw/'
    outfile = './car_data.csv'
    main(dir,outfile)
|
hubenjm/Project-Euler | problem18.py | Python | mit | 992 | 0.044355 | datafile1 = "./problem18data.t | xt"
datafile2 = "./problem67data.txt"
def getdata(filename):
    """Parse a triangle file: one space-separated row of ints per line."""
    rows = []
    with open(filename, 'r') as handle:
        for raw_line in handle:
            rows.append([int(token.strip()) for token in raw_line.split(" ")])
    return rows
def read_data_test():
    """Smoke test: parse the problem-18 data file and echo it.

    Bug fix: the original referenced an undefined name ``datafile``
    (NameError at call time); the module defines ``datafile1`` and
    ``datafile2``.
    """
    l = getdata(datafile1)
    # Parenthesized print produces the same output on Python 2 and 3.
    print(l)
def longest_path(filename):
    """Return the maximum top-to-bottom path sum of the triangle in `filename`.

    Row-by-row dynamic programme: ``best[k]`` holds the best path sum
    ending at element k of the most recently processed row.
    """
    # Inlined file parsing: one space-separated row of ints per line.
    with open(filename, 'r') as handle:
        triangle = [[int(tok.strip()) for tok in line.split(" ")]
                    for line in handle]
    best = triangle[0]
    for row in triangle[1:]:
        # Edge elements have a single parent; interior ones pick the
        # better of their two parents.
        extended = [row[0] + best[0]]
        for k in range(1, len(row) - 1):
            extended.append(row[k] + max(best[k - 1], best[k]))
        extended.append(row[-1] + best[-1])
        best = extended
    return max(best)
if __name__=="__main__":
    # Parenthesized print is valid and equivalent on both Python 2 and
    # Python 3 for a single argument (the original py2-only statement
    # form was the sole py3 incompatibility in this module).
    print(longest_path(datafile1))
    print(longest_path(datafile2))
|
jaraco/paramiko | paramiko/common.py | Python | lgpl-2.1 | 8,105 | 0 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Common constants and global variables.
"""
import logging
from paramiko.py3compat import byte_chr, PY2, long, b
# SSH2 message type codes.  Note that codes 60-61 are deliberately
# overloaded: their meaning depends on the authentication method in
# flight (publickey pk-ok, keyboard-interactive info-request/response,
# or GSS-API response/token), hence the three overlapping range(60, 62)
# assignments below.
MSG_DISCONNECT, MSG_IGNORE, MSG_UNIMPLEMENTED, MSG_DEBUG, \
    MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT = range(1, 7)
MSG_KEXINIT, MSG_NEWKEYS = range(20, 22)
MSG_USERAUTH_REQUEST, MSG_USERAUTH_FAILURE, MSG_USERAUTH_SUCCESS, \
    MSG_USERAUTH_BANNER = range(50, 54)
MSG_USERAUTH_PK_OK = 60
MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE = range(60, 62)
MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN = range(60, 62)
MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, MSG_USERAUTH_GSSAPI_ERROR,\
    MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC = range(63, 67)
HIGHEST_USERAUTH_MESSAGE_ID = 79
MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE = range(80, 83)
MSG_CHANNEL_OPEN, MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, \
    MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_DATA, MSG_CHANNEL_EXTENDED_DATA, \
    MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE, MSG_CHANNEL_REQUEST, \
    MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE = range(90, 101)
# Single-byte (packed) forms of each message code above, ready to be
# written as the first byte of an outgoing packet payload.
cMSG_DISCONNECT = byte_chr(MSG_DISCONNECT)
cMSG_IGNORE = byte_chr(MSG_IGNORE)
cMSG_UNIMPLEMENTED = byte_chr(MSG_UNIMPLEMENTED)
cMSG_DEBUG = byte_chr(MSG_DEBUG)
cMSG_SERVICE_REQUEST = byte_chr(MSG_SERVICE_REQUEST)
cMSG_SERVICE_ACCEPT = byte_chr(MSG_SERVICE_ACCEPT)
cMSG_KEXINIT = byte_chr(MSG_KEXINIT)
cMSG_NEWKEYS = byte_chr(MSG_NEWKEYS)
cMSG_USERAUTH_REQUEST = byte_chr(MSG_USERAUTH_REQUEST)
cMSG_USERAUTH_FAILURE = byte_chr(MSG_USERAUTH_FAILURE)
cMSG_USERAUTH_SUCCESS = byte_chr(MSG_USERAUTH_SUCCESS)
cMSG_USERAUTH_BANNER = byte_chr(MSG_USERAUTH_BANNER)
cMSG_USERAUTH_PK_OK = byte_chr(MSG_USERAUTH_PK_OK)
cMSG_USERAUTH_INFO_REQUEST = byte_chr(MSG_USERAUTH_INFO_REQUEST)
cMSG_USERAUTH_INFO_RESPONSE = byte_chr(MSG_USERAUTH_INFO_RESPONSE)
cMSG_USERAUTH_GSSAPI_RESPONSE = byte_chr(MSG_USERAUTH_GSSAPI_RESPONSE)
cMSG_USERAUTH_GSSAPI_TOKEN = byte_chr(MSG_USERAUTH_GSSAPI_TOKEN)
cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = \
    byte_chr(MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE)
cMSG_USERAUTH_GSSAPI_ERROR = byte_chr(MSG_USERAUTH_GSSAPI_ERROR)
cMSG_USERAUTH_GSSAPI_ERRTOK = byte_chr(MSG_USERAUTH_GSSAPI_ERRTOK)
cMSG_USERAUTH_GSSAPI_MIC = byte_chr(MSG_USERAUTH_GSSAPI_MIC)
cMSG_GLOBAL_REQUEST = byte_chr(MSG_GLOBAL_REQUEST)
cMSG_REQUEST_SUCCESS = byte_chr(MSG_REQUEST_SUCCESS)
cMSG_REQUEST_FAILURE = byte_chr(MSG_REQUEST_FAILURE)
cMSG_CHANNEL_OPEN = byte_chr(MSG_CHANNEL_OPEN)
cMSG_CHANNEL_OPEN_SUCCESS = byte_chr(MSG_CHANNEL_OPEN_SUCCESS)
cMSG_CHANNEL_OPEN_FAILURE = byte_chr(MSG_CHANNEL_OPEN_FAILURE)
cMSG_CHANNEL_WINDOW_ADJUST = byte_chr(MSG_CHANNEL_WINDOW_ADJUST)
cMSG_CHANNEL_DATA = byte_chr(MSG_CHANNEL_DATA)
cMSG_CHANNEL_EXTENDED_DATA = byte_chr(MSG_CHANNEL_EXTENDED_DATA)
cMSG_CHANNEL_EOF = byte_chr(MSG_CHANNEL_EOF)
cMSG_CHANNEL_CLOSE = byte_chr(MSG_CHANNEL_CLOSE)
cMSG_CHANNEL_REQUEST = byte_chr(MSG_CHANNEL_REQUEST)
cMSG_CHANNEL_SUCCESS = byte_chr(MSG_CHANNEL_SUCCESS)
cMSG_CHANNEL_FAILURE = byte_chr(MSG_CHANNEL_FAILURE)
# for debugging:
# Human-readable names for SSH message codes, used only in log output.
MSG_NAMES = {
    MSG_DISCONNECT: 'disconnect',
    MSG_IGNORE: 'ignore',
    MSG_UNIMPLEMENTED: 'unimplemented',
    MSG_DEBUG: 'debug',
    MSG_SERVICE_REQUEST: 'service-request',
    MSG_SERVICE_ACCEPT: 'service-accept',
    MSG_KEXINIT: 'kexinit',
    MSG_NEWKEYS: 'newkeys',
    # Codes 30-41 are key-exchange-method specific and have no single name.
    30: 'kex30',
    31: 'kex31',
    32: 'kex32',
    33: 'kex33',
    34: 'kex34',
    40: 'kex40',
    41: 'kex41',
    MSG_USERAUTH_REQUEST: 'userauth-request',
    MSG_USERAUTH_FAILURE: 'userauth-failure',
    MSG_USERAUTH_SUCCESS: 'userauth-success',
    # Fixed typo: was 'userauth--banner' (double dash) in the debug name.
    MSG_USERAUTH_BANNER: 'userauth-banner',
    MSG_USERAUTH_PK_OK: 'userauth-60(pk-ok/info-request)',
    MSG_USERAUTH_INFO_RESPONSE: 'userauth-info-response',
    MSG_GLOBAL_REQUEST: 'global-request',
    MSG_REQUEST_SUCCESS: 'request-success',
    MSG_REQUEST_FAILURE: 'request-failure',
    MSG_CHANNEL_OPEN: 'channel-open',
    MSG_CHANNEL_OPEN_SUCCESS: 'channel-open-success',
    MSG_CHANNEL_OPEN_FAILURE: 'channel-open-failure',
    MSG_CHANNEL_WINDOW_ADJUST: 'channel-window-adjust',
    MSG_CHANNEL_DATA: 'channel-data',
    MSG_CHANNEL_EXTENDED_DATA: 'channel-extended-data',
    MSG_CHANNEL_EOF: 'channel-eof',
    MSG_CHANNEL_CLOSE: 'channel-close',
    MSG_CHANNEL_REQUEST: 'channel-request',
    MSG_CHANNEL_SUCCESS: 'channel-success',
    MSG_CHANNEL_FAILURE: 'channel-failure',
    MSG_USERAUTH_GSSAPI_RESPONSE: 'userauth-gssapi-response',
    MSG_USERAUTH_GSSAPI_TOKEN: 'userauth-gssapi-token',
    MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE: 'userauth-gssapi-exchange-complete',
    MSG_USERAUTH_GSSAPI_ERROR: 'userauth-gssapi-error',
    MSG_USERAUTH_GSSAPI_ERRTOK: 'userauth-gssapi-error-token',
    MSG_USERAUTH_GSSAPI_MIC: 'userauth-gssapi-mic'
}
# authentication request return codes:
AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED = range(3)
# channel request failed reasons:
(OPEN_SUCCEEDED,
 OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
 OPEN_FAILED_CONNECT_FAILED,
 OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
 OPEN_FAILED_RESOURCE_SHORTAGE) = range(0, 5)
# Human-readable reasons for the nonzero open-failure codes above.
CONNECTION_FAILED_CODE = {
    1: 'Administratively prohibited',
    2: 'Connect failed',
    3: 'Unknown channel type',
    4: 'Resource shortage'
}
DISCONNECT_SERVICE_NOT_AVAILABLE, DISCONNECT_AUTH_CANCELLED_BY_USER, \
    DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE = 7, 13, 14
# Frequently-used single-byte values.
zero_byte = byte_chr(0)
one_byte = byte_chr(1)
four_byte = byte_chr(4)
max_byte = byte_chr(0xff)
cr_byte = byte_chr(13)
linefeed_byte = byte_chr(10)
crlf = cr_byte + linefeed_byte
# Iterating a bytes object yields 1-char strings on Python 2 but ints on
# Python 3; these aliases give the per-element form of CR/LF so callers
# can compare against either.
if PY2:
    cr_byte_value = cr_byte
    linefeed_byte_value = linefeed_byte
else:
    cr_byte_value = 13
    linefeed_byte_value = 10
def asbytes(s):
    """
    Coerce to bytes if possible or return unchanged.
    """
    # First preference: the py3compat b() helper, which handles the
    # string/unicode/buffer (Py2) and bytes/str (Py3) cases and raises
    # TypeError for anything else.
    try:
        return b(s)
    except TypeError:
        pass
    # Second preference: an asbytes() serializer, which many of our
    # internal classes implement.
    try:
        return s.asbytes()
    except AttributeError:
        # Give up and hand the object back untouched, assuming it is
        # sufficiently byte-y/buffer-y (or that the caller copes).
        return s
# 32-bit masks as longs (py3compat long == int on Python 3).
xffffffff = long(0xffffffff)
x80000000 = long(0x80000000)
# File-permission constants written as decimal ints because octal literal
# syntax differs between Python 2 and 3 (0666 vs 0o666); 438 == 0o666,
# 432 == 0o660, 420 == 0o644, 384 == 0o600, 511 == 0o777, 448 == 0o700,
# 56 == 0o70.
o666 = 438
o660 = 432
o644 = 420
o600 = 384
o777 = 511
o700 = 448
o70 = 56
# Re-exported logging levels for caller convenience.
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
# Common IO/select/etc sleep period, in seconds
io_sleep = 0.01
DEFAULT_WINDOW_SIZE = 64 * 2 ** 15
DEFAULT_MAX_PACKET_SIZE = 2 ** 15
# lower bound on the max packet size we'll accept from the remote host
# Minimum packet size is 32768 bytes according to
# http://www.ietf.org/rfc/rfc4254.txt
MIN_WINDOW_SIZE = 2 ** 15
# However, according to http://www.ietf.org/rfc/rfc4253.txt it is perfectly
# legal to accept a size much smaller, as OpenSSH client does as size 16384.
MIN_PACKET_SIZE = 2 ** 12
# Max windows size according to http://www.ietf.org/rfc/rfc4254.txt
MAX_WINDOW_SIZE = 2 ** 32 - 1
|
jantman/biweeklybudget | biweeklybudget/alembic/versions/08b6358a04bf_txnreconcile_allow_txn_id_to_be_null.py | Python | agpl-3.0 | 765 | 0 | """TxnReconcile allow txn_id to be null
Revision ID: 08b6358a04bf
Revises: 04e61490804b
Create Date: | 2018-03-07 19:48:06.050926
"""
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '08b6358a04bf'
down_revision = '04e61490804b'
branch_labels = None
depends_on = None
def upgrade():
    # Allow txn_reconciles rows to exist without an associated transaction
    # by making the txn_id foreign-key column nullable.
    op.alter_column(
        'txn_reconciles', 'txn_id',
        existing_type=mysql.INTEGER(display_width=11),
        nullable=True
    )
def downgrade():
    conn = op.get_bind()
    # MySQL-specific: temporarily disable foreign-key enforcement so the
    # column can be altered back to NOT NULL without FK errors.
    conn.execute("SET FOREIGN_KEY_CHECKS=0")
    op.alter_column(
        'txn_reconciles', 'txn_id',
        existing_type=mysql.INTEGER(display_width=11),
        nullable=False
    )
    conn.execute("SET FOREIGN_KEY_CHECKS=1")
|
christiangalsterer/httpbeat | vendor/github.com/elastic/beats/metricbeat/tests/system/test_reload.py | Python | apache-2.0 | 2,260 | 0.000885 | import re
import sys
import unittest
import metricbeat
import os
import time
# Further tests:
# * Mix full config modules with reloading modules
# * Load empty file
# * Add remove module
# * enabled / disable module
# * multiple files
# * Test empty file
class Test(metricbeat.BaseTest):
    # System tests for metricbeat's config-reload feature: the beat is
    # pointed at a glob of config files and must start/stop module
    # runners as files appear and disappear at runtime.

    @unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd|openbsd", sys.platform), "os")
    def test_reload(self):
        """
        Test basic reload
        """
        self.render_config_template(
            reload=True,
            reload_path=self.working_dir + "/configs/*.yml",
        )

        proc = self.start_beat()

        # The config dir is created after the beat starts, exercising
        # discovery of a directory that appears at runtime.
        os.mkdir(self.working_dir + "/configs/")

        systemConfig = """
- module: system
  metricsets: ["cpu"]
  period: 1s
"""

        with open(self.working_dir + "/configs/system.yml", 'w') as f:
            f.write(systemConfig)

        # The reloader should pick up the new module and produce events.
        self.wait_until(lambda: self.output_lines() > 0)
        proc.check_kill_and_wait()

    @unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd|openbsd", sys.platform), "os")
    def test_start_stop(self):
        """
        Test if module is properly started and stopped
        """
        self.render_config_template(
            reload=True,
            reload_path=self.working_dir + "/configs/*.yml",
        )

        os.mkdir(self.working_dir + "/configs/")
        config_path = self.working_dir + "/configs/system.yml"
        proc = self.start_beat()

        systemConfig = """
- module: system
  metricsets: ["cpu"]
  period: 1s
"""

        with open(config_path, 'w') as f:
            f.write(systemConfig)

        # Wait until offset for new line is updated
        self.wait_until(
            lambda: self.log_contains("Starting 1 runner"),
            max_timeout=10)

        self.wait_until(lambda: self.output_lines() > 0)

        # Remove config again
        os.remove(config_path)

        # Wait until offset for new line is updated
        self.wait_until(
            lambda: self.log_contains("Runner stopped:"),
            max_timeout=10)

        lines = self.output_lines()
        time.sleep(1)

        # Make sure no new lines were added since stopping
        assert lines == self.output_lines()

        proc.check_kill_and_wait()
|
saeedhadi/rt-thread | bsp/x86/rtconfig.py | Python | gpl-2.0 | 1,647 | 0.009715 | import os
# toolchains options
ARCH='ia32'
CPU=''
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery,
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'E:/Program Files/CodeSourcery/Sourcery_CodeBench_Lite_for_IA32_ELF/bin'
elif CROSS_TOOL == 'keil':
print '================ERROR============================'
print 'Not support keil yet!'
print '================================================='
exit(0)
elif CROSS_TOOL == 'iar':
print '========= | =======ERROR===================== | ======='
print 'Not support iar yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'i686-elf-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mtune=generic'
CFLAGS = DEVICE + ' -Wall'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-ia32.map,-cref,-u,_start -T x86_ram.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
|
sdague/home-assistant | tests/components/bond/test_cover.py | Python | apache-2.0 | 4,207 | 0.000713 | """Tests for the Bond cover device."""
from datetime import tim | edelta
from bond_api import Action, DeviceType
from homeassistant import core
from homeassistant.components.cover import DOMAIN as COVER_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_STOP_COVER,
)
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_a | ction,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def shades(name: str):
    """Build a minimal motorized-shades device description for tests."""
    return dict(name=name, type=DeviceType.MOTORIZED_SHADES)
async def test_entity_registry(hass: core.HomeAssistant):
    """Tests that the devices are registered in the entity registry."""
    await setup_platform(
        hass,
        COVER_DOMAIN,
        shades("name-1"),
        bond_version={"bondid": "test-hub-id"},
        bond_device_id="test-device-id",
    )

    registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
    entity = registry.entities["cover.name_1"]
    # The unique id combines the hub id and the device id.
    assert entity.unique_id == "test-hub-id_test-device-id"
async def test_open_cover(hass: core.HomeAssistant):
    """Tests that open cover command delegates to API."""
    await setup_platform(
        hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
    )

    with patch_bond_action() as mock_open, patch_bond_device_state():
        await hass.services.async_call(
            COVER_DOMAIN,
            SERVICE_OPEN_COVER,
            {ATTR_ENTITY_ID: "cover.name_1"},
            blocking=True,
        )
        await hass.async_block_till_done()

    # The HA open_cover service must translate to exactly one Bond open().
    mock_open.assert_called_once_with("test-device-id", Action.open())
async def test_close_cover(hass: core.HomeAssistant):
    """Tests that close cover command delegates to API."""
    await setup_platform(
        hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
    )

    with patch_bond_action() as mock_close, patch_bond_device_state():
        await hass.services.async_call(
            COVER_DOMAIN,
            SERVICE_CLOSE_COVER,
            {ATTR_ENTITY_ID: "cover.name_1"},
            blocking=True,
        )
        await hass.async_block_till_done()

    # The HA close_cover service must translate to exactly one Bond close().
    mock_close.assert_called_once_with("test-device-id", Action.close())
async def test_stop_cover(hass: core.HomeAssistant):
    """Tests that stop cover command delegates to API."""
    await setup_platform(
        hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
    )

    with patch_bond_action() as mock_hold, patch_bond_device_state():
        await hass.services.async_call(
            COVER_DOMAIN,
            SERVICE_STOP_COVER,
            {ATTR_ENTITY_ID: "cover.name_1"},
            blocking=True,
        )
        await hass.async_block_till_done()

    # HA's "stop" maps to Bond's hold() action.
    mock_hold.assert_called_once_with("test-device-id", Action.hold())
async def test_update_reports_open_cover(hass: core.HomeAssistant):
    """Tests that update command sets correct state when Bond API reports cover is open."""
    await setup_platform(hass, COVER_DOMAIN, shades("name-1"))

    # Advancing the clock past the polling interval triggers a refresh.
    with patch_bond_device_state(return_value={"open": 1}):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()

    assert hass.states.get("cover.name_1").state == "open"
async def test_update_reports_closed_cover(hass: core.HomeAssistant):
    """Tests that update command sets correct state when Bond API reports cover is closed."""
    await setup_platform(hass, COVER_DOMAIN, shades("name-1"))

    # Advancing the clock past the polling interval triggers a refresh.
    with patch_bond_device_state(return_value={"open": 0}):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()

    assert hass.states.get("cover.name_1").state == "closed"
async def test_cover_available(hass: core.HomeAssistant):
    """Tests that available state is updated based on API errors."""
    # Shared helper: toggles API errors and asserts availability flips.
    await help_test_entity_available(
        hass, COVER_DOMAIN, shades("name-1"), "cover.name_1"
    )
|
SUSE/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/resources_move_info.py | Python | mit | 1,105 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Gene | rator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourcesMoveInfo(Model):
    """Parameters of move resources.

    :param resources: The ids of the resources.
    :type resources: list of str
    :param target_resource_group: The target resource group.
    :type target_resource_group: str
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'resources': {'key': 'resources', 'type': '[str]'},
        'target_resource_group': {'key': 'targetResourceGroup', 'type': 'str'},
    }

    def __init__(self, resources=None, target_resource_group=None):
        self.resources = resources
        self.target_resource_group = target_resource_group
|
markgw/jazzparser | bin/analysis/findsong.py | Python | gpl-3.0 | 10,264 | 0.006333 | #!/usr/bin/env ../jazzshell
"""
Perform song identification by loading up a corpus of harmonic analyses
and comparing parse results to all of them, according to some distance metric.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <mark.granroth-wilding@ed.ac.uk>"
import sys
from optparse import OptionParser
from jazzparser.data.parsing import ParseResults
from jazzparser.parsers.cky.parser import DirectedCkyParser
from jazzparser.utils.options import options_help_text, ModuleOption
from jazzparser.data.tonalspace import TonalSpaceAnalysisSet
from jazzparser.formalisms.music_halfspan import Formalism
from jazzparser.utils.tableprint import pprint_table
def main():
usage = "%prog [options] <song-set> <results-file0> [<results-file1> ...]"
parser = OptionParser(usage=usage)
parser.add_option("--popt", "--parser-options", dest="popts", action="append", help="specify options for the parser that interprets the gold standard annotations. Type '--popt help' to get a list of options (we use a DirectedCkyParser)")
parser.add_option("-m", "--metric", dest="metric", action="store", help="semantics distance metric to use. Use '-m help' for a list of available metrics")
parser.add_option("--mopt", "--metric-options", dest="mopts", action="append", help="options to pass to the semantics metric. Use with '--mopt help' with -m to see available options")
parser.add_option("-r", "--print-results", dest="print_results", action="store", default=5, type="int", help="number of top se | arch results to print for each query (parse result). Default: 5. Use -1 to print dista | nces from all songs in the corpus")
parser.add_option("-g", "--gold-only", dest="gold_only", action="store_true", help="skip results that have no gold standard sequence associated with them (we can't tell which is the right answer for these)")
parser.add_option("--mc", "--metric-computation", dest="metric_computation", action="store_true", help="output the computation information for the metric between the parse result and each top search result")
options, arguments = parser.parse_args()
# For now, we always use the music_halfspan formalism with this script
# If we wanted to make it generic, we'd just load the formalism according
# to a command-line option
formalism = Formalism
# Process parser options
if options.popts is not None:
poptstr = options.popts
if "help" in [s.strip().lower() for s in poptstr]:
# Output this parser's option help
print options_help_text(DirectedCkyParser.PARSER_OPTIONS, intro="Available options for gold standard interpreter")
sys.exit(0)
poptstr = ":".join(poptstr)
else:
poptstr = ""
popts = ModuleOption.process_option_string(poptstr)
# Check that the options are valid
try:
DirectedCkyParser.check_options(popts)
except ModuleOptionError, err:
logger.error("Problem with parser options (--popt): %s" % err)
sys.exit(1)
# Get a distance metric
# Just check this, as it'll cause problems
if len(formalism.semantics_distance_metrics) == 0:
print "ERROR: the formalism defines no distance metrics, so this "\
"script won't work"
sys.exit(1)
# First get the metric
if options.metric == "help":
# Print out a list of metrics available
print "Available distance metrics:"
print ", ".join([metric.name for metric in \
formalism.semantics_distance_metrics])
sys.exit(0)
if options.metric is None:
# Use the first in the list as default
metric_cls = formalism.semantics_distance_metrics[0]
else:
for m in formalism.semantics_distance_metrics:
if m.name == options.metric:
metric_cls = m
break
else:
# No metric found matching this name
print "No metric '%s'" % options.metric
sys.exit(1)
print >>sys.stderr, "Using distance metric: %s" % metric_cls.name
# Now process the metric options
if options.mopts is not None:
moptstr = options.mopts
if "help" in [s.strip().lower() for s in moptstr]:
# Output this parser's option help
print options_help_text(metric_cls.OPTIONS, intro="Available options for metric '%s'" % metric_cls.name)
sys.exit(0)
moptstr = ":".join(moptstr)
else:
moptstr = ""
mopts = ModuleOption.process_option_string(moptstr)
# Instantiate the metric with these options
metric = metric_cls(options=mopts)
if len(arguments) < 2:
print >>sys.stderr, "Specify a song corpus name and one or more files to read results from"
sys.exit(1)
# First argument is an TonalSpaceAnalysisSet
corpus_name = arguments[0]
# Load the corpus file
corpus = TonalSpaceAnalysisSet.load(corpus_name)
# The rest of the args are result files to analyze
res_files = arguments[1:]
# Work out how many results to print out
if options.print_results == -1:
print_up_to = None
else:
print_up_to = options.print_results
ranks = []
num_ranked = 0
for filename in res_files:
# Load the parse results
pres = ParseResults.from_file(filename)
if options.gold_only and pres.gold_sequence is None:
# Skip this sequence altogether if requested
continue
print "######################"
print "Read %s" % filename
# Try to get a correct answer from the PR file
if pres.gold_sequence is None:
print "No correct answer specified in input file"
correct_song = None
else:
# Process the name of the sequence in the same way that
# TonalSpaceAnalysisSet does
# Ideally, they should make a common function call, but let's be
# bad for once
correct_song = pres.gold_sequence.string_name.lower()
print "Correct answer: %s" % correct_song
# Could have an empty result list: skip if it does
if len(pres.semantics) == 0:
print "No results"
# Failed to get any result: if this is one of the sequences that
# is in the corpus, count it as a 0 result. Otherwise, skip:
# we wouldn't have counted it anyway
num_ranked += 1
ranks.append(None)
continue
result = pres.semantics[0][1]
# Compare to each of the songs
distances = []
for name,songsem in corpus:
# Get the distance from this song
dist = metric.distance(result, songsem)
distances.append((name,dist,songsem))
# Sort them to get the closest first
distances.sort(key=lambda x:x[1])
print
# Print out the top results, as many as requested
top_results = distances[:print_up_to]
table = [["","Song","Distance"]] + [
["*" if res[0] == correct_song else "",
"%s" % res[0],
"%.2f" % res[1 |
parpg/parpg | parpg/dialogueprocessor.py | Python | gpl-3.0 | 15,770 | 0.003044 | # This file is part of PARPG.
#
# PARPG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PARPG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PARPG. If not, see <http://www.gnu.org/licenses/>.
"""
Provides the core interface to the dialogue subsystem used to process player
L{Dialogues<Dialogue>} with NPCs.
"""
import logging
from parpg.common.utils import dedent_chomp
if (__debug__):
from collections import Sequence, MutableMapping
from parpg.dialogue import Dialogue
logger = logging.getLogger('dialogueprocessor')
class DialogueProcessor(object):
"""
Primary interface to the dialogue subsystem used to initiate and process a
L{Dialogue} with an NPC.
To begin a dialogue with an NPC a L{DialogueProcessor} must first be
instantiated with the dialogue data to process and a dictionary of Python
objects defining the game state for testing of response conditionals. The
L{initiateDialogue} must be called to initialized the L{DialogueProcessor},
and once it is initialized processing of
L{DialogueSections<DialogueSection>} and
L{DialogueResponses<DialogueResponse>} can be initiated via the
L{continueDialogue} and L{reply} class methods.
The state of dialogue processing is stored via the
L{dialogue_section_stack} class attribute, which stores a list of
L{DialogueSections<DialogueSection>} that have been or are currently being
processed. Each time L{reply} is called with a L{DialogueResponse} its
next_section_id attribute is used to select a new L{DialogueSection} from
the L{dialogue}. The selected L{DialogueSection} is then pushed
onto the end of the L{dialogue_section_stack}, ready to be processed via
L{continueDialogue}. The exception to this rule occurs when L{reply} is
called with a L{DialogueResponse} whose next_section_id attribute is "end"
or "back". "end" terminates the dialogue as described below, while "back"
removes the last L{DialogueSection} on the L{dialogue_section_stack}
effectively going back to the previous section of dialogue.
The L{DialogueProcessor} terminates dialogue processing once L{reply} is
called with a L{DialogueResponse} whose next_section_id == 'end'.
Processing can also be manually terminated by calling the L{endDialogue}
class method.
@note: See the dialogue_demo.py script for a complete example of how the
L{DialogueProcessor} can be used.
@ivar dialogue: dialogue data currently being processed.
@type dialogue: L{Dialogue}
@ivar dialogue_section_stack: sections of dialogue that have been or are
currently being processed.
@type dialogue_section_stack: list of L{DialogueSections<DialogueSection>}
@ivar game_state: objects defining the game state that should be made
available for testing L{DialogueResponse} conditionals.
@type game_state: dict of Python objects
@ivar in_dialogue: whether a dialogue has been initiated.
@type in_dialogue: Bool
Usage:
>>> game_state = {'pc': player_character, 'quest': quest_engine}
>>> dialogue_processor = DialogueProcessor(dialogue, game_state)
>>> dialogue_processor.initiateDialogue()
>>> while dialogue_processor.in_dialogue:
... valid_responses = dialogue_processor.continueDialogue()
... response = choose_response(valid_responses)
... dialogue_processor.reply(response)
"""
_logger = logging.getLogger('dialogueengine.DialogueProcessor')
    def dialogue():
        # Property factory: property(**...) below unpacks the fget/fset
        # returned via locals(), giving a validated read/write attribute.
        def fget(self):
            return self._dialogue
        def fset(self, dialogue):
            # Only accept actual Dialogue instances.
            assert isinstance(dialogue, Dialogue), \
                '{0} does not implement Dialogue interface'.format(dialogue)
            self._dialogue = dialogue
        return locals()
    dialogue = property(**dialogue())
    def dialogue_section_stack():
        # Property factory: property(**...) below unpacks fget/fset from
        # locals().  Accepts any non-string Sequence.
        def fget(self):
            return self._dialogue_section_stack
        def fset(self, new_value):
            assert isinstance(new_value, Sequence) and not \
                isinstance(new_value, basestring), \
                'dialogue_section_stack must be a Sequence, not {0}'\
                .format(new_value)
            self._dialogue_section_stack = new_value
        return locals()
    dialogue_section_stack = property(**dialogue_section_stack())
    def game_state():
        # Property factory: property(**...) below unpacks fget/fset from
        # locals().  Requires a MutableMapping (eval() globals dict).
        def fget(self):
            return self._game_state
        def fset(self, new_value):
            assert isinstance(new_value, MutableMapping),\
                'game_state must be a MutableMapping, not {0}'\
                .format(new_value)
            self._game_state = new_value
        return locals()
    game_state = property(**game_state())
    def in_dialogue():
        # Property factory: simple validated bool flag tracking whether a
        # dialogue has been initiated.
        def fget(self):
            return self._in_dialogue
        def fset(self, value):
            assert isinstance(value, bool), '{0} is not a bool'.format(value)
            self._in_dialogue = value
        return locals()
    in_dialogue = property(**in_dialogue())
    def __init__(self, dialogue, game_state):
        """
        Initialize a new L{DialogueProcessor} instance.
        @param dialogue: dialogue data to process.
        @type dialogue: L{Dialogue}
        @param game_state: objects defining the game state that should be made
            available for testing L{DialogueResponse} conditions.
        @type game_state: dict of objects
        """
        # Assign the private attributes directly, bypassing the validating
        # property setters defined above (no type checks on construction).
        self._dialogue_section_stack = []
        self._dialogue = dialogue
        self._game_state = game_state
        self._in_dialogue = False
def getDialogueGreeting(self):
"""
Evaluate the L{RootDialogueSections<RootDialogueSection>} conditions
and return the valid L{DialogueSection} which should be displayed
first.
@return: Valid root dialogue section.
@rtype: L{DialogueSection}
@raise: RuntimeError - evaluation of a DialogueGreeting condition fails
by raising an exception (e.g. due to a syntax error).
"""
dialogue = self.dialogue
dialogue_greeting = None
for greeting in dialogue.greetings:
try:
condition_met = eval(greeting.condition, self.game_state)
except Exception as exception:
error_message = dedent_chomp('''
exception raised in DialogueGreeting {id} condition:
{exception}
''').format(id=greeting.id, exception=exception)
self._logger.error(error_message)
if (condition_met):
dialogue_greeting = greeting
if (dialogue_greeting is None):
dialogue_greeting = dialogue.default_greeting
return dialogue_greeting
    def initiateDialogue(self):
        """
        Prepare the L{DialogueProcessor} to process the L{Dialogue} by pushing
        the starting L{DialogueSection} onto the L{dialogue_section_stack}.
        @raise RuntimeError: Unable to determine the root L{DialogueSection}
            defined by the L{Dialogue}.
        """
        # Re-initiating implicitly terminates any dialogue in progress.
        if (self.in_dialogue):
            self.endDialogue()
        dialogue_greeting = self.getDialogueGreeting()
        self.dialogue_section_stack.append(dialogue_greeting)
        self.in_dialogue = True
        self._logger.info('initiated dialogue {0}'.format(self.dialogue))
def continueDialogue(self):
"""
Process the L{DialogueSection} at the top of the
L{dialogue_section_stack}, run any L{DialogueActions<Dial |
mitsuhiko/python-unio | unio.py | Python | bsd-3-clause | 15,682 | 0 | import io
import os
import sys
import codecs
import contextlib
# We do not trust traditional unixes about having reliable file systems.
# In that case we know better than what the env says and declare this to
# be utf-8 always.
has_likely_buggy_unicode_filesystem = \
sys.platform.startswith('linux') or 'bsd' in sys.platform
def is_ascii_encoding(encoding):
    """Given an encoding this figures out if the encoding is actually ASCII
    (which is something we don't actually want in most cases). This is
    necessary because ASCII comes under many names such as ANSI_X3.4-1968.
    """
    if encoding is None:
        return False
    try:
        # codecs.lookup normalizes every alias to its canonical codec name.
        return codecs.lookup(encoding).name == 'ascii'
    except LookupError:
        return False
def get_filesystem_encoding():
    """Returns the filesystem encoding that should be used.  Note that
    this is different from the Python understanding of the filesystem
    encoding which might be deeply flawed.  Do not use this value against
    Python's unicode APIs because it might be different.

    The concept of a filesystem encoding in general is not something
    you should rely on.  As such, if you ever need to use this function
    except for writing wrapper code, reconsider.
    """
    # Traditional unixes are assumed misconfigured; force utf-8 there.
    if has_likely_buggy_unicode_filesystem:
        return 'utf-8'
    reported = sys.getfilesystemencoding()
    # ASCII (under any alias) is never what we want.
    return 'utf-8' if is_ascii_encoding(reported) else reported
def get_file_encoding(for_writing=False):
    """Returns the encoding for text file data.  This is always the same
    on all operating systems because that is the only thing that makes
    data exchange feasible: utf-8, no questions asked.  The only
    refinement is that files opened for reading also allow a BOM
    ('utf-8-sig').
    """
    return 'utf-8' if for_writing else 'utf-8-sig'
def get_std_stream_encoding():
    """Returns the default stream encoding if not found."""
    reported = sys.getdefaultencoding()
    # Never accept plain ASCII for the standard streams.
    return 'utf-8' if is_ascii_encoding(reported) else reported
class BrokenEnvironment(Exception):
    """This error is raised on Python 3 if the system was malconfigured
    beyond repair.
    """
    # Intentionally empty: the exception type itself carries the meaning.
class _NonClosingTextIOWrapper(io.TextIOWrapper):
    """Subclass of the wrapper that does not close the underlying file
    in the destructor. This is necessary so that our wrapping of the
    standard streams does not accidentally close the original file.
    """
    def __del__(self):
        # Intentionally a no-op: the default TextIOWrapper finalizer would
        # close the wrapped (shared) stream, e.g. sys.stdout.
        pass
class _FixupStream(object):
    """The new io interface needs more from streams than streams
    traditionally implement.  As such this fixup stuff is necessary in
    some circumstances.

    Unknown attributes are proxied to the wrapped stream; the three io
    capability probes are implemented (or delegated) explicitly because
    ``io.TextIOWrapper`` requires them to return booleans.
    """

    def __init__(self, stream):
        self._stream = stream

    def __getattr__(self, name):
        return getattr(self._stream, name)

    def readable(self):
        x = getattr(self._stream, 'readable', None)
        if x is not None:
            # BUGFIX: the method must be *called*; returning the bound
            # method object itself is always truthy, so non-readable
            # streams were reported as readable.
            return x()
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        x = getattr(self._stream, 'writable', None)
        if x is not None:
            return x()  # BUGFIX: call, don't return the bound method.
        try:
            self._stream.write('')
        except Exception:
            try:
                self._stream.write(b'')
            except Exception:
                return False
        return True

    def seekable(self):
        x = getattr(self._stream, 'seekable', None)
        if x is not None:
            return x()  # BUGFIX: call, don't return the bound method.
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
PY2 = sys.version_info[0] == 2
if PY2:
import StringIO
text_type = unicode
TextIO = io.StringIO
BytesIO = io.BytesIO
NativeIO = StringIO.StringIO
def _make_text_stream(stream, encoding, errors):
if encoding is None:
encoding = get_std_stream_encoding()
if errors is None:
errors = 'replace'
return _NonClosingTextIOWrapper(_FixupStream(stream), encoding, errors)
def get_binary_stdin():
return sys.stdin
def get_binary_stdout():
return sys.stdout
def get_binary_stderr():
return sys.stderr
def get_binary_argv():
return list(sys.argv)
def get_text_stdin(encoding=None, errors=None):
return _make_text_stream(sys.stdin, encoding, errors)
def get_text_stdout(encoding=None, errors=None):
return _make_text_stream(sys.stdout, encoding, errors)
def get_text_stderr(encoding=None, errors=None):
return _make_text_stream(sys.stderr, encoding, errors)
@contextlib.contextmanager
def wrap_standard_stream(stream_type, stream):
if stream_type not in ('stdin', 'stdout', 'stderr'):
raise TypeError('Invalid stream %s' % stream_type)
old_stream = getattr(sys, stream_type)
setattr(sys, stream_type, stream)
try:
yield stream
finally:
setattr(sys, stream_type, old_stream)
@contextlib.contextmanager
def capture_stdout(and_stderr=False):
stream = StringIO.StringIO()
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = stream
if and_stderr:
sys.stderr = stream
try:
yield stream
finally:
sys.stdout = old_stdout
if and_stderr:
sys.stderr = old_stderr
binary_env = os.environ
else:
text_type = str
TextIO = io.StringIO
BytesIO = io.BytesIO
NativeIO = io.StringIO
    def _is_binary_reader(stream, default=False):
        # Returns True when a zero-byte read yields ``bytes`` (a binary
        # stream); a text stream's read() returns ``str`` instead.
        try:
            return isinstance(stream.read(0), bytes)
        except Exception:
            # This happens in some cases where the stream was already
            # closed.  In this case we assume the default.
            return default
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detatching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
is_binary = _is_binary_reader(stream, False)
if is_binary:
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here, this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detatching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here, this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and | _is_binary_reader(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if it's encoding is ASCII."""
return is_ascii_encoding(getattr(stream, 'encoding', None))
def _wrap_stream_for_text(stream, encoding, errors):
if errors is None:
errors = 'replace'
if encoding is None:
encoding | = get_std_stream_encoding()
return _NonClosingTextIOWrapper(_FixupStream(stream), encoding, errors)
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
|
"""GitLab source version collector."""
from packaging.version import Version

from base_collectors import SourceVersionCollector
from collector_utilities.type import Response, URL

from .base import GitLabBase
class GitLabSourceVersion(GitLabBase, SourceVersionCollector):
    """Collector class to measure the version of a GitLab instance."""

    async def _api_url(self) -> URL:
        """Override to return the version API URL (GitLab REST v4)."""
        # Repaired a stray "|" that had been spliced into this call.
        return URL(f"{await super()._api_url()}/api/v4/version")

    async def _parse_source_response_version(self, response: Response) -> Version:
        """Override to return the GitLab version."""
        return Version((await response.json())["version"])
|
knuu/competitive-programming | atcoder/corp/aising2019_a.py | Python | mit | 84 | 0 | N = int(input())
# An H x W notice fits at (N - H + 1) * (N - W + 1) top-left positions on
# the N x N grid (N is read on the preceding line).
H = int(input())
W = int(input())
print((N - H + 1) * (N - W + 1))
|
ranea/ccepy | setup.py | Python | mit | 4,235 | 0.000236 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
    name='ccepy',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.0.1',
    description='Criptografía con Curvas Elípticas con Python',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/ranea/ccepy',
    # Author details
    author='Adrian Ranea',
    author_email='adrianranea@gmail.com',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Security :: Cryptography',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # What does your project relate to?
    keywords='criptografía curvas elípticas llave pública protocolos criptográficos',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['docs', 'tests']),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # install_requires=['peppercorn'],
    # (an exact duplicate of the install_requires comment block was removed)
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    #   $ pip install -e .[dev,test]
    extras_require={
        'dev': ['sphinx'],
        'test': ['hypothesis'],
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    # package_data={
    #     'sample': ['package_data.dat'],
    # },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    # NOTE(review): 'sample=sample:main' looks like leftover boilerplate from
    # pypa/sampleproject — confirm whether ccepy ships a console script.
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
)
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import os
from PyQt4.QtGui import QSplashScreen, QPixmap
from PyQt4.QtCore import Qt
import ilastik
splashScreen = None
def showSplashScreen():
    """Create and display the ilastik splash screen, overlaying the
    ilastik version in the bottom-right corner."""
    global splashScreen
    image_path = os.path.join(os.path.split(ilastik.__file__)[0], 'ilastik-splash.png')
    splashScreen = QSplashScreen(QPixmap(image_path))
    splashScreen.showMessage(ilastik.__version__, Qt.AlignBottom | Qt.AlignRight)
    splashScreen.show()
def hideSplashScreen():
    """Close the splash screen once the main shell window is up."""
    # Imported here to avoid a circular import at module load time.
    import startShellGui
    global splashScreen
    # finish() waits for the given widget to be shown before closing.
    splashScreen.finish(startShellGui.shell)
|
leonidas141/HIT-ML-2016-report | load.py | Python | mpl-2.0 | 1,459 | 0.028787 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 8 14:45:26 2017
@author: leonidas
"""
import numpy as np
import operator
def classify(inputPoint, dataSet, labels, k):
    """k-nearest-neighbour vote: return the majority label among the k
    training rows of ``dataSet`` closest (Euclidean) to ``inputPoint``."""
    n_samples = dataSet.shape[0]
    # Euclidean distance from the query point to every training sample.
    deltas = np.tile(inputPoint, (n_samples, 1)) - dataSet
    dists = pow(pow(deltas, 2).sum(axis=1), 0.5)
    # Tally the labels of the k nearest samples.
    votes = {}
    for idx in dists.argsort()[:k]:
        label = labels[idx]
        votes[label] = votes.get(label, 0) + 1
    ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
def mat_to_vect(filename):
vect = []
data = open(fil | ename)
for i in range(32):
temp = data.readline()
for j in range(32):
try:
vect.append(int(temp[j]))
except(ValueError):
print temp[j],'error',ValueError
return vect
def load_train_data():
    # Load the 10 x ``size`` training digit files into a label list and a
    # (size*10, 1024) matrix of flattened 32x32 bitmaps.
    train_lables = []
    size = 100
    train_data = np.zeros((size*10,1024))
    for i in range(10):
        for j in range(size):
            train_lables.append(i)
            # NOTE(review): uses the literal 100 instead of ``size``; keep
            # the two in sync if ``size`` ever changes.
            train_data[i*100+j,:] = mat_to_vect('train/%s/%s.txt' %( i,j ))
    return train_lables,train_data
def classnumCut(fileName):
    """Return the digit class encoded as the first character of a
    training-file name (e.g. ``'7_13.txt'`` -> ``7``)."""
    return int(fileName[0])
ehouarn-perret/EhouarnPerret.Python.Kattis | Trivial/Different Distances.py | Python | mit | 179 | 0.039106 | impo | rt math
# Kattis "Different Distances": for each input line "x1 y1 x2 y2 p" (until
# a lone "0"), print the Minkowski L^p distance between the two points.
# (Repaired a stray "|" spliced into the map(float, ...) call.)
while True:
    s = input()
    if s != "0":
        x1, y1, x2, y2, p = map(float, s.split())
        r = (abs(x1 - x2) ** p + abs(y1 - y2) ** p) ** (1 / p)
        print(r)
    else:
        break
|
SINTEF-Infosec/Incident-Information-Sharing-Tool | incidents/migrations/0024_incidentnotificationstatus.py | Python | apache-2.0 | 624 | 0.001603 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add IncidentNotificationStatus: per-incident flag recording whether a
    notification has been sent.  (Repaired stray "|" characters spliced into
    the fields list.)"""

    dependencies = [
        ('incidents', '0023_alert_link'),
    ]

    operations = [
        migrations.CreateModel(
            name='IncidentNotificationStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
                ('notified', models.BooleanField(default=False)),
                ('incident', models.ForeignKey(to='incidents.Incident')),
            ],
        ),
    ]
|
shojikai/python-google-api-clients | test/test_bigquery_dataset.py | Python | apache-2.0 | 3,541 | 0.003389 | import os
import sys
import unittest
from pprint import pprint
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/../')
from google_api_clients.bigquery import BigQuery
from google_api_clients.bigquery.errors import ParameterError
class BigQueryTest(unittest.TestCase):
    """Integration tests for the BigQuery dataset wrapper.

    Talks to the live BigQuery API: requires PROJECT_ID (and optionally
    DATASET_ID) in the environment.
    """
    def setUp(self):
        self.project_id = os.getenv('PROJECT_ID')
        self.dataset_id = os.getenv('DATASET_ID', 'test_dataset')
        if self.project_id is None:
            print('PROJECT_ID is not defined.')
            sys.exit(1)
        self.bq = BigQuery(self.project_id)
    def tearDown(self):
        # BUGFIX: this hook was spelled "TearDown"; unittest only invokes
        # the lowercase name, so the original method was never called.
        pass
    def test_normal(self):
        """Create/list/delete a dataset using the default options."""
        print('exists dataset')
        if self.bq.exists_dataset(self.dataset_id):
            print('delete dataset')
            res = self.bq.drop_dataset(self.dataset_id, delete_contents=True)
            self.assertTrue(bool(res))
        print('create dataset')
        res = self.bq.create_dataset(self.dataset_id)
        self.assertTrue(bool(res))
        print('create exists dataset')
        res = self.bq.create_dataset(self.dataset_id)
        self.assertFalse(bool(res))
        print('show datasets')
        res = self.bq.show_datasets()
        self.assertIn(self.dataset_id, res)
        print('\n'.join(res))
        print('delete dataset')
        res = self.bq.drop_dataset(self.dataset_id)
        self.assertFalse(bool(res))
        print('delete no exists dataset')
        res = self.bq.drop_dataset(self.dataset_id)
        self.assertFalse(bool(res))
    def test_normal_with_args(self):
        """Exercise the keyword-argument variants of the dataset API.

        (Repaired stray "|" characters spliced into the
        default_table_expiration_ms keyword and the
        'defaultTableExpirationMs' key below.)
        """
        print('exists dataset: ' + self.dataset_id)
        if self.bq.exists_dataset(self.dataset_id, project_id=self.project_id):
            print('exists')
            print('delete dataset: ' + self.dataset_id)
            res = self.bq.drop_dataset(self.dataset_id, project_id=self.project_id, delete_contents=True)
            self.assertTrue(bool(res))
        else:
            print('no exists')
        print('create dataset')
        access = [
            { 'role': 'OWNER', 'specialGroup': 'projectOwners' },
        ]
        res = self.bq.create_dataset(self.dataset_id, project_id=self.project_id, access=access,
            default_table_expiration_ms=3600000, description='Description', friendly_name='Friendly Name',
            location='EU')
        self.assertTrue(bool(res))
        print('info dataset')
        res = self.bq.info_dataset(self.dataset_id, project_id=self.project_id)
        self.assertEqual(1, len(res['access']))
        self.assertEqual('OWNER', res['access'][0]['role'])
        self.assertEqual('projectOwners', res['access'][0]['specialGroup'])
        self.assertEqual(3600000, int(res['defaultTableExpirationMs']))
        self.assertEqual('Description', res['description'])
        self.assertEqual('Friendly Name', res['friendlyName'])
        self.assertEqual('EU', res['location'])
        pprint(res)
        print('show datasets')
        res = self.bq.show_datasets(project_id=self.project_id, all=True, max_results=10)
        self.assertIn(self.dataset_id, res)
        print('\n'.join(res))
        print('delete dataset: ' + self.dataset_id)
        res = self.bq.drop_dataset(self.dataset_id, delete_contents=True)
        self.assertFalse(bool(res))
    def test_error(self):
        """Missing/None dataset_id raises the documented errors."""
        with self.assertRaises(TypeError):
            self.bq.create_dataset()
        with self.assertRaises(ParameterError):
            self.bq.create_dataset(dataset_id=None)
if __name__ == '__main__':
unittest.main()
|
appfluence/prioritymatrix-python | pmatrix/user.py | Python | apache-2.0 | 3,046 | 0.005253 | import json
import demjson
from datetime import datetime, timedelta
class User(object):
    """Thin wrapper around a PriorityMatrix user JSON payload.

    NOTE(review): update()/save() embed a hard-coded OAuth bearer token and
    a staging URL; move both to configuration before shipping.
    """
    account_manager = ""
    date_joined = None
    email = ""
    first_name = ""
    id = None
    last_login = None
    last_name = ""
    paying_single = False
    paying_team = False
    resource_uri = ""
    teammate = False
    user_profile = ""
    username = ""
    def __init__(self, userJson):
        # Round-trip through demjson/json to normalise the payload into a
        # plain attribute dict.
        self.__dict__ = json.loads(demjson.encode(userJson))
    def getAccount_manager(self):
        # Lazily refresh from the API when the field is still empty.
        if self.account_manager == "":
            self.update()
        return self.account_manager
    def getDate_joined(self):
        return self.date_joined
    def getEmail(self):
        return self.email
    def getFirst_name(self):
        return self.first_name
    def getID(self):
        return self.id
    def getId(self):
        return self.id
    def getLast_login(self):
        return self.last_login
    def getLast_name(self):
        return self.last_name
    def getPaying_single(self):
        # NOTE(review): the class default is False, not "", so this refresh
        # trigger can never fire — confirm the intended sentinel.
        if self.paying_single == "":
            self.update()
        return self.paying_single
    def getPaying_team(self):
        # NOTE(review): same sentinel issue as getPaying_single.
        if self.paying_team == "":
            self.update()
        return self.paying_team
    def getResource_uri(self):
        return self.resource_uri
    def getTeammate(self):
        return self.teammate
    def getUser_profile(self):
        return self.user_profile
    def getUsername(self):
        return self.username
    def getName(self):
        return self.username
    def update(self):
        """Re-fetch this user from the API and re-initialise in place."""
        import slumber
        import pprint
        import json
        import requests
        session = requests.session()
        session.headers['Authorization'] = ("Bearer " + "NahSKHWDnxGzVL8Ac21p3etG218mly")
        api = slumber.API("http://stage.appfluence.com/api/v1/", session=session)
        user_update = api.user(self.getID()).get(format=json)
        self.__init__(user_update)
    def save(self):
        """Push this user back to the API."""
        import slumber
        import pprint
        import json
        import requests
        session = requests.session()
        session.headers['Authorization'] = ("Bearer " + "NahSKHWDnxGzVL8Ac21p3etG218mly")
        api = slumber.API("http://stage.appfluence.com/api/v1/", session=session)
        # NOTE(review): self.getJson() is not defined anywhere in this
        # class, so save() raises AttributeError — confirm the intended
        # serializer.
        api.user(self.getID()).put(self.getJson())
    def printUser(self):
        # (Repaired stray "|" characters spliced into the user_profile
        # string and the "Is teammate" print below.)
        print ("User email: " + self.email)
        if (self.first_name != ""):
            print ("First name: " + self.first_name)
        if (self.last_name != ""):
            print ("Last name: " + self.last_name)
        print ("ID: " + str(self.id))
        print ("Username: " + self.username)
        print ("Resource Uri: " + self.resource_uri)
        print ("User profile: " + self.user_profile)
        print("Date joined: " + str(datetime.fromtimestamp(float(self.date_joined))))
        print("Last login: " + str(datetime.fromtimestamp(float(self.last_login))))
        if (self.teammate):
            print("Is teammate")
        else:
            print("Is not teammate")
|
foxdog-studios/pyddp | ddp/pubsub/pod_message_filter.py | Python | apache-2.0 | 1,493 | 0.00067 | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from .subscriber import Subscriber
from .topics import PodAccepted, PodReceived, PodRejected
__all__ = ['PodMessageFilter']
class PodMessageFilter(Subscriber):
    """Subscriber that routes each received pod either to a type-specific
    ``PodAccepted`` topic or to ``PodRejected``, as decided by the wrapped
    filter object.  (Repaired a stray "|" spliced into the ``_on_received``
    signature.)"""
    def __init__(self, board, pod_message_filter):
        super(PodMessageFilter, self).__init__(board, {
                PodReceived: self._on_received})
        self._board = board
        self._pod_message_filter = pod_message_filter
    def _on_received(self, topic, pod):
        if self._accept(pod):
            topic = PodAccepted + self._get_type(pod)
        else:
            topic = PodRejected
        self._board.publish(topic, pod)
    def _accept(self, pod):
        return self._pod_message_filter.accept(pod)
    def _get_type(self, pod):
        return self._pod_message_filter.get_type(pod)
|
justin-l/alltest | thread.py | Python | apache-2.0 | 440 | 0.015909 | #!/usr/bin/env python
#coding = utf-8
import threading
lk = threading.Lock()
g_counter = 0
def count(n):
global g_counter
for i in xrange(n):
lk.acq | uire()
g_counter += 1
lk.release()
print g_counter
return g_cou | nter
# Spawn two workers that each bump the shared counter ten million times.
t1 = threading.Thread(target=count,args=(10000000,))
t2 = threading.Thread(target=count,args=(10000000,))
t1.start()
t2.start()
# NOTE(review): Thread.join() returns None, so the two prints below always
# show "None"; only the final line shows the counter value.
print t1.join()
print t2.join()
print "final",g_counter
oesteban/niworkflows | niworkflows/interfaces/bids.py | Python | bsd-3-clause | 35,326 | 0.001698 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Interfaces for handling BIDS-like neuroimaging structures."""
from collections import defaultdict
from json import dumps, loads
from pathlib import Path
from shutil import copytree, rmtree
from pkg_resources import resource_filename as _pkgres
import re
import nibabel as nb
import numpy as np
from nipype import logging
from nipype.interfaces.base import (
traits,
isdefined,
Undefined,
TraitedSpec,
BaseInterfaceInputSpec,
DynamicTraitedSpec,
File,
Directory,
InputMultiObject,
OutputMultiObject,
Str,
SimpleInterface,
)
from nipype.interfaces.io import add_traits
from templateflow.api import templates as _get_template_list
from ..utils.bids import _init_layout, relative_to_root
from ..utils.images import set_consumables, unsafe_write_nifti_header_and_data
from ..utils.misc import splitext as _splitext, _copy_any
regz = re.compile(r"\.gz$")
# Load the nipreps (BIDS-derivatives) pybids spec shipped with the package.
# (Repaired stray "|" characters spliced into the "niworkflows" resource
# name and the "dseg" entry below.)
_pybids_spec = loads(Path(_pkgres("niworkflows", "data/nipreps.json")).read_text())
BIDS_DERIV_ENTITIES = frozenset({e["name"] for e in _pybids_spec["entities"]})
BIDS_DERIV_PATTERNS = tuple(_pybids_spec["default_path_patterns"])
STANDARD_SPACES = _get_template_list()
LOGGER = logging.getLogger("nipype.interface")


def _none():
    """defaultdict factory: suffixes not listed below map to ``None``."""
    return None


# Automatically coerce certain suffixes (DerivativesDataSink)
DEFAULT_DTYPES = defaultdict(
    _none,
    (
        ("mask", "uint8"),
        ("dseg", "int16"),
        ("probseg", "float32"),
        ("boldref", "source"),
    ),
)
class _BIDSBaseInputSpec(BaseInterfaceInputSpec):
    # Shared inputs for the BIDS interfaces below: an optional dataset root
    # and a flag controlling PyBIDS validation.
    bids_dir = traits.Either(
        (None, Directory(exists=True)), usedefault=True, desc="optional bids directory"
    )
    bids_validate = traits.Bool(True, usedefault=True, desc="enable BIDS validator")
class _BIDSInfoInputSpec(_BIDSBaseInputSpec):
    # Path to the file whose BIDS entities should be parsed.
    in_file = File(mandatory=True, desc="input file, part of a BIDS tree")
class _BIDSInfoOutputSpec(DynamicTraitedSpec):
    # One output per BIDS entity that BIDSInfo parses; entities missing
    # from the filename stay <undefined>.
    subject = traits.Str()
    session = traits.Str()
    task = traits.Str()
    acquisition = traits.Str()
    reconstruction = traits.Str()
    run = traits.Int()
    suffix = traits.Str()
class BIDSInfo(SimpleInterface):
"""
Extract BIDS entities from a BIDS-conforming path.
This interface uses only the basename, not the path, to determine the
subject, session, task, run, acquisition or reconstruction.
>>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = <undefined>
reconstruction = <undefined>
run = <undefined>
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_rec-MB_acq-AP_run-1_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = AP
reconstruction = MB
run = 1
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_acq-AP_run-01_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = AP
reconstruction = <undefined>
run = 1
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_validate=False)
>>> bids_info.inputs.in_file = str(
... datadir / 'ds114' / 'sub-01' / 'ses-retest' /
... 'func' / 'sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz')
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = <undefined>
reconstruction = <undefined>
run = <undefined>
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = <undefined>
reconstruction = <undefined>
run = <undefined>
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
"""
input_spec = _BIDSInfoInputSpec
output_spec = _BIDSInfoOutputSpec
    def _run_interface(self, runtime):
        """Parse BIDS entities from ``in_file`` and expose them as outputs."""
        from bids.layout import parse_file_entities

        bids_dir = self.inputs.bids_dir
        in_file = self.inputs.in_file
        if bids_dir is not None:
            try:
                # Parse from the path relative to the BIDS root when the
                # file actually lives under it.
                in_file = str(Path(in_file).relative_to(bids_dir))
            except ValueError:
                # in_file is outside bids_dir; fall back to the full path.
                pass
        params = parse_file_entities(in_file)
        # Entities absent from the filename surface as <undefined> rather
        # than raising.
        self._results = {
            key: params.get(key, Undefined)
            for key in _BIDSInfoOutputSpec().get().keys()
        }
        return runtime
class _BIDSDataGrabberInputSpec(BaseInterfaceInputSpec):
    # Mapping of modality name -> list of file paths, plus the subject label.
    subject_data = traits.Dict(Str, traits.Any)
    subject_id = Str()
class _BIDSDataGrabberOutputSpec(TraitedSpec):
    # The full mapping, plus one convenience output per modality.
    out_dict = traits.Dict(desc="output data structure")
    fmap = OutputMultiObject(desc="output fieldmaps")
    bold = OutputMultiObject(desc="output functional images")
    sbref = OutputMultiObject(desc="output sbrefs")
    t1w = OutputMultiObject(desc="output T1w images")
    roi = OutputMultiObject(desc="output ROI images")
    t2w = OutputMultiObject(desc="output T2w images")
    flair = OutputMultiObject(desc="output FLAIR images")
class BIDSDataGrabber(SimpleInterface):
"""
Collect files from a BIDS directory structure.
>>> bids_src = BIDSDataGrabber(anat_only=False)
>>> bids_src.inputs.subject_data = bids_collect_data(
... str(datadir / 'ds114'), '01', bids_validate=False)[0]
>>> bids_src.inputs.subject_id = '01'
>>> res = bids_src.run()
>>> res.outputs.t1w # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['.../ds114/sub-01/ses-retest/anat/sub-01_ses-retest_T1w.nii.gz',
'.../ds114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz']
"""
input_spec = _BIDSDataGrabberInputSpec
output_spec = _BIDSDataGrabberOutputSpec
_require_funcs = True
def __init__(self, *args, **kwargs):
anat_only = kwargs.pop("anat_only")
anat_derivatives = kwargs.pop("anat_derivatives", None)
super(BIDSDataGrabber, self).__init__(*args, **kwargs)
if anat_only is not None:
self._require_funcs = not anat_only
self._require_t1w = anat_derivatives is None
def _run_interface(self, runtime):
bids_dict = self.inputs.subject_data
self._results["out_dict"] = bids_dict
self._results.update(bids_dict)
if self._require_t1w and not bids_dict['t1w']:
raise FileNotFoundError(
"No T1w images found for subject sub-{}".format(self.inputs.subject_id)
)
if self._requ |
newvem/pytz | pytz/zoneinfo/Asia/Rangoon.py | Python | mit | 564 | 0.069149 | '''tzinfo timezone information for Asia/Rangoon.''' |
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Rangoon(DstTzInfo):
'''Asia/Rangoon timezone definition. See datetime.tzinfo for details'''
zone = 'Asia/Rangoon' |
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1919,12,31,17,35,24),
d(1942,4,30,17,30,0),
d(1945,5,2,15,0,0),
]
_transition_info = [
i(23100,0,'RMT'),
i(23400,0,'BURT'),
i(32400,0,'JST'),
i(23400,0,'MMT'),
]
Rangoon = Rangoon()
|
dtkelch/Tagger | tweepy/tweepy/models.py | Python | mit | 12,645 | 0.001503 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy.error import TweepError
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
    """A list-like container holding the results of a Twitter API query."""

    def __init__(self, max_id=None, since_id=None):
        super(ResultSet, self).__init__()
        self._max_id = max_id
        self._since_id = since_id

    @property
    def max_id(self):
        """Smallest contained id minus one, unless fixed at construction."""
        if self._max_id:
            return self._max_id
        id_list = self.ids()
        if not id_list:
            return None
        return min(id_list) - 1

    @property
    def since_id(self):
        """Greatest contained id, unless fixed at construction."""
        if self._since_id:
            return self._since_id
        id_list = self.ids()
        if not id_list:
            return None
        return max(id_list)

    def ids(self):
        """Ids of every contained item exposing an ``id`` attribute."""
        return [entry.id for entry in self if hasattr(entry, 'id')]
class Model(object):
    """Base class for API models built from JSON payloads."""

    def __init__(self, api=None):
        self._api = api

    def __getstate__(self):
        """Pickle support: drop the API handle, which must not be pickled."""
        state = dict(self.__dict__)
        state.pop('_api', None)
        return state

    @classmethod
    def parse(cls, api, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a list of JSON objects into a ResultSet of model instances.

        Falsy entries (e.g. ``None``) are silently skipped.
        """
        parsed = ResultSet()
        parsed.extend(cls.parse(api, entry) for entry in json_list if entry)
        return parsed

    def __repr__(self):
        fields = ', '.join(
            '%s=%s' % (name, repr(value)) for name, value in vars(self).items())
        return '%s(%s)' % (self.__class__.__name__, fields)
class Status(Model):
    """A tweet, parsed from the API's JSON representation."""

    @classmethod
    def parse(cls, api, json):
        """Build a Status, converting nested and typed fields as needed."""
        status = cls(api)
        setattr(status, '_json', json)
        for k, v in json.items():
            if k == 'user':
                # Prefer the parser's registered user model when an API is bound.
                user_model = getattr(api.parser.model_factory, 'user') if api else User
                user = user_model.parse(api, v)
                setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED: use ``author`` instead
            elif k == 'created_at':
                setattr(status, k, parse_datetime(v))
            elif k == 'source':
                # Source may be an HTML anchor; split it into text and href.
                if '<' in v:
                    setattr(status, k, parse_html_value(v))
                    setattr(status, 'source_url', parse_a_href(v))
                else:
                    setattr(status, k, v)
                    setattr(status, 'source_url', None)
            elif k == 'retweeted_status':
                setattr(status, k, Status.parse(api, v))
            elif k == 'place':
                if v is not None:
                    setattr(status, k, Place.parse(api, v))
                else:
                    setattr(status, k, None)
            else:
                setattr(status, k, v)
        return status

    def destroy(self):
        """Delete this tweet via the bound API."""
        return self._api.destroy_status(self.id)

    def retweet(self):
        """Retweet this tweet via the bound API."""
        return self._api.retweet(self.id)

    def retweets(self):
        """Return retweets of this tweet via the bound API."""
        return self._api.retweets(self.id)

    def favorite(self):
        """Favorite this tweet via the bound API."""
        return self._api.create_favorite(self.id)
class User(Model):
    """A Twitter user account."""

    @classmethod
    def parse(cls, api, json):
        """Build a User, converting typed fields (datetime, status, following)."""
        user = cls(api)
        setattr(user, '_json', json)
        for k, v in json.items():
            if k == 'created_at':
                setattr(user, k, parse_datetime(v))
            elif k == 'status':
                setattr(user, k, Status.parse(api, v))
            elif k == 'following':
                # Twitter sets this to null when false; normalize to a bool.
                if v is True:
                    setattr(user, k, True)
                else:
                    setattr(user, k, False)
            else:
                setattr(user, k, v)
        return user

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a bare list or a ``{'users': [...]}`` payload into a ResultSet."""
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['users']
        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results

    def timeline(self, **kargs):
        """Return this user's tweet timeline via the bound API."""
        return self._api.user_timeline(user_id=self.id, **kargs)

    def friends(self, **kargs):
        """Return this user's friends via the bound API."""
        return self._api.friends(user_id=self.id, **kargs)

    def followers(self, **kargs):
        """Return this user's followers via the bound API."""
        return self._api.followers(user_id=self.id, **kargs)

    def follow(self):
        """Follow this user and update the cached ``following`` flag."""
        self._api.create_friendship(user_id=self.id)
        self.following = True

    def unfollow(self):
        """Unfollow this user and update the cached ``following`` flag."""
        self._api.destroy_friendship(user_id=self.id)
        self.following = False

    def lists_memberships(self, *args, **kargs):
        """Return lists this user is a member of, via the bound API."""
        return self._api.lists_memberships(user=self.screen_name, *args, **kargs)

    def lists_subscriptions(self, *args, **kargs):
        """Return lists this user subscribes to, via the bound API."""
        return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)

    def lists(self, *args, **kargs):
        """Return this user's lists via the bound API."""
        return self._api.lists_all(user=self.screen_name, *args, **kargs)

    def followers_ids(self, *args, **kargs):
        """Return ids of this user's followers via the bound API."""
        return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
@classmethod
def parse | (cls, api, json):
dm = cls(api)
for k, v in json.items():
if k == 'sender' or k == 'recipient':
setattr(dm, k, User.parse(api, v))
elif k == 'created_at':
setattr(dm, k, parse_datetime(v))
else:
setattr(dm, k, v)
return dm
def destroy(self):
return self._api.destroy_direct_message(self.id)
class Friendship(Model):
@classmethod
def parse(cls, api, json) | :
relationship = json['relationship']
# parse source
source = cls(api)
for k, v in relationship['source'].items():
setattr(source, k, v)
# parse target
target = cls(api)
for k, v in relationship['target'].items():
setattr(target, k, v)
return source, target
class Category(Model):
    """A category model; every JSON field is copied verbatim."""

    @classmethod
    def parse(cls, api, json):
        """Build a Category by copying each JSON field onto the instance."""
        category = cls(api)
        for key, value in json.items():
            setattr(category, key, value)
        return category
class SavedSearch(Model):
    """A saved search belonging to the authenticated user."""

    @classmethod
    def parse(cls, api, json):
        """Build a SavedSearch, converting ``created_at`` into a datetime."""
        search = cls(api)
        for key, value in json.items():
            if key == 'created_at':
                setattr(search, key, parse_datetime(value))
            else:
                setattr(search, key, value)
        return search

    def destroy(self):
        """Delete this saved search via the bound API."""
        return self._api.destroy_saved_search(self.id)
class SearchResults(ResultSet):
    """Search API response: parsed statuses plus pagination metadata."""

    @classmethod
    def parse(cls, api, json):
        """Build a SearchResults set from a search API JSON payload."""
        metadata = json['search_metadata']
        results = SearchResults()
        # Pagination and bookkeeping fields from the response metadata.
        results.refresh_url = metadata.get('refresh_url')
        results.completed_in = metadata.get('completed_in')
        results.query = metadata.get('query')
        results.count = metadata.get('count')
        results.next_results = metadata.get('next_results')
        for status in json['statuses']:
            results.append(Status.parse(api, status))
        return results
class List(Model):
    @classmethod
    def parse(cls, api, json):
        """Build a List, parsing the owning user and creation datetime."""
        lst = List(api)
        for k, v in json.items():
            if k == 'user':
                setattr(lst, k, User.parse(api, v))
            elif k == 'created_at':
                setattr(lst, k, parse_datetime(v))
            else:
                setattr(lst, k, v)
        return lst
@classmethod
def parse_list(cls, api, json_list, result_set=None):
results = ResultSet()
if isinstance(json_list, dict):
json_list = json_list['lists']
for obj in json_list:
results.append(cls.parse(api, obj))
return results
    def update(self, **kargs):
        """Update this list's metadata (keyed by slug) via the bound API."""
        return self._api.update_list(self.slug, **kargs)
def destroy(self):
return self._api.destroy_li |
Kryz/sentry | tests/sentry/web/frontend/test_auth_organization_login.py | Python | bsd-3-clause | 6,946 | 0 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import AuthIdentity, AuthProvider, OrganizationMember
from sentry.testutils import AuthProviderTestCase
# TODO(dcramer): this is an integration test
class OrganizationAuthSettingsTest(AuthProviderTestCase):
    def test_renders_basic_login_form(self):
        """Without SSO, the org login page shows the form and allows registration."""
        organization = self.create_organization(name='foo', owner=self.user)
        team = self.create_team(organization=organization)
        project = self.create_project(team=team)
        path = reverse('sentry-auth-organization', args=[organization.slug])
        self.login_as(self.user)
        resp = self.client.get(path)
        assert resp.status_code == 200
        self.assertTemplateUsed(resp, 'sentry/organization-login.html')
        assert resp.context['form']
        # No auth provider is configured, so no provider key should be exposed.
        assert 'provider_key' not in resp.context
        assert resp.context['CAN_REGISTER']
    def test_renders_auth_provider(self):
        """With an auth provider, the login page exposes it and disables registration."""
        organization = self.create_organization(name='foo', owner=self.user)
        team = self.create_team(organization=organization)
        project = self.create_project(team=team)
        auth_provider = AuthProvider.objects.create(
            organization=organization,
            provider='dummy',
        )
        path = reverse('sentry-auth-organization', args=[organization.slug])
        self.login_as(self.user)
        resp = self.client.get(path)
        self.assertTemplateUsed(resp, 'sentry/organization-login.html')
        assert resp.context['provider_key'] == 'dummy'
        assert not resp.context['CAN_REGISTER']
def test_basic_provider_flow_as_anonymous(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
auth_provider = AuthProvider.objects.create(
organization=organization,
provider='dummy',
)
path = reverse('sentry-auth-organization', args=[organization.slug])
resp = self.client.post(path)
| assert resp.status_code == 200
assert self.provider.TEMPLATE in resp.content
path = reverse('sentry-auth-sso')
resp = self.client.post(path, {'email': 'foo@example.com'})
self.assertTemplateUsed(resp, 'sentry/auth-confirm-identity.html')
assert resp.status_code == 200
resp = self.client.post(path, {'op': 'newuser'})
assert resp.s | tatus_code == 302
auth_identity = AuthIdentity.objects.get(
auth_provider=auth_provider,
)
user = auth_identity.user
assert user.email == 'foo@example.com'
member = OrganizationMember.objects.get(
organization=organization,
user=user,
)
assert getattr(member.flags, 'sso:linked')
assert not getattr(member.flags, 'sso:invalid')
    def test_basic_provider_flow_as_existing_user(self):
        """A logged-in user whose email matches the SSO identity can link accounts."""
        organization = self.create_organization(name='foo', owner=self.user)
        team = self.create_team(organization=organization)
        project = self.create_project(team=team)
        auth_provider = AuthProvider.objects.create(
            organization=organization,
            provider='dummy',
        )
        user = self.create_user('bar@example.com')
        path = reverse('sentry-auth-organization', args=[organization.slug])
        self.login_as(user)
        resp = self.client.post(path)
        assert resp.status_code == 200
        assert self.provider.TEMPLATE in resp.content
        path = reverse('sentry-auth-sso')
        resp = self.client.post(path, {'email': 'bar@example.com'})
        # Matching email: the user is asked to confirm linking the identity.
        self.assertTemplateUsed(resp, 'sentry/auth-confirm-link.html')
        assert resp.status_code == 200
        resp = self.client.post(path, {'op': 'confirm'})
        assert resp.status_code == 302
        auth_identity = AuthIdentity.objects.get(
            auth_provider=auth_provider,
        )
        assert auth_identity.user == user
        member = OrganizationMember.objects.get(
            organization=organization,
            user=user,
        )
        # Membership should be flagged as SSO-linked and valid.
        assert getattr(member.flags, 'sso:linked')
        assert not getattr(member.flags, 'sso:invalid')
    def test_basic_provider_flow_as_existing_user_new_account(self):
        """Choosing 'new user' during SSO link creates a separate account."""
        organization = self.create_organization(name='foo', owner=self.user)
        team = self.create_team(organization=organization)
        project = self.create_project(team=team)
        auth_provider = AuthProvider.objects.create(
            organization=organization,
            provider='dummy',
        )
        user = self.create_user('bar@example.com')
        path = reverse('sentry-auth-organization', args=[organization.slug])
        self.login_as(user)
        resp = self.client.post(path)
        assert resp.status_code == 200
        assert self.provider.TEMPLATE in resp.content
        path = reverse('sentry-auth-sso')
        # The SSO email differs from the logged-in user's email.
        resp = self.client.post(path, {'email': 'foo@example.com'})
        self.assertTemplateUsed(resp, 'sentry/auth-confirm-link.html')
        assert resp.status_code == 200
        resp = self.client.post(path, {'op': 'newuser'})
        assert resp.status_code == 302
        auth_identity = AuthIdentity.objects.get(
            auth_provider=auth_provider,
        )
        # A brand-new user should own the identity, not the logged-in user.
        assert auth_identity.user != user
        assert auth_identity.user.email == 'foo@example.com'
        member = OrganizationMember.objects.get(
            organization=organization,
            user=auth_identity.user,
        )
        assert getattr(member.flags, 'sso:linked')
        assert not getattr(member.flags, 'sso:invalid')
    def test_basic_provider_flow_as_existing_user_and_identity(self):
        """An already-linked identity logs straight in (302) and flags membership."""
        organization = self.create_organization(name='foo', owner=self.user)
        team = self.create_team(organization=organization)
        project = self.create_project(team=team)
        auth_provider = AuthProvider.objects.create(
            organization=organization,
            provider='dummy',
        )
        auth_identity = AuthIdentity.objects.create(
            auth_provider=auth_provider,
            ident='bar@example.com',
            user=self.user,
        )
        path = reverse('sentry-auth-organization', args=[organization.slug])
        self.login_as(self.user)
        resp = self.client.post(path)
        assert resp.status_code == 200
        assert self.provider.TEMPLATE in resp.content
        path = reverse('sentry-auth-sso')
        # Identity already exists: no confirmation page, immediate redirect.
        resp = self.client.post(path, {'email': 'bar@example.com'})
        assert resp.status_code == 302
        member = OrganizationMember.objects.get(
            organization=organization,
            user=self.user,
        )
        assert getattr(member.flags, 'sso:linked')
        assert not getattr(member.flags, 'sso:invalid')
|
ee-in/python-api | plotly/tests/test_core/test_graph_objs/test_get_data.py | Python | mit | 6,703 | 0 | from __future__ import absolute_import
from unittest import TestCase
from plotly.graph_objs import (Data, Figure, Layout, Line, Margin, Marker,
Scatter, XAxis, YAxis)
class TestGetData(TestCase):
fig = None
def setUp(self):
super(TestGetData, self).setUp()
self.fig = Figure(
data=Data([
Scatter(
x=[52698, 43117],
y=[53, 31],
mode='markers',
name='North America',
text=['United States', 'Canada'],
marker=Marker(
color='rgb(164, 194, 244)',
size=12,
line=Line(
color='white',
width=0.5
)
)
),
Scatter(
x=[39317, 37236, 35650, 30066, 29570, 27159, 23557, 21046,
18007],
y=[33, 20, 13, 19, 27, 19, 49, 44, 38],
mode='markers',
name='Europe',
text=['Germany', 'Britain', 'France', 'Spain', 'Italy',
'Czech Rep.', 'Greece', 'Poland'],
marker=Marker(
color='rgb(255, 217, 102)',
size=12,
line=Line(
color='white',
width=0.5
)
)
),
Scatter(
x=[42952, 37037, 33106, 17478, 9813, 5253, 4692, 3899],
y=[23, 42, 54, 89, 14, 99, 93, 70],
mode='markers',
name='Asia/Pacific',
text=['Australia', 'Japan', 'South Korea', 'Malaysia',
'China', 'Indonesia', 'Philippines', 'India'],
marker=Marker(
color='rgb(234, 153, 153)',
size=12,
line=Line(
color='white',
width=0.5
)
)
),
Scatter(
x=[19097, 18601, 15595, 13546, 12026, 7434, 5419],
y=[43, 47, 56, 80, 86, 93, 80],
mode='markers',
name='Latin America',
text=['Chile', 'Argentina', 'Mexico', 'Venezuela',
'Venezuela', 'El Salvador', 'Bolivia'],
marker=Marker(
color='rgb(142, 124, 195)',
size=12,
line=Line(
color='white',
width=0.5
)
)
)
]),
layout=Layout(
title='Quarter 1 Growth',
autosize=False,
width=500,
height=500,
xaxis=XAxis(
title='GDP per Capita',
showgrid=False,
zeroline=False
),
yaxis=YAxis(
title='Percent',
showline=False
),
margin=Margin(
l=65,
r=50,
b=65,
t=90
)
)
)
def test_get_data(self):
data = self.fig.get_data()
comp_data = [
{
| 'name': 'North America',
'text': ['United States', 'Canada'],
'x' | : [52698, 43117],
'y': [53, 31]
},
{
'name': 'Europe',
'text': ['Germany', 'Britain', 'France', 'Spain', 'Italy',
'Czech Rep.', 'Greece', 'Poland'],
'x': [39317, 37236, 35650, 30066, 29570, 27159, 23557, 21046,
18007],
'y': [33, 20, 13, 19, 27, 19, 49, 44, 38]
},
{
'name': 'Asia/Pacific',
'text': ['Australia', 'Japan', 'South Korea', 'Malaysia',
'China', 'Indonesia', 'Philippines', 'India'],
'x': [42952, 37037, 33106, 17478, 9813, 5253, 4692, 3899],
'y': [23, 42, 54, 89, 14, 99, 93, 70]},
{
'name': 'Latin America',
'text': ['Chile', 'Argentina', 'Mexico', 'Venezuela',
'Venezuela', 'El Salvador', 'Bolivia'],
'x': [19097, 18601, 15595, 13546, 12026, 7434, 5419],
'y': [43, 47, 56, 80, 86, 93, 80]
}
]
self.assertEqual(data, comp_data)
    def test_get_data_flatten(self):
        """get_data(flatten=True) keys each array by '<trace name>.<attr>'."""
        # Similar to test_get_data, except nested objects are flattened.
        flat_data = self.fig.get_data(flatten=True)
        comp_data = {
            'Europe.x': [39317, 37236, 35650, 30066, 29570, 27159, 23557,
                         21046, 18007],
            'Europe.y': [33, 20, 13, 19, 27, 19, 49, 44, 38],
            'Asia/Pacific.x': [42952, 37037, 33106, 17478, 9813, 5253, 4692,
                               3899],
            'Latin America.text': ['Chile', 'Argentina', 'Mexico', 'Venezuela',
                                   'Venezuela', 'El Salvador', 'Bolivia'],
            'North America.x': [52698, 43117],
            'Asia/Pacific.y': [23, 42, 54, 89, 14, 99, 93, 70],
            'Asia/Pacific.text': ['Australia', 'Japan', 'South Korea',
                                  'Malaysia', 'China', 'Indonesia',
                                  'Philippines', 'India'],
            'North America.y': [53, 31],
            'North America.text': ['United States', 'Canada'],
            'Europe.text': ['Germany', 'Britain', 'France', 'Spain', 'Italy',
                            'Czech Rep.', 'Greece', 'Poland'],
            'Latin America.x': [19097, 18601, 15595, 13546, 12026, 7434, 5419],
            'Latin America.y': [43, 47, 56, 80, 86, 93, 80]
        }
        self.assertEqual(flat_data, comp_data)
# TODO test for Data, Scatter, etc..
    def test_flatten_repeated_trace_names(self):
        """Duplicate trace names are de-duplicated with _1, _2, ... suffixes."""
        dl = Data([Scatter(name='thesame', x=[1, 2, 3]) for _ in range(3)])
        data = dl.get_data(flatten=True)
        comp_data = {
            'thesame.x': [1, 2, 3],
            'thesame_1.x': [1, 2, 3],
            'thesame_2.x': [1, 2, 3]
        }
        self.assertEqual(data, comp_data)
|
johngian/mozillians | mozillians/common/utils.py | Python | bsd-3-clause | 3,547 | 0.000282 | import sys
from django.conf import settings
import requests
import waffle
from nameparser import HumanName
def absolutify(url):
    """Prepend the site URL to *url*.

    Uses ``settings.SITE_URL`` when defined; otherwise builds the base from
    ``settings.PROTOCOL``/``DOMAIN``/``PORT``, omitting default ports.
    """
    site_url = getattr(settings, 'SITE_URL', False)
    # If we don't define it explicitly, assemble the base URL ourselves.
    if not site_url:
        protocol = settings.PROTOCOL
        hostname = settings.DOMAIN
        port = settings.PORT
        # Default ports (443 for https, 80 for http) are left implicit.
        if (protocol, port) in (('https://', 443), ('http://', 80)):
            site_url = '%s%s' % (protocol, hostname)
        else:
            site_url = '%s%s:%s' % (protocol, hostname, port)
    return site_url + url
def akismet_spam_check(user_ip, user_agent, **optional):
    """Check content against the Akismet comment-check API.

    Returns True (spam) or False (ham); returns None when no API key is
    configured or the waffle switch is off. Raises on an Akismet error reply.
    """
    api_key = getattr(settings, 'AKISMET_API_KEY', '')
    check_enabled = waffle.switch_is_active('AKISMET_CHECK_ENABLED')
    if not api_key or not check_enabled:
        return None
    endpoint = 'https://{0}.rest.akismet.com/1.1/comment-check'.format(api_key)
    payload = {
        'blog': settings.SITE_URL,
        'user_ip': user_ip,
        'user_agent': user_agent,
    }
    payload.update(optional)
    response = requests.post(endpoint, data=payload)
    response.raise_for_status()
    verdict = response.text
    if verdict == 'true':
        return True
    if verdict == 'false':
        return False
    # Anything other than 'true'/'false' signals an API error.
    error = response.headers.get('x-akismet-debug-help')
    raise Exception('Akismet raised an error: {0}'.format(error))
def is_test_environment():
    """Return True when the process was launched as a test run.

    Detected by the first CLI argument being ``test`` (e.g. ``manage.py test``).
    """
    return len(sys.argv) > 1 and sys.argv[1] == 'test'
def bundle_profile_data(profile_id, delete=False):
"""Packs all the Identity Profiles of a user into a dictionary."""
from mozillians.common.templatetags.helpers import get_object_or_none
from mozillians.users.models import IdpProfile, UserProfile
try:
profile = UserProfile.objects.get(pk=profile_id)
except UserProfile.DoesNotExist:
return []
human_name = HumanName(profile.full_name)
primary_idp = get_object_or_none(IdpProfile, profile=profile, primary=True)
primary_login_email = profile.email
if primary_idp:
primary_login_email = primary_idp.email
results = []
for idp in profile.idp_profiles.all():
data = {
'user_id': idp.auth0_user_id,
'timezone': profile.timezone,
'active': profile.user.is_active,
'lastModified': profile.last_updated.isoformat(),
'created': profile.user.date_joined.isoformat(),
'userName': profile.user.username,
'displayName': profile.display_name,
'primaryEmail': primary_login_email,
'emails': profile.get_cis_emails(),
'uris': profile.get_cis_uris(),
'picture': profile.get_photo_url(),
'shirtSize': profile.get_tshirt_display() or '',
'groups': [] if delete else profile.get_cis_g | roups(idp),
'tags': [] if delete else profile.get_cis_tags(),
# Derived fields
'firstName': human_name.first,
'lastName': human_name.last,
# Hardcoded fields
'preferredLanguage': 'en_US',
'phoneNumbers': [],
'nicknames': [],
| 'SSHFingerprints': [],
'PGPFingerprints': [],
'authoritativeGroups': []
}
results.append(data)
return results
|
f0x11/Merak | spider/__init__.py | Python | mit | 68 | 0 | #!/usr/b | in/env python3
# -*- coding: utf-8 | -*-
__author__ = 'f0x11'
|
mypaint/mypaint | gui/meta.py | Python | gpl-2.0 | 10,067 | 0.002598 | # -*- coding: utf-8 -*-
# This file is part of MyPaint.
# Copyright (C) 2007-2015 by Martin Renold <martinxyz@gmx.ch>
# Copyright (C) 2015-2016 by the MyPaint Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Program meta-information: consts & display.
See also `lib.meta`.
"""
## Imports
from __future__ import division, print_function
import sys
import os
import platform
from lib.gibindings import Gtk
from lib.gibindings import GdkPixbuf
from lib.gibindings import GLib
import cairo
from lib.gettext import C_
import lib.meta
from lib.xml import escape
## Program-related string constants
COPYRIGHT_STRING = C_(
"About dialog: copyright statement",
u"Copyright (C) 2005-2020\n"
u"Martin Renold and the MyPaint Development Team"
)
WEBSITE_URI = "http://mypaint.org"
LICENSE_SUMMARY = C_(
"About dialog: license summary",
u"This program is free software; you can redistribute it and/or modify "
u"it under the terms of the GNU General Public License as published by "
u"the Free Software Foundation; either version 2 of the License, or "
u"(at your option) any later version.\n"
u"\n"
u"This program is distributed in the hope that it will be useful, "
u"but WITHOUT ANY WARRANTY. See the COPYING file for more details."
)
## Credits-related string constants
# Strings for specific tasks, all translated
_TASK_PROGRAMMING = C_(
"About dialog: credits: tasks",
u"programming"
)
_TASK_PORTING = C_(
"About dialog: credits: tasks",
u"portability"
)
_TASK_PROJECT_MANAGEMENT = C_(
"About dialog: credits: tasks",
u"project management"
)
_TASK_BRUSHES = C_(
"About dialog: credits: tasks: brush presets and icons",
u"brushes"
)
_TASK_PATTERNS = C_(
"About dialog: credits: tasks: background paper textures",
u"patterns"
)
_TASK_TOOL_ICONS = C_(
"About dialog: credits: tasks: icons for internal tools",
u"tool icons"
)
_TASK_APP_ICON = C_(
"About dialog: credits: tasks: the main application icon",
u"desktop icon"
)
_TASK_PALETTES = C_(
"About dialog: credits: tasks: palettes",
u"palettes"
)
_TASK_DOCS = C_(
"About dialog: credits: tasks: docs, manuals and HOWTOs",
u"documentation"
)
_TASK_SUPPORT = C_(
"About dialog: credits: tasks: user support",
u"support"
)
_TASK_OUTREACH = C_(
"About dialog: credits: tasks: outreach (social media, ads?)",
u"outreach"
)
_TASK_COMMUNITY = C_(
"About dialog: credits: tasks: running or building a community",
u"community"
)
_TASK_COMMA = C_(
"About dialog: credits: tasks: joiner punctuation",
u", ",
)
# List contributors in order of their appearance.
# The author's name is always written in their native script,
# and is not marked for translation. It may also have:
# transcriptions (Latin, English-ish) in brackets following, and/or
# a quoted ’nym in Latin script.
# For <given(s)> <surname(s)> combinations,
# a quoted publicly-known alias may go after the given name.
# TODO: Simplify/unify how the dialog is built.
# - This should really be built from a giant matrix.
# - Each task type should determine a tab of the about dialog
# - Contributors will still appear on multiple tabs,
# - but that'd be automatic now
# - Keep it reasonably simple, so that contributors can add themselves!
# - Maybe get rid of the (%s) formatting junk?
# - Split out ’nyms and transliterations too?
_AUTHOR_CREDITS = [
u"Martin Renold (%s)" % _TASK_PROGRAMMING,
u"Yves Combe (%s)" % _TASK_PORTING,
u"Popolon (%s)" % _TASK_PROGRAMMING,
u"Clement Skau (%s)" % _TASK_PROGRAMMING,
u"Jon Nordby (%s)" % _TASK_PROGRAMMING,
u"Álinson Santos (%s)" % _TASK_PROGRAMMING,
u"Tumagonx (%s)" % _TASK_PORTING,
u"Ilya Portnov (%s)" % _TASK_PROGRAMMING,
u"Jonas Wagner (%s)" % _TASK_PROGRAMMING,
u"Luka Čehovin (%s)" % _TASK_PROGRAMMING,
u"Andrew Chadwick (%s)" % _TASK_COMMA.join([
_TASK_PROGRAMMING,
_TASK_PROJECT_MANAGEMENT,
_TASK_PORTING,
]),
u"Till Hartmann (%s)" % _TASK_PROGRAMMING,
u'David Grundberg (%s)' % _TASK_PROGRAMMING,
u"Krzysztof Pasek (%s)" % _TASK_PROGRAMMING,
u"Ben O’Steen (%s)" % _TASK_PROGRAMMING,
u"Ferry Jérémie (%s)" % _TASK_PROGRAMMING,
u"しげっち ‘sigetch’ (%s)" % _TASK_PROGRAMMING,
u"Richard Jones (%s)" % _TASK_PROGRAMMING,
u"David Gowers (%s)" % _TASK_PROGRAMMING,
u"Micael Dias (%s)" % _TASK_PROGRAMMING,
u"Anna Harren (%s)" % _TASK_COMMA.join([
_TASK_COMMUNITY,
_TASK_PROGRAMMING,
_TASK_DOCS,
]),
u"Sebastien Leon (%s)" % _TASK_PROGRAMMING,
u"Ali Lown (%s)" % _TASK_PROGRAMMING,
u"Brien Dieterle (%s)" % _TASK_PROGRAMMING,
u"Jenny Wong (%s)" % _TASK_PROGRAMMING,
u"Dmitry Utkin ‘loentar’ (%s)" % _TASK_PROGRAMMING,
u"ShadowKyogre (%s)" % _TASK_PROGRAMMING,
u"Albert Westra (%s)" % _TASK_COMMA.join([
_TASK_COMMUNITY,
_TASK_PROGRAMMING,
]),
u"Cortexer (%s)" % _TASK_PROGRAMMING,
u"Elliott Sales de Andrade (%s)" % _TASK_PORTING,
u"Alberto Leiva Popper (%s)" % _TASK_PROGRAMMING,
u"Alinson Xavier (%s)" % _TASK_PROGRAMMING,
u"Jesper Lloyd (%s)" % _TASK_COMMA.join([
_TASK_PROGRAMMING,
_TASK_PROJECT_MANAGEMENT,
]),
]
_ARTIST_CREDITS = [
u"Artis Rozentāls (%s)" % _TASK_BRUSHES,
u"Popolon (%s)" % _TASK_BRUSHES,
u"Marcelo ‘Tanda’ Cerviño (%s)" % _TASK_COMMA.join([
_TASK_PATTERNS,
_TASK_BRUSHES,
]),
u"David Revoy (%s)" % _TASK_COMMA.join([
_TASK_BRUSHES,
_TASK_TOOL_ICONS,
_TASK_OUTREACH,
]),
u"Ramón Miranda (%s)" % _TASK_COMMA.join([
_TASK_BRUSHES,
_TASK_PATTERNS,
]),
u"Enrico Guarnieri ‘Ico_dY’ (%s)" % _TASK_BRUSHES,
u'Sebastian Kraft (%s)' % _TASK_APP_ICON,
u"Nicola Lunghi (%s)" % _TASK_PATTERNS,
u"Toni Kasurinen (%s)" % _TASK_BRUSHES,
u"Сан Саныч ‘MrMamurk’ (%s)" % _TASK_PATTERNS,
u"Andrew Chadwick (%s)" % _TASK_TOOL_ICONS,
u"Ben O’Steen (%s)" % _TASK_TOOL_ICONS,
u"Guillaume Loussarévian ‘Kaerhon’ (%s)" % _TASK_BRUSHES,
u"Jakub Steiner ‘jimmac’ (%s)" % _TASK_COMMA.join([
_TASK_APP_ICON,
_TASK_PALETTES,
]),
u"ShadowKyogre (%s)" % _TASK_TOOL_ICONS,
u"Albert Westra (%s)" % _TASK_TOOL_ICONS,
u"Brien Dieterle (%s)" % _TASK_BRUSHES,
u"Jesper Lloyd (%s)" % _TASK_APP_ICON,
]
_TRANSLATOR_CREDITS = C_(
"About dialog: credits: translator credits (your name(s) here!)",
# TRANSLATORS: THIS SHOULD NOT BE TRANSLATED LITERALLY
# TRANSLATORS: The "translation" of this string should be a list of names
# TRANSLATORS: of the people who have contributed to the translation to
# TRANSLATORS: this language. One name pe | r line, optionally with an email
# TRANSLATORS: address within angle brackets "<email@somewhere.com>", and
# TRANSLATORS: optionally with a year or year range indicating when the
# TRANSLATORS: contributions were made, e.g: 2005 or 2010-2012 etc.
u"translator-credits",
)
## About dialog for the app
def get_libs_versi | on_string():
"""Get a string describing the versions of important libs.
>>> type(get_libs_version_string()) == str
True
"""
versions = [
("Python", "{major}.{minor}.{micro}".format(
major = sys.version_info.major,
minor = sys.version_info.minor,
micro = sys.version_info.micro,
)),
("GTK", "{major}.{minor}.{micro}".format(
major = Gtk.get_major_version(),
minor = Gtk.get_minor_version(),
micro = Gtk.get_micro_version(),
)),
("GdkPixbuf", GdkPixbuf.PIXBUF_VERSION),
("Cairo", cairo.cairo_version_string()), # NOT cairo.version
("GLib", "{major}.{minor}.{micro}".format(
major = GLib.MAJOR_VERSION,
minor = GLib.MINOR_VERSION,
micro = GLib.MICRO_VERSION,
)),
]
return ", ".j |
google-research/language | language/mentionmemory/utils/test_utils.py | Python | apache-2.0 | 4,490 | 0.008241 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils."""
import os
from typing import Dict, Optional
from absl.testing import parameterized
from jax.lib import xla_bridge
import numpy as np
class TestCase(parameterized.TestCase):
    """Custom test class containing additional useful utility methods."""

    def assertArrayEqual(self, actual: np.ndarray, expected: np.ndarray):
        """Assert the two arrays contain the same values (shape-insensitive)."""
        self.assertSequenceEqual(actual.ravel().tolist(),
                                 expected.ravel().tolist())

    def assertArrayAlmostEqual(self,
                               actual: np.ndarray,
                               expected: np.ndarray,
                               places: Optional[int] = 7):
        """Assert the two arrays are element-wise equal to *places* decimals."""
        self.assertSequenceAlmostEqual(actual.ravel().tolist(),
                                       expected.ravel().tolist(),
                                       places=places)
def force_multi_devices(num_cpu_devices: int):
    """Run with set number of CPU devices.

    Appends ``--xla_force_host_platform_device_count`` to the ``XLA_FLAGS``
    environment variable (unless the caller already set that flag) and clears
    the cached XLA backend so the next backend lookup picks up the change.
    """
    prev_xla_flags = os.getenv('XLA_FLAGS')
    flags_str = prev_xla_flags or ''
    # Don't override user-specified device count, or other XLA flags.
    if 'xla_force_host_platform_device_count' not in flags_str:
        os.environ['XLA_FLAGS'] = (
            flags_str +
            ' --xla_force_host_platform_device_count={}'.format(num_cpu_devices))
    # Clear any cached backends so new CPU backend will pick up the env var.
    xla_bridge.get_backend.cache_clear()
def tensor_to_numpy(tensor):
    """Return *tensor* as a numpy array.

    Already-numpy inputs are returned unchanged; anything else is converted
    via its ``.numpy()`` method.
    """
    return tensor if isinstance(tensor, np.ndarray) else tensor.numpy()
def gen_mention_pretraining_sample(
text_length: int,
n_mentions: int,
n_linked_mentions: int,
max_length: int = 100,
vocab_size: int = 100,
entity_vocab_size: int = 1000,
mention_size: int = 2,
) -> Dict[str, np.ndarray]:
"""Generate test raw decoded input for mention pre-training pipeline."""
text_pad_shape = (0, max_length - text_length)
text_ids = np.random.randint(
low=1, high=vocab_size, size=text_length, dtype=np.int64)
text_ids = np.pad(text_ids, pad_width=text_pad_shape, mode='constant')
text_mask = np.pad(
np.ones(shape=text_length, dtype=np.int64),
pad_width=text_pad_shape,
mode='constant')
mention_start_positions = np.random.choice(
text_length // mention_size, size=n_mentions,
replace=False) * mentio | n_size
mention_start_positions.sort()
mention_end_positions = mention_start_positions + mention_size - 1
dense_span_starts = np.zeros(shape=max_length, dtype=np.int64)
dense_span_starts[mention_start_positions] = 1
dense_span_ends = np.zeros(shape=max_length, dtype=np.int64)
dense_span_ends[mention_end_positions] = 1
linked_mention_indices = np.arange(n_linked_mentions)
linked_mention_position_slices = [
np.arange(mention_start_positions[idx], | mention_end_positions[idx] + 1)
for idx in linked_mention_indices
]
if n_linked_mentions > 0:
dense_linked_mention_positions = np.concatenate(
linked_mention_position_slices)
else:
dense_linked_mention_positions = np.arange(0)
linked_mention_ids = np.random.randint(
low=1, high=entity_vocab_size, size=len(linked_mention_indices))
dense_mention_mask = np.zeros(shape=max_length, dtype=np.int64)
dense_mention_mask[dense_linked_mention_positions] = 1
dense_mention_ids = np.zeros(shape=max_length, dtype=np.int64)
for idx, position_slice in enumerate(linked_mention_position_slices):
dense_mention_ids[position_slice] = linked_mention_ids[idx]
dense_answer_mask = np.ones_like(dense_mention_mask)
raw_example = {
'text_ids': text_ids,
'text_mask': text_mask,
'dense_span_starts': dense_span_starts,
'dense_span_ends': dense_span_ends,
'dense_mention_mask': dense_mention_mask,
'dense_mention_ids': dense_mention_ids,
'dense_answer_mask': dense_answer_mask,
}
return raw_example
|
gnodar01/basic-blog | util/jinjaenv.py | Python | apache-2.0 | 325 | 0 | import os
import jinja2
# Directory holding the Jinja2 templates, relative to this module.
template_dir = os.path.join(os.path.dirname(__file__), '../views')
# Shared environment; autoescape=True HTML-escapes rendered values by default.
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
                               autoescape=True)
def render_str(template, **params):
    """Render the named template file with *params* and return the result."""
    return jinja_env.get_template(template).render(params)
|
MAPC/cedac | map/models.py | Python | bsd-3-clause | 1,599 | 0.001876 | from django.db import models
from django.utils.translation import ugettext as _
class Category(models.Model):
    """A named grouping for map layers, sorted by the ``order`` field."""
    title = models.CharField(max_length=50)
    slug = models.SlugField()
    order = models.IntegerField(default=1)  # sort position (see Meta.ordering)
    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')
        ordering = ['order']
    def __unicode__(self):
        # Python 2 display name (used e.g. in the Django admin).
        return self.title
class WMSServer(models.Model):
    """A WMS endpoint that map layers can be fetched from."""
    title = models.CharField(max_length=50)
    url = models.URLField()  # base URL of the WMS service
    attribution = models.CharField(max_length=50)  # credit text shown on the map
    class Meta:
        verbose_name = _('WMSServer')
        verbose_name_plural = _('WMSServers')
    def __unicode__(self):
        # Python 2 display name (used e.g. in the Django admin).
        return self.title
class Layer(models.Model):
    """A single WMS map layer belonging to a :class:`Category`."""
    # GetMap image formats offered as choices for ``wms_format``.
    WMS_FORMAT_OPTIONS = (
        ('image/png', 'image/png'),
        ('image/jpeg', 'image/jpeg'),
    )
    title = models.CharField(max_length=100)
    category = models.ForeignKey(Category)
    visible = models.BooleanField()  # whether the layer starts enabled on the map
    # Sort positions: within its category list, and in the map draw order.
    category_order = models.IntegerField(default=1)
    map_order = models.IntegerField(default=1)
    # WMS request parameters used when fetching tiles from ``wms_server``.
    wms_server = models.ForeignKey(WMSServer)
    wms_layers = models.CharField(max_length=100)
    wms_styles = models.CharField(max_length=100, null=True, blank=True)
    wms_format = models.CharField(max_length=10, choices=WMS_FORMAT_OPTIONS, default='image/png')
    wms_transparent = models.BooleanField(default=True)
    class Meta:
        verbose_name = _('Layer')
        verbose_name_plural = _('Layers')
        ordering = ['category_order']
    def __unicode__(self):
        # Python 2 display name (used e.g. in the Django admin).
        return self.title
|
mcclurmc/juju | juju/providers/ec2/securitygroup.py | Python | agpl-3.0 | 7,289 | 0.000274 | from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from txaws.ec2.exception import EC2Error
from juju.errors import ProviderInteractionError
from juju.lib.twistutils import gather_results
from .utils import log
def _get_juju_security_group(provider):
"""Get EC2 security group name for environment of `provider`."""
return "juju-%s" % provider.environment_name
def _get_machine_group_name(provider, machine_id):
"""Get EC2 security group name associated just with `machine_id`."""
return "juju-%s-%s" % (provider.environment_name, machine_id)
# TODO These security group functions do not handle the eventual
# consistency seen with EC2. A future branch will add support for
# retry so that using code doesn't have to be aware of this issue.
#
# In addition, the functions work with respect to the machine id,
# since they manipulate a security group permanently associated with
# the EC2 provided machine, and the machine must be launched into this
# security group. This security group, per the above
# `_get_machine_group_name`, embeds the machine id, eg
# juju-moon-42. Ideally, this would not be the case. See the
# comments associated with the merge proposal of
# https://code.launchpad.net/~jimbaker/juju/expose-provider-ec2/
@inlineCallbacks
def open_provider_port(provider, machine, machine_id, port, protocol):
    """Authorize `port`/`proto` for the machine security group.

    The port is opened to the world (cidr 0.0.0.0/0). Raises
    ProviderInteractionError if the EC2 authorize call fails.
    """
    try:
        yield provider.ec2.authorize_security_group(
            _get_machine_group_name(provider, machine_id),
            ip_protocol=protocol,
            from_port=str(port), to_port=str(port),
            cidr_ip="0.0.0.0/0")
        log.debug("Opened %s/%s on provider machine %r",
                  port, protocol, machine.instance_id)
    except EC2Error, e:
        # Translate the provider-specific error into juju's own exception.
        raise ProviderInteractionError(
            "Unexpected EC2Error opening %s/%s on machine %s: %s"
            % (port, protocol, machine.instance_id, e.get_error_messages()))
@inlineCallbacks
def close_provider_port(provider, machine, machine_id, port, protocol):
    """Revoke `port`/`proto` for the machine security group.

    Mirrors open_provider_port: revokes the 0.0.0.0/0 rule for the
    given port. Raises ProviderInteractionError on EC2 failure.
    """
    try:
        yield provider.ec2.revoke_security_group(
            _get_machine_group_name(provider, machine_id),
            ip_protocol=protocol,
            from_port=str(port), to_port=str(port),
            cidr_ip="0.0.0.0/0")
        log.debug("Closed %s/%s on provider machine %r",
                  port, protocol, machine.instance_id)
    except EC2Error, e:
        # Translate the provider-specific error into juju's own exception.
        raise ProviderInteractionError(
            "Unexpected EC2Error closing %s/%s on machine %s: %s"
            % (port, protocol, machine.instance_id, e.get_error_messages()))
@inlineCallbacks
def get_provider_opened_ports(provider, machine, machine_id):
    """Gets the opened ports for `machine`.

    Retrieves the IP permissions associated with the machine
    security group, then parses them to return a set of (port,
    proto) pairs.
    """
    try:
        security_groups = yield provider.ec2.describe_security_groups(
            _get_machine_group_name(provider, machine_id))
    except EC2Error, e:
        raise ProviderInteractionError(
            "Unexpected EC2Error getting open ports on machine %s: %s"
            % (machine.instance_id, e.get_error_messages()))
    opened_ports = set()  # made up of (port, protocol) pairs
    for ip_permission in security_groups[0].allowed_ips:
        # Only world-open rules count as "opened" in juju's model.
        if ip_permission.cidr_ip != "0.0.0.0/0":
            continue
        from_port = int(ip_permission.from_port)
        to_port = int(ip_permission.to_port)
        if from_port == to_port:
            # Only return ports that are individually opened. We
            # ignore multi-port ranges, since they are set outside of
            # juju (at this time at least)
            opened_ports.add((from_port, ip_permission.ip_protocol))
    returnValue(opened_ports)
def _get_machine_security_group_from_instance(provider, instance):
    """Return the machine-specific security group of `instance`, or None.

    The instance's reservation carries both the shared environment group
    and the per-machine group; the first group that is not the shared
    environment group is the machine group.
    """
    env_group = _get_juju_security_group(provider)
    machine_groups = [g for g in instance.reservation.groups if g != env_group]
    if machine_groups:
        return machine_groups[0]
    # Ignore if no such group exists; this allows some limited
    # backwards compatibility with old setups without machine
    # security group
    log.info("Ignoring missing machine security group for instance %r",
             instance.instance_id)
    return None
@inlineCallbacks
def _delete_security_group(provider, group):
    """Wrap EC2 delete_security_group.

    Raises ProviderInteractionError when the EC2 delete call fails.
    """
    try:
        yield provider.ec2.delete_security_group(group)
        log.debug("Deleted security group %r", group)
    except EC2Error, e:
        raise ProviderInteractionError(
            "EC2 error when attempting to delete group %s: %s" % (group, e))
@inlineCallbacks
def remove_security_groups(provider, instance_ids):
    """Remove security groups associated with `instance_ids` for `provider`"""
    log.info(
        "Waiting on %d EC2 instances to transition to terminated state, "
        "this may take a while", len(instance_ids))
    # Repeatedly poll EC2 until instances are in terminated state;
    # upon reaching that state, delete associated machine security
    # groups. The limit of 200 polls is arbitrary and could be
    # specified by a command line option (and/or an overall
    # timeout). It's based on an observed ~500 ms roundtrip time per
    # call of the describe_instances web service, along with typically
    # taking about 40s to move all instances to a terminated state.
    wait_on = set(instance_ids)
    pending_deletions = []
    for i in xrange(200):
        if not wait_on:
            break
        instances = yield provider.ec2.describe_instances(*wait_on)
        for instance in instances:
            if instance.instance_state == "terminated":
                log.debug("Instance %r was terminated",
                          instance.instance_id)
                wait_on.discard(instance.instance_id)
                group = _get_machine_security_group_from_instance(
                    provider, instance)
                if group:
                    pending_deletions.append(
                        _delete_security_group(provider, group))
    if wait_on:
        # NOTE(review): _get_machine_security_group_from_instance can
        # return None for legacy instances; sorted()/join would then
        # misbehave — confirm whether that can occur here.
        outstanding = [
            _get_machine_security_group_from_instance(provider, instance)
            for instance in instances]
        log.error("Instance shutdown taking too long, "
                  "could not delete groups %s",
                  ", ".join(sorted(outstanding)))
    # Wait for all pending deletions to complete
    yield gather_results(pending_deletions)
@inlineCallbacks
def destroy_environment_security_group(provider):
    """Delete the security group for the environment of `provider`.

    Returns True on success, False when EC2 reports an error (the
    error is logged and otherwise ignored, since this is cleanup).
    """
    group = _get_juju_security_group(provider)
    try:
        yield provider.ec2.delete_security_group(group)
        log.debug("Deleted environment security group %r", group)
        returnValue(True)
    except EC2Error, e:
        # Ignore, since this is only attempting to cleanup
        log.debug(
            "Ignoring EC2 error when attempting to delete group %s: %s" % (
                group, e))
        returnValue(False)
miguelgrinberg/heat | heat/engine/resources/openstack/neutron/net.py | Python | apache-2.0 | 7,583 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine impor | t support
class Net(neutron.NeutronResource):
    """Heat resource implementing a Neutron network.

    Creates, updates and deletes a network through the Neutron API and
    optionally schedules it onto a specific set of DHCP agents.
    """

    # Property keys (template-facing names) for this resource.
    PROPERTIES = (
        NAME, VALUE_SPECS, ADMIN_STATE_UP, TENANT_ID, SHARED,
        DHCP_AGENT_IDS, PORT_SECURITY_ENABLED,
    ) = (
        'name', 'value_specs', 'admin_state_up', 'tenant_id', 'shared',
        'dhcp_agent_ids', 'port_security_enabled',
    )

    # Attribute keys resolvable via get_attr in templates.
    ATTRIBUTES = (
        STATUS, NAME_ATTR, SUBNETS, ADMIN_STATE_UP_ATTR, TENANT_ID_ATTR,
        PORT_SECURITY_ENABLED_ATTR, MTU_ATTR,
    ) = (
        "status", "name", "subnets", "admin_state_up", "tenant_id",
        "port_security_enabled", "mtu",
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('A string specifying a symbolic name for the network, which is '
              'not required to be unique.'),
            update_allowed=True
        ),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "network" object in the '
              'creation request. Parameters are often specific to installed '
              'hardware or extensions.'),
            default={},
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('A boolean value specifying the administrative status of the '
              'network.'),
            default=True,
            update_allowed=True
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant which will own the network. Only '
              'administrative users can set the tenant identifier; this '
              'cannot be changed using authorization policies.')
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this network should be shared across all tenants. '
              'Note that the default policy setting restricts usage of this '
              'attribute to administrative users only.'),
            default=False,
            update_allowed=True
        ),
        DHCP_AGENT_IDS: properties.Schema(
            properties.Schema.LIST,
            _('The IDs of the DHCP agent to schedule the network. Note that '
              'the default policy setting in Neutron restricts usage of this '
              'property to administrative users only.'),
            update_allowed=True
        ),
        PORT_SECURITY_ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Flag to enable/disable port security on the network. It '
              'provides the default value for the attribute of the ports '
              'created on this network'),
            update_allowed=True,
            support_status=support.SupportStatus(version='5.0.0')
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(
            _("The status of the network."),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _("The name of the network."),
            type=attributes.Schema.STRING
        ),
        SUBNETS: attributes.Schema(
            _("Subnets of this network."),
            type=attributes.Schema.LIST
        ),
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _("The administrative status of the network."),
            type=attributes.Schema.STRING
        ),
        TENANT_ID_ATTR: attributes.Schema(
            _("The tenant owning this network."),
            type=attributes.Schema.STRING
        ),
        PORT_SECURITY_ENABLED_ATTR: attributes.Schema(
            _("Port security enabled of the network."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.BOOLEAN
        ),
        MTU_ATTR: attributes.Schema(
            _("The maximum transmission unit size(in bytes) for the network."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.INTEGER
        ),
    }

    def handle_create(self):
        """Create the network; DHCP agent scheduling is handled separately."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # dhcp_agent_ids is not a valid Neutron network field; pop it and
        # apply the scheduling through the agent-scheduler API instead.
        dhcp_agent_ids = props.pop(self.DHCP_AGENT_IDS, None)
        net = self.neutron().create_network({'network': props})['network']
        self.resource_id_set(net['id'])
        if dhcp_agent_ids:
            self._replace_dhcp_agents(dhcp_agent_ids)

    def _show_resource(self):
        """Return the current network representation from Neutron."""
        return self.neutron().show_network(
            self.resource_id)['network']

    def check_create_complete(self, *args):
        """Poll until the network reaches a built state."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    def handle_delete(self):
        """Delete the network, tolerating a network already gone."""
        client = self.neutron()
        try:
            client.delete_network(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property changes; DHCP agents are updated out-of-band."""
        props = self.prepare_update_properties(json_snippet)
        dhcp_agent_ids = props.pop(self.DHCP_AGENT_IDS, None)
        if self.DHCP_AGENT_IDS in prop_diff:
            if dhcp_agent_ids is not None:
                self._replace_dhcp_agents(dhcp_agent_ids)
            del prop_diff[self.DHCP_AGENT_IDS]
        if len(prop_diff) > 0:
            self.neutron().update_network(
                self.resource_id, {'network': props})

    def check_update_complete(self, *args):
        """Poll until the updated network reaches a built state."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    def _replace_dhcp_agents(self, dhcp_agent_ids):
        """Reconcile DHCP agent scheduling to exactly `dhcp_agent_ids`."""
        ret = self.neutron().list_dhcp_agent_hosting_networks(
            self.resource_id)
        old = set([agent['id'] for agent in ret['agents']])
        new = set(dhcp_agent_ids)
        for dhcp_agent_id in new - old:
            try:
                self.neutron().add_network_to_dhcp_agent(
                    dhcp_agent_id, {'network_id': self.resource_id})
            except Exception as ex:
                # if 409 is happened, the agent is already associated.
                if not self.client_plugin().is_conflict(ex):
                    raise
        for dhcp_agent_id in old - new:
            try:
                self.neutron().remove_network_from_dhcp_agent(
                    dhcp_agent_id, self.resource_id)
            except Exception as ex:
                # assume 2 patterns about status_code following:
                #   404: the network or agent is already gone
                #   409: the network isn't scheduled by the dhcp_agent
                if not (self.client_plugin().is_conflict(ex) or
                        self.client_plugin().is_not_found(ex)):
                    raise
def resource_mapping():
    """Map Heat resource type names to the classes implementing them."""
    return {'OS::Neutron::Net': Net}
|
benkonrath/transip-api | tests/service_tests/test_webhosting.py | Python | mit | 4,475 | 0.001341 | import unittest
from transip.client import MODE_RO, MODE_RW
from transip.service.objects import MailBox, MailForward, WebHost
from transip.service.webhosting import WebhostingService
try:
    from unittest.mock import Mock, patch
except ImportError:
    # Python 2 fallback: mock was a separate third-party package.
    from mock import patch, Mock
class TestWebhostingService(unittest.TestCase):
    """Unit tests for WebhostingService; the SOAP transport is mocked out."""

    @patch('transip.client.SudsClient')
    def setUp(self, mock_client):
        """Build a service with stubbed cookie handling and a mock SOAP client."""
        self.service = WebhostingService(login='sundayafternoon')
        self.service.build_cookie = Mock(return_value={"cookie": "value"})
        self.service.update_cookie = Mock()
        self.i = mock_client.return_value

    def set_return_value(self, method, value):
        """Program the mocked SOAP `method` to return `value`."""
        getattr(self.i.service, method).return_value = value

    def _generic_test(self, soap_method, method, result, parameters=(), mode=MODE_RO):
        """Invoke `method` on the service and verify the underlying SOAP call,
        the cookie handling, and that the SOAP result is passed through."""
        self.set_return_value(soap_method, result)
        # CALL
        soap_result = getattr(self.service, method)(*parameters)
        # VERIFY
        self.service.build_cookie.assert_called_with(mode=mode, method=soap_method, parameters=parameters)
        self.service.update_cookie.assert_called_with({"cookie": "value"})
        getattr(self.i.service, soap_method).assert_called_with(*parameters)
        self.assertEqual(soap_result, result)

    def testConstructor(self):
        # The service URL is derived from the service name.
        vs = WebhostingService(login='sundayafternoon')
        self.assertEqual(vs.url, 'https://api.transip.nl/wsdl/?service=WebhostingService')

    def test_available_packages(self):
        self._generic_test(
            soap_method='getAvailablePackages',
            method='get_available_packages',
            result=['Webhosting s', 'Webhosting l', 'Webhosting xl', 'Email only']
        )

    def test_webhosting_domain_names(self):
        self._generic_test(
            soap_method='getWebhostingDomainNames',
            method='get_webhosting_domain_names',
            result=['example.com', 'since we are mocking, the results do not mater']
        )

    def test_info(self):
        self._generic_test(
            soap_method='getInfo',
            method='get_info',
            result=WebHost('example.com'),
            parameters=('example.com', )
        )

    def test_create_mailbox(self):
        mailbox = MailBox('info@example.com')
        self._generic_test(
            soap_method='createMailBox',
            method='create_mailbox',
            result=mailbox,
            parameters=('info@example.com', mailbox),
            mode=MODE_RW
        )

    def test_update_mailbox(self):
        self._generic_test(
            soap_method='modifyMailBox',
            method='update_mailbox',
            result='mock',
            parameters=('info@example.com', 'mock'),
            mode=MODE_RW
        )

    def test_delete_mailbox(self):
        self._generic_test(
            soap_method='deleteMailBox',
            method='delete_mailbox',
            result='mock',
            parameters=('info@example.com', 'mock'),
            mode=MODE_RW
        )

    def test_create_mail_forward(self):
        mail_forward = MailForward('test', 'info@example.com')
        self._generic_test(
            soap_method='createMailForward',
            method='create_mail_forward',
            result=mail_forward,
            parameters=('info@example.com', mail_forward),
            mode=MODE_RW
        )

    def test_update_mail_forward(self):
        self._generic_test(
            soap_method='modifyMailForward',
            method='update_mail_forward',
            result='mock',
            parameters=('info@example.com', 'mock'),
            mode=MODE_RW
        )

    def test_delete_mail_forward(self):
        self._generic_test(
            soap_method='deleteMailForward',
            method='delete_mail_forward',
            result='mock',
            parameters=('info@example.com', 'mock'),
            mode=MODE_RW
        )

    def test_get_available_upgrades(self):
        self._generic_test(
            soap_method='getAvailableUpgrades',
            method='get_available_upgrades',
            result='mock',
            parameters=('example.com',),
            mode=MODE_RO
        )

    def test_set_mailbox_password(self):
        self._generic_test(
            soap_method='setMailBoxPassword',
            method='set_mailbox_password',
            result='mock',
            parameters=('example.com', 'mailbox', 'password'),
            mode=MODE_RW
        )
|
egens/toolkit-for-ynab | lib/kango-framework-latest/kango/settings.py | Python | mit | 416 | 0.004808 | import os
import json
# Framework version and build identifiers.
VERSION = '1.7.9'
BUILD = 'e39b6ff2fcc8'
PACKAGE_ID = 'dev'
# Default keyword substitutions; entries may be overridden by an
# optional settings.json located next to this module.
KEYWORDS = {
    "product": "kango",
    "ie.engine": "KangoEngine",
    "ie.bho": "KangoBHO"
}
try:
    with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'settings.json'), 'r') as f:
        settings = json.load(f)
    KEYWORDS.update(settings.get('keywords', {}))
except IOError:
    # settings.json is optional; keep the defaults when it is absent.
    pass
lithiumtech/skybase.io | skybase/skytask/pack/submit.py | Python | apache-2.0 | 11,301 | 0.005309 | import os
import logging
import tarfile
import shutil
import ConfigParser
import tempfile
from skybase import config as sky_cfg
from skybase.skytask import SkyTask
from skybase import skytask
from skybase.actions import pack as pack_actions
from skybase.skytask import pack
from skybase.actions import sky_boto as sky_boto_actions
from skybase.utils.logger import Logger
def pack_submit_add_arguments(parser):
    """Register command-line options for the ``pack submit`` task.

    Adds -d/--directory (base_dir), -a/--artiball (artiball, default None)
    and -m/--mode (exec_mode, 'local' or 'restapi', default 'restapi').
    """
    parser.add_argument('-d', '--directory', dest='base_dir', action='store')
    parser.add_argument('-a', '--artiball', dest='artiball', action='store',
                        default=None)
    parser.add_argument('-m', '--mode',
                        dest='exec_mode',
                        action='store',
                        choices={'local', 'restapi'},
                        default='restapi',
                        help='execution mode (default local)')
class Submit(SkyTask):
def __init__(self, all_args, runner_cfg):
    """Prepare state and working directories for a pack submit run.

    :param all_args: dict of parsed task arguments ('base_dir', 'artiball', ...)
    :param runner_cfg: runner configuration object exposing a ``data`` dict
    """
    SkyTask.__init__(self, all_args, runner_cfg)
    self.logger = Logger(logging.getLogger(__name__), logging.INFO)
    self.name = 'pack.submit'
    self.args = all_args
    self.runner_cfg = runner_cfg
    # AWS credentials are resolved later, in execute().
    self.aws_access_key_id = None
    self.aws_secret_access_key = None
    if self.args['base_dir']:
        self.base_dir = self.args['base_dir']
        # BUG FIX: original used `is not 'skybase'`, an identity check on a
        # freshly-created string that is always true, so 'skybase' was
        # appended even when already present. Use equality instead.
        if self.base_dir.split('/')[-1] != 'skybase':
            self.base_dir = os.path.join(self.base_dir, 'skybase')
    else:
        # No base dir supplied: work inside a fresh temporary directory.
        self.tdir = tempfile.mkdtemp()
        self.base_dir = os.path.join(self.tdir, 'skybase')
    self.tmp_dir = os.path.join(self.base_dir, 'tmp')
    if not os.path.exists(self.tmp_dir):
        os.makedirs(self.tmp_dir)
def execute(self):
aws_creds_profile = self.runner_cfg.data['package_depot_aws_profile']
aws_creds_file = os.path.join(self.runner_cfg.data['runner_credentials_dir'], 'aws', 'config')
if os.path.exists(aws_creds_file):
config = ConfigParser.ConfigParser()
config.read([str(aws_creds_file)])
self.aws_access_key_id = config.get('profile ' + aws_creds_profile, 'aws_access_key_id')
self.aws_secret_access_key = config.get('profile ' + aws_creds_profile, 'aws_secret_access_key')
if self.args['artiball'].endswith('.tar.gz'):
artiball = self.args['artiball']
else:
artiball = self.args['artiball'] + '.tar.gz'
pack.set_incoming_s3_bucket()
self.logger.write('Downloading package from S3 bucket ' + pack.PACKAGE_INCOMING_S3_BUCKET, multi_line=False)
download_result = sky_boto_actions.download_from_s3(pack.PACKAGE_INCOMING_S3_BUCKET, artiball,
self.tmp_dir, self.logger,
access_key=self.aws_access_key_id,
secret_key=self.aws_secret_access_key,
dry_run=False)
self.result.output += download_result["result_string"]
if not download_result["valid"]:
self.result.status = sky_cfg.API_STATUS_FAIL
return self.result
artiball_file = tarfile.open(os.path.join(self.tmp_dir, artiball), 'r:gz')
artiball_dir = os.path.join(self.tmp_dir, artiball.split('.tar.gz')[0])
artiball_file.extractall(os.path.join(artiball_dir, 'skybase'))
self.logger.write('Validating package in ' + artiball_dir, multi_line=False)
validate_result = pack_actions.validate_with_schema(artiball_dir, 'artiball',
update_content_from_config=False)
if validate_result["valid"]:
self.result.output += "All content validated, ready for pack submit.\n"
else:
self.result.output += "Invalid content for submission, please verify artiball is valid.\n"
self.result.status = sky_cfg.API_STATUS_FAIL
return self.result
app_dir = os.path.join(artiball_dir, 'skybase', 'app')
yum_aws_creds_file = os.path.join(self.runner_cfg.data['runner_credentials_dir'], 'aws', 'config')
if os.path.exists(yum_aws_creds_file):
config = ConfigParser.ConfigParser()
config.read([str(yum_aws_creds_file)])
yum_replications = self.runner_cfg.data['yum_replication']
for yum_replication in yum_replications:
yum_aws_creds_profile = yum_replication['profile']
yum_aws_access_key_id = config.get('profile ' + yum_aws_creds_profile, 'aws_access_key_id')
yum_aws_secret_access_key = config.get('profile ' + yum_aws_creds_profile, 'aws_secret_access_key')
for f in os.listdir(app_dir):
if os.path.splitext(f)[1] == '.rpm':
# Hardcoding default to True for pack group command, revisit later
self.args['apply'] = True
if self.args['apply']:
upload_result = sky_boto_actions.upload_to_s3(yum_replication['name'],
os.path.join(app_dir, f),
self.logger, prefix='inbox/skybase',
access_key=yum_aws_access_key_id,
secret_key=yum_aws_secret_access_key,
dry_run=False)
else:
upload_result = sky_boto_actions.upload_to_s3(yum_replication['name'],
os.path.join(app_dir, f),
self.logger, prefix='inbox/skybase',
access_key=yum_aws_access_key_id,
secret_key=yum_aws_secret_access_key,
dry_run=True)
self.result.output += upload_result["result_string"]
if not upload_result["valid"]:
self.result.status = sky_cfg.API_STATUS_FAIL
return self.result
else:
self.result.output += "Cannot locate aws credentials, please confirm they are set in " \
+ yum_aws_creds_file + "\n"
| self.result.status = sky_cfg.API_STATUS_FAIL
return self.result
else:
self.result.output += "Cannot locate aws credentials, please confirm they are set in " + aws_creds_file \
+ "\n"
self.result.status = sky_cfg.API_STATUS_FAIL
return self.result
file_path = os.path.join(self.tmp | _dir, artiball)
depot_bucket_name = os.path.expanduser(self.runner_cfg.data['package_depot_S3_bucket'])
# Hardcoding default to True for pack group command, revisit later
self.args['apply'] = True
if self.args['apply']:
self.logger.write('Uploading package to S3 bucket ' + depot_bucket_name, multi_line=False)
upload_result = sky_boto_actions.upload_to_s3(depot_bucket_name, file_path, self.logger,
access_key=self.aws_access_key_id,
secret_key=self.aws_secret_access_key,
dry_run=F |
krishna11888/ai | third_party/gensim/gensim/models/hdpmodel.py | Python | gpl-2.0 | 23,000 | 0.002174 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
# Some show/print topics code is adapted from Dr. Hoffman's online lda sample code,
# (C) 2010 Matthew D. Hoffman, GNU GPL 3.0
# http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinelhdp.py` script
by C. Wang see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging, time
import numpy as np
import scipy.special as sp
from gensim import interfaces, utils, matutils
from six.moves import xrange
logger = logging.getLogger(__name__)
# Convergence threshold on the mean change of gamma in lda_e_step.
meanchangethresh = 0.00001
# Lower bound on the online learning rate rho_t.
rhot_bound = 0.0
def log_normalize(v):
    """Normalize log-weights so they describe a proper distribution.

    Returns ``(v - log_norm, log_norm)`` where ``log_norm`` is the log of
    the normalizing constant, computed with an overflow-avoiding shift.
    Accepts a single vector (1-d) or a batch of row vectors (2-d).
    """
    log_max = 100.0
    if v.ndim == 1:
        # Shift so the largest exponent stays near log_max before exp().
        shift = log_max - np.log(len(v) + 1.0) - v.max()
        log_norm = np.log(np.exp(v + shift).sum()) - shift
        return v - log_norm, log_norm
    # Row-wise variant: one shift and one normalizer per row.
    shift = log_max - np.log(v.shape[1] + 1.0) - v.max(axis=1)
    log_norm = np.log(np.exp(v + shift[:, np.newaxis]).sum(axis=1)) - shift
    return v - log_norm[:, np.newaxis], log_norm
def dirichlet_expectation(alpha):
    """
    For a vector theta ~ Dir(alpha), compute E[log(theta)] given alpha.

    Works row-wise when `alpha` is a 2-d array of Dirichlet parameters.
    """
    if alpha.ndim == 1:
        return sp.psi(alpha) - sp.psi(alpha.sum())
    return sp.psi(alpha) - sp.psi(alpha.sum(axis=1))[:, np.newaxis]
def expect_log_sticks(sticks):
    """
    For stick-breaking hdp, return the E[log(sticks)]

    `sticks` is a (2, T-1) array of Beta parameters; the result has T
    entries, the last one covering the remaining stick mass.
    """
    dig_sum = sp.psi(sticks.sum(axis=0))
    ElogW = sp.psi(sticks[0]) - dig_sum
    Elog1_W = sp.psi(sticks[1]) - dig_sum
    n = sticks.shape[1] + 1
    Elogsticks = np.zeros(n)
    Elogsticks[:n - 1] = ElogW
    # Each stick also accumulates the log of all preceding "leftover" terms.
    Elogsticks[1:] += np.cumsum(Elog1_W)
    return Elogsticks
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
    """Run the LDA variational E-step for a single document.

    Iterates the gamma/phi updates until the mean change of gamma falls
    below the module-level `meanchangethresh`, then returns a
    ``(likelihood, gamma)`` tuple: the document's contribution to the
    variational bound and its topic posterior parameters.
    """
    gamma = np.ones(len(alpha))
    expElogtheta = np.exp(dirichlet_expectation(gamma))
    betad = beta[:, doc_word_ids]  # topic-word columns restricted to this doc
    phinorm = np.dot(expElogtheta, betad) + 1e-100  # guard against div-by-zero
    counts = np.array(doc_word_counts)
    for _ in xrange(max_iter):
        lastgamma = gamma
        gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = np.exp(Elogtheta)
        phinorm = np.dot(expElogtheta, betad) + 1e-100
        meanchange = np.mean(abs(gamma - lastgamma))
        # stop once gamma has converged
        if (meanchange < meanchangethresh):
            break
    likelihood = np.sum(counts * np.log(phinorm))
    likelihood += np.sum((alpha - gamma) * Elogtheta)
    likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
    likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))
    return (likelihood, gamma)
class SuffStats(object):
    """Sufficient statistics accumulated over one chunk of documents."""

    def __init__(self, T, Wt, Dt):
        # Number of documents in the chunk.
        self.m_chunksize = Dt
        # Per-topic stick statistics (length T) and topic-word statistics (T x Wt).
        self.m_var_sticks_ss = np.zeros(T)
        self.m_var_beta_ss = np.zeros((T, Wt))

    def set_zero(self):
        """Reset all accumulated statistics in place."""
        for stats in (self.m_var_sticks_ss, self.m_var_beta_ss):
            stats.fill(0.0)
class HdpModel(interfaces.TransformationABC):
"""
The constructor estimates Hierachical Dirichlet Process model parameters based
on a training corpus:
>>> hdp = HdpModel(corpus, id2word)
>>> hdp.print_topics(topics=20, topn=10)
Inference on new documents is based on the approximately LDA-equivalent topics.
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
             chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
             gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
             outputdir=None):
    """
    `gamma`: first level concentration
    `alpha`: second level concentration
    `eta`: the topic Dirichlet
    `T`: top level truncation level
    `K`: second level truncation level
    `kappa`: learning rate
    `tau`: slow down parameter
    `max_time`: stop training after this many seconds
    `max_chunks`: stop after having processed this many chunks (wrap around
    corpus beginning in another corpus pass, if there are not enough chunks
    in the corpus)
    """
    self.corpus = corpus
    self.id2word = id2word
    self.chunksize = chunksize
    self.max_chunks = max_chunks
    self.max_time = max_time
    self.outputdir = outputdir
    # LDA-equivalent parameters; computed lazily, required for inference().
    self.lda_alpha = None
    self.lda_beta = None
    self.m_W = len(id2word)  # vocabulary size
    self.m_D = 0  # number of training documents
    if corpus:
        self.m_D = len(corpus)
    self.m_T = T
    self.m_K = K
    self.m_alpha = alpha
    self.m_gamma = gamma
    # Variational stick-breaking parameters, shape (2, T-1).
    self.m_var_sticks = np.zeros((2, T - 1))
    self.m_var_sticks[0] = 1.0
    self.m_var_sticks[1] = range(T - 1, 0, -1)
    self.m_varphi_ss = np.zeros(T)
    # Randomly initialized topic-word variational parameters.
    self.m_lambda = np.random.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
    self.m_eta = eta
    self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
    self.m_tau = tau + 1
    self.m_kappa = kappa
    self.m_scale = scale
    self.m_updatect = 0
    self.m_status_up_to_date = True
    self.m_num_docs_processed = 0
    # Bookkeeping for lazy per-word updates of lambda.
    self.m_timestamp = np.zeros(self.m_W, dtype=int)
    self.m_r = [0]
    self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
    self.m_var_converge = var_converge
    if self.outputdir:
        self.save_options()
    # if a training corpus was provided, start estimating the model right away
    if corpus is not None:
        self.update(corpus)
def inference(self, chunk):
    """Return gamma (document-topic posterior parameters) for each
    document in `chunk`, as a (len(chunk), num_topics) array.

    Requires the approximate LDA parameters (`lda_alpha`, `lda_beta`)
    to have been computed, i.e. the model must already be trained.
    """
    if self.lda_alpha is None or self.lda_beta is None:
        raise RuntimeError("model must be trained to perform inference")
    chunk = list(chunk)
    if len(chunk) > 1:
        logger.debug("performing inference on a chunk of %i documents" % len(chunk))
    gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
    for d, doc in enumerate(chunk):
        if not doc:  # leave gamma at zero for empty documents
            continue
        ids, counts = zip(*doc)
        _, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
        gamma[d, :] = gammad
    return gamma
def __getitem__(self, bow, eps=0.01):
    """Return the topic distribution for document `bow` as a list of
    ``(topic_id, weight)`` pairs, omitting weights below `eps`.

    If `bow` is a whole corpus, return the lazily-transformed corpus.
    """
    is_corpus, corpus = utils.is_corpus(bow)
    if is_corpus:
        return self._apply(corpus)
    gamma = self.inference([bow])[0]
    # Normalize gamma into a probability distribution (empty if all-zero).
    topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
    return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
            if topicvalue >= eps]
def update(self, corpus):
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.clock()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self |
AsgerPetersen/QGIS | python/plugins/processing/algs/lidar/lastools/blast2dem.py | Python | gpl-2.0 | 3,426 | 0.001168 | # -*- coding: utf-8 -*-
"""
***************************************************************************
blast2dem.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
class blast2dem(LAStoolsAlgorithm):
    """Processing algorithm wrapping the LAStools 'blast2dem' command-line tool,
    which rasterizes (very large) LiDAR point clouds into a DEM raster.
    """

    # Parameter identifiers and their selectable option lists.
    ATTRIBUTE = "ATTRIBUTE"
    PRODUCT = "PRODUCT"
    ATTRIBUTES = ["elevation", "slope", "intensity", "rgb"]
    PRODUCTS = ["actual values", "hillshade", "gray", "false"]
    USE_TILE_BB = "USE_TILE_BB"

    def defineCharacteristics(self):
        """Declare the algorithm's name/group and register its parameters."""
        self.name, self.i18n_name = self.trAlgorithm('blast2dem')
        self.group, self.i18n_group = self.trAlgorithm('LAStools')
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParametersFilter1ReturnClassFlagsGUI()
        self.addParametersStepGUI()
        self.addParameter(ParameterSelection(blast2dem.ATTRIBUTE,
                                             self.tr("Attribute"), blast2dem.ATTRIBUTES, 0))
        self.addParameter(ParameterSelection(blast2dem.PRODUCT,
                                             self.tr("Product"), blast2dem.PRODUCTS, 0))
        self.addParameter(ParameterBoolean(blast2dem.USE_TILE_BB,
                                           self.tr("Use tile bounding box (after tiling with buffer)"), False))
        self.addParametersRasterOutputGUI()
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        """Assemble the blast2dem command line from parameter values and run it."""
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "blast2dem")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        self.addParametersFilter1ReturnClassFlagsCommands(commands)
        self.addParametersStepCommands(commands)
        attribute = self.getParameterValue(blast2dem.ATTRIBUTE)
        # Index 0 is the tool's default, so a flag is only passed for other choices.
        if attribute != 0:
            commands.append("-" + blast2dem.ATTRIBUTES[attribute])
        product = self.getParameterValue(blast2dem.PRODUCT)
        if product != 0:
            commands.append("-" + blast2dem.PRODUCTS[product])
        if self.getParameterValue(blast2dem.USE_TILE_BB):
            commands.append("-use_tile_bb")
        self.addParametersRasterOutputCommands(commands)
        self.addParametersAdditionalCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
|
Clarify/clarify_python_2 | setup.py | Python | mit | 1,348 | 0.000742 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
# Package metadata for the Clarify Python 2 helper library.
setup(
    name='clarify_python_2',
    version='1.0.1',
    description='The Clarify Python 2 Helper Library wraps the entire Clarify API in Python 2.x function calls.',
    long_description=readme + '\n\n' + history,
    author='Paul Murphy',
    author_email='murphy@clarify.io',
    url='https://github.com/Clarify/clarify_python_2',
    packages=[
        'clarify_python_2',
    ],
    package_dir={'clarify_python_2':
                 'clarify_python_2'},
    include_package_data=True,
    install_requires=[
    ],
    license="MIT",
    zip_safe=False,
    keywords='clarify_python_2 clarify',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    test_suite='tests',
)
|
jl4ge/cs3240-labdemo | names.py | Python | mit | 67 | 0 |
# Fixed list of sample names used by the lab demo.
NAMES = ["Agatha", "Bernard", "Lucy", "Russle", "April", "Sammy"]
JungInkyo/Qualcomm_sensor | pa10a/btserver/bthandler.py | Python | gpl-2.0 | 2,423 | 0.002889 | import asyncore
import logging
import re
from bterror import BTError
logger = logging.getLogger(__name__)
class BTClientHandler(asyncore.dispatcher_with_send):
    """BT handler for a client-side socket.

    Buffers incoming bytes and dispatches newline-terminated text commands.
    """

    def __init__(self, socket, server):
        asyncore.dispatcher_with_send.__init__(self, socket)
        self.server = server
        # Partial command received so far (commands are '\n'-terminated).
        self.data = ""
        # 'real-time': True while live data should be streamed to this client.
        # 'history': [enabled, start_time, end_time] for a pending history query.
        self.sending_status = {'real-time': False, 'history': [True, -1, -1]}

    def handle_read(self):
        """Accumulate received data; dispatch a command once a newline arrives."""
        try:
            data = self.recv(1024)
            if not data:
                return
            lf_char_index = data.find('\n')
            if lf_char_index == -1:
                # No newline yet: keep buffering.
                self.data += data
            else:
                # Complete command: everything buffered plus data up to the newline.
                self.data += data[:lf_char_index]
                print("Received [{}]".format(self.data))
                self.handle_command(self.data)
                # BUGFIX: keep any bytes that arrived after the newline instead
                # of discarding them along with the handled command.
                self.data = data[lf_char_index + 1:]
        except Exception as e:
            BTError.print_error(handler=self, error=BTError.ERR_READ, error_message=repr(e))
            self.data = ""
            self.handle_close()

    def handle_command(self, command):
        """Interpret one text command from the client.

        Supported commands:
        - start: start sending real-time data (sets 'real-time' to True).
        - stop: stop sending real-time data (sets 'real-time' to False).
        - history <start_time> <end_time>: stop sending real-time data and
          query historical data from the database. Getting history data might
          take some time, so a different thread should handle this request.
        """
        if re.match('stop', command) is not None:
            self.sending_status['real-time'] = False
        if re.match('start', command) is not None:
            self.sending_status['real-time'] = True
        result = re.match(r"history (\d+) (\d+)", command)
        if result is not None:
            self.sending_status['history'] = [True, int(result.group(1)), int(result.group(2))]

    def handle_close(self):
        """Flush pending output, deregister from the server, and close the socket."""
        # Flush the outgoing buffer before tearing down.
        while self.writable():
            self.handle_write()
        self.server.active_client_handlers.remove(self)
        self.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.