| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| WhatDo/FlowFairy | examples/sine_fix/dilated3.py | 1 | 2514 |
import tensorflow as tf
import tensorflow.contrib.slim as slim
from flowfairy.conf import settings
from util import lrelu, conv2d, maxpool2d, embedding, avgpool2d, GLU, causal_GLU
from functools import partial
import ops
discrete_class = settings.DISCRETE_CLASS
batch_size = settings.BATCH_SIZE
samplerate = sr = settings.SAMPLERATE
embedding_size = settings.EMBEDDING_SIZE
num_classes = settings.CLASS_COUNT
def broadcast(l, emb):
sh = l.get_shape().as_list()[1]
emb = emb[:, None, None, :]
emb = tf.tile(emb, (1,sh,1,1))
return tf.concat([l, emb], 3)
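# Example of what broadcast does (shapes assumed for illustration): if l has shape
# (batch, length, 1, channels) and emb has shape (batch, embedding_size), emb is tiled
# along the length axis and concatenated on the channel axis, giving
# (batch, length, 1, channels + embedding_size).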
# Create model
def conv_net(x, cls, dropout, is_training=False):
xs = tf.expand_dims(x, -1)
xs = tf.expand_dims(xs, -1)
conv1 = causal_GLU(xs, 4, [128, 1], scope='conv1_1', normalizer_fn=slim.batch_norm, normalizer_params={'is_training': is_training, 'decay': 0.9})
print('conv1', conv1)
conv1_d1 = GLU(conv1, 4, [128, 1], scope='conv1_d1')
print('conv1_d1 ', conv1_d1)
# Parallel
conv1_d2 = GLU(conv1, 4, [128, 1], rate=2, scope='conv1_d2')
print('conv1_d2 ', conv1_d2)
conv1_d4 = GLU(conv1, 4, [128, 1], rate=4, scope='conv1_d4')
print('conv1_d4 ', conv1_d4)
conv1 = tf.concat([conv1_d1, conv1_d2, conv1_d4], 3)
print('conv1_concat', conv1)
#conv1 = GLU(conv1, 4, [256, 1], scope='conv1_2')
#with tf.name_scope('embedding'):
#convblock 2
conv2 = GLU(conv1, 8, [128, 1], scope='conv2_1')
conv2 = GLU(conv2, 8, [128, 1], scope='conv2_2')
conv2 = slim.max_pool2d(conv2, [2,1])
print('conv2: ', conv2)
with tf.variable_scope('embedding'):
emb1 = embedding(cls, embedding_size, num_classes)
embedded = broadcast(conv2, emb1)
print('embedded:', embedded)
#convblock 3
conv3 = GLU(embedded, 16, [128, 1], scope='conv3_1')
conv3 = GLU(conv3, 16, [128, 1], scope='conv3_2')
print('conv3: ', conv3)
#convblock 4
conv4 = tf.depth_to_space(conv3, 4) #upconv
print('d2sp: ', conv4)
conv4 = tf.reshape(conv4, shape=[-1, sr, 1, 8]) # reshape upconvolution to have proper shape
conv4 = GLU(conv4, 16, [128, 1], scope='conv4_1')
#convblock 5
conv4 = tf.concat([conv4, conv1], 3) # <- unet like concat first with last
conv4 = GLU(conv4, 16, [128, 1], scope='conv4_2')
print('conv4: ', conv4)
conv5 = GLU(conv4, discrete_class, [2,1], scope='conv5')
print('conv5: ', conv5)
#out
out = tf.reshape(conv5, [-1, sr, discrete_class])
print('out: ', out)
return out
| mit | 379,616,175,697,035,800 | 29.289157 | 149 | 0.624503 | false | 2.717838 | false | false | false |
| anthill-services/anthill-common | anthill/common/social/google.py | 1 | 5627 |
import tornado.httpclient
import ujson
import jwt
import abc
from urllib import parse
from .. import admin as a
from .. social import SocialNetworkAPI, APIError, AuthResponse, SocialPrivateKey
class GoogleAPI(SocialNetworkAPI, metaclass=abc.ABCMeta):
GOOGLE_OAUTH = "https://www.googleapis.com/oauth2/"
NAME = "google"
def __init__(self, cache):
super(GoogleAPI, self).__init__(GoogleAPI.NAME, cache)
# noinspection PyMethodMayBeStatic
def __parse_friend__(self, friend):
try:
return {
"id": friend["id"],
"avatar": friend["image"]["url"],
"profile": friend["url"],
"display_name": friend["displayName"]
}
except KeyError:
return None
async def api_auth(self, gamespace, code, redirect_uri):
private_key = await self.get_private_key(gamespace)
fields = {
"code": code,
"client_id": private_key.app_id,
"client_secret": private_key.app_secret,
"redirect_uri": redirect_uri,
"grant_type": "authorization_code",
"access_type": "offline"
}
try:
response = await self.api_post("token", fields)
except tornado.httpclient.HTTPError as e:
raise APIError(
e.code,
e.response.body if hasattr(e.response, "body") else str(e))
else:
payload = ujson.loads(response.body)
refresh_token = payload.get("refresh_token", None)
access_token = payload["access_token"]
expires_in = payload["expires_in"]
id_token = payload["id_token"]
user_info = jwt.decode(id_token, verify=False)
username = user_info["sub"]
result = AuthResponse(
access_token=access_token,
expires_in=expires_in,
refresh_token=refresh_token,
username=username,
import_social=True)
return result
async def api_get(self, operation, fields, v="v4", **kwargs):
fields.update(**kwargs)
result = await self.client.fetch(
GoogleAPI.GOOGLE_OAUTH + v + "/" + operation + "?" +
parse.urlencode(fields))
return result
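    # Illustrative request (token value made up): api_get("userinfo", {}, v="v2",
    # access_token="ya29.xyz") fetches
    # https://www.googleapis.com/oauth2/v2/userinfo?access_token=ya29.xyz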
async def api_get_user_info(self, access_token=None):
try:
response = await self.api_get(
"userinfo",
{},
v="v2",
access_token=access_token)
except tornado.httpclient.HTTPError as e:
raise APIError(e.code, e.response.body)
else:
data = ujson.loads(response.body)
return GoogleAPI.process_user_info(data)
async def api_post(self, operation, fields, v="v4", **kwargs):
fields.update(**kwargs)
result = await self.client.fetch(
GoogleAPI.GOOGLE_OAUTH + v + "/" + operation,
method="POST",
body=parse.urlencode(fields))
return result
async def api_refresh_token(self, refresh_token, gamespace):
private_key = await self.get_private_key(gamespace)
try:
response = await self.api_post("token", {
"client_id": private_key.app_id,
"client_secret": private_key.app_secret,
"refresh_token": refresh_token,
"grant_type": "refresh_token"
})
except tornado.httpclient.HTTPError as e:
raise APIError(e.code, e.response.body)
else:
data = ujson.loads(response.body)
return data
async def get(self, url, headers=None, **kwargs):
result = await self.client.fetch(
url + "?" + parse.urlencode(kwargs),
headers=headers)
return result
@staticmethod
def process_user_info(data):
return {
"name": data["name"],
"avatar": data["picture"],
"language": data["locale"],
"email": data["email"]
}
def has_private_key(self):
return True
def new_private_key(self, data):
return GooglePrivateKey(data)
class GooglePrivateKey(SocialPrivateKey):
def __init__(self, key):
super(GooglePrivateKey, self).__init__(key)
self.app_secret = self.data["web"]["client_secret"] if self.data else None
self.app_id = self.data["web"]["client_id"] if self.data else None
def get_app_id(self):
return self.app_id
def dump(self):
return {
"web": {
"client_secret": self.app_secret,
"client_id": self.app_id
}
}
def has_ui(self):
return True
def get(self):
return {
"app_secret": self.app_secret,
"app_id": self.app_id
}
def render(self):
return {
"app_id": a.field(
"Client ID", "text", "primary", "non-empty",
order=1,
description="Client ID from Google's project Credentials, "
"see <a href=\"https://console.developers.google.com/apis/credentials\">Google "
"Credentials</a>"),
"app_secret": a.field(
"Client Secret", "text", "primary", "non-empty",
order=2,
description="Same as above, but called \"Client Secret\"")
}
def update(self, app_secret, app_id, **ignored):
self.app_secret = app_secret
self.app_id = app_id
| mit | -8,178,881,027,545,703,000 | 28.772487 | 108 | 0.53421 | false | 4.168148 | false | false | false |
| mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/rewrite/rewritepolicy_rewriteglobal_binding.py | 1 | 6060 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rewritepolicy_rewriteglobal_binding(base_resource) :
""" Binding class showing the rewriteglobal that can be bound to rewritepolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def boundto(self) :
"""Location where policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""Location where policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def name(self) :
"""Name of the rewrite policy.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the rewrite policy.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def priority(self) :
"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rewritepolicy_rewriteglobal_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rewritepolicy_rewriteglobal_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch rewritepolicy_rewriteglobal_binding resources.
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of rewritepolicy_rewriteglobal_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count rewritepolicy_rewriteglobal_binding resources configued on NetScaler.
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of rewritepolicy_rewriteglobal_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class rewritepolicy_rewriteglobal_binding_response(base_response) :
def __init__(self, length=1) :
self.rewritepolicy_rewriteglobal_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rewritepolicy_rewriteglobal_binding = [rewritepolicy_rewriteglobal_binding() for _ in range(length)]
| apache-2.0 | -7,462,817,870,828,477,000 | 26.420814 | 137 | 0.701155 | false | 3.441227 | false | false | false |
| 1tush/reviewboard | reviewboard/reviews/models/base_review_request_details.py | 1 | 9919 |
from __future__ import unicode_literals
import re
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import JSONField
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.markdown_utils import markdown_escape
from reviewboard.reviews.models.default_reviewer import DefaultReviewer
from reviewboard.scmtools.errors import InvalidChangeNumberError
@python_2_unicode_compatible
class BaseReviewRequestDetails(models.Model):
"""Base information for a review request and draft.
ReviewRequest and ReviewRequestDraft share a lot of fields and
methods. This class provides those fields and methods for those
classes.
"""
MAX_SUMMARY_LENGTH = 300
summary = models.CharField(_("summary"), max_length=MAX_SUMMARY_LENGTH)
description = models.TextField(_("description"), blank=True)
testing_done = models.TextField(_("testing done"), blank=True)
bugs_closed = models.CharField(_("bugs"), max_length=300, blank=True)
branch = models.CharField(_("branch"), max_length=300, blank=True)
rich_text = models.BooleanField(_("rich text"), default=False)
commit_id = models.CharField(_('commit ID'), max_length=64, blank=True,
null=True, db_index=True)
extra_data = JSONField(null=True)
def get_review_request(self):
raise NotImplementedError
def get_bug_list(self):
"""Returns a list of bugs associated with this review request."""
if self.bugs_closed == "":
return []
bugs = list(set(re.split(r"[, ]+", self.bugs_closed)))
# First try a numeric sort, to show the best results for the majority
# case of bug trackers with numeric IDs. If that fails, sort
# alphabetically.
try:
bugs.sort(key=int)
except ValueError:
bugs.sort()
return bugs
def get_screenshots(self):
"""Returns the list of all screenshots on a review request.
This includes all current screenshots, but not previous ones.
By accessing screenshots through this method, future review request
lookups from the screenshots will be avoided.
"""
review_request = self.get_review_request()
for screenshot in self.screenshots.all():
screenshot._review_request = review_request
yield screenshot
def get_inactive_screenshots(self):
"""Returns the list of all inactive screenshots on a review request.
This only includes screenshots that were previously visible but
have since been removed.
By accessing screenshots through this method, future review request
lookups from the screenshots will be avoided.
"""
review_request = self.get_review_request()
for screenshot in self.inactive_screenshots.all():
screenshot._review_request = review_request
yield screenshot
def get_file_attachments(self):
"""Returns the list of all file attachments on a review request.
This includes all current file attachments, but not previous ones.
By accessing file attachments through this method, future review
request lookups from the file attachments will be avoided.
"""
review_request = self.get_review_request()
for file_attachment in self.file_attachments.all():
file_attachment._review_request = review_request
yield file_attachment
def get_inactive_file_attachments(self):
"""Returns all inactive file attachments on a review request.
This only includes file attachments that were previously visible
but have since been removed.
By accessing file attachments through this method, future review
request lookups from the file attachments will be avoided.
"""
review_request = self.get_review_request()
for file_attachment in self.inactive_file_attachments.all():
file_attachment._review_request = review_request
yield file_attachment
def add_default_reviewers(self):
"""Add default reviewers based on the diffset.
This method goes through the DefaultReviewer objects in the database
and adds any missing reviewers based on regular expression comparisons
with the set of files in the diff.
"""
diffset = self.get_latest_diffset()
if not diffset:
return
people = set()
groups = set()
# TODO: This is kind of inefficient, and could maybe be optimized in
# some fancy way. Certainly the most superficial optimization that
# could be made would be to cache the compiled regexes somewhere.
files = diffset.files.all()
reviewers = DefaultReviewer.objects.for_repository(self.repository,
self.local_site)
for default in reviewers:
try:
regex = re.compile(default.file_regex)
except:
continue
for filediff in files:
if regex.match(filediff.source_file or filediff.dest_file):
for person in default.people.all():
people.add(person)
for group in default.groups.all():
groups.add(group)
break
existing_people = self.target_people.all()
for person in people:
if person not in existing_people:
self.target_people.add(person)
existing_groups = self.target_groups.all()
for group in groups:
if group not in existing_groups:
self.target_groups.add(group)
def update_from_commit_id(self, commit_id):
"""Updates the data from a server-side changeset.
If the commit ID refers to a pending changeset on an SCM which stores
such things server-side (like perforce), the details like the summary
and description will be updated with the latest information.
If the change number is the commit ID of a change which exists on the
server, the summary and description will be set from the commit's
message, and the diff will be fetched from the SCM.
"""
scmtool = self.repository.get_scmtool()
changeset = None
if scmtool.supports_pending_changesets:
changeset = scmtool.get_changeset(commit_id, allow_empty=True)
if changeset and changeset.pending:
self.update_from_pending_change(commit_id, changeset)
elif self.repository.supports_post_commit:
self.update_from_committed_change(commit_id)
else:
if changeset:
raise InvalidChangeNumberError()
else:
raise NotImplementedError()
def update_from_pending_change(self, commit_id, changeset):
"""Updates the data from a server-side pending changeset.
This will fetch the metadata from the server and update the fields on
the review request.
"""
if not changeset:
raise InvalidChangeNumberError()
# If the SCM supports changesets, they should always include a number,
# summary and description, parsed from the changeset description. Some
# specialized systems may support the other fields, but we don't want
# to clobber the user-entered values if they don't.
self.commit = commit_id
if self.rich_text:
description = markdown_escape(changeset.description)
testing_done = markdown_escape(changeset.testing_done)
else:
description = changeset.description
testing_done = changeset.testing_done
self.summary = changeset.summary
self.description = description
if testing_done:
self.testing_done = testing_done
if changeset.branch:
self.branch = changeset.branch
if changeset.bugs_closed:
self.bugs_closed = ','.join(changeset.bugs_closed)
def update_from_committed_change(self, commit_id):
"""Updates from a committed change present on the server.
Fetches the commit message and diff from the repository and sets the
relevant fields.
"""
commit = self.repository.get_change(commit_id)
summary, message = commit.split_message()
self.commit = commit_id
self.summary = summary.strip()
if self.rich_text:
self.description = markdown_escape(message.strip())
else:
self.description = message.strip()
DiffSet.objects.create_from_data(
repository=self.repository,
diff_file_name='diff',
diff_file_contents=commit.diff.encode('utf-8'),
parent_diff_file_name=None,
parent_diff_file_contents=None,
diffset_history=self.get_review_request().diffset_history,
basedir='/',
request=None)
def save(self, **kwargs):
self.bugs_closed = self.bugs_closed.strip()
self.summary = self._truncate(self.summary, self.MAX_SUMMARY_LENGTH)
super(BaseReviewRequestDetails, self).save(**kwargs)
def _truncate(self, string, num):
if len(string) > num:
string = string[0:num]
i = string.rfind('.')
if i != -1:
string = string[0:i + 1]
return string
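    # Example: _truncate('Fixes a bug. Adds tests for everything', 20) first cuts to
    # 'Fixes a bug. Adds te', then back to the last period, returning 'Fixes a bug.'.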
def __str__(self):
if self.summary:
return six.text_type(self.summary)
else:
return six.text_type(_('(no summary)'))
class Meta:
abstract = True
app_label = 'reviews'
| mit | -8,082,577,163,745,702,000 | 34.679856 | 78 | 0.633229 | false | 4.650258 | true | false | false |
| Clarity-89/clarityv2 | src/clarityv2/accounts/managers.py | 1 | 1420 |
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
username = self.model.normalize_username(username)
user = self.model(username=username, email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, password, **extra_fields)
| mit | -5,557,580,562,345,280,000 | 40.764706 | 79 | 0.661268 | false | 4.115942 | false | false | false |
| GalPressman/matrigram | docs/conf.py | 1 | 10481 |
# -*- coding: utf-8 -*-
#
# matrigram documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 20 11:09:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../matrigram'))
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'matrigram'
copyright = u'2016, Gal Pressman & Yuval Fatael'
author = u'Gal Pressman & Yuval Fatael'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_user': 'GalPressman',
'github_repo': 'matrigram',
'github_banner': True,
'github_button': True,
'travis_button': True,
'show_powered_by': False,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'matrigram v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'logo.jpg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
html_sidebars = {
'**': [
'about.html',
'badges.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'matrigramdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'matrigram.tex', u'matrigram Documentation',
u'Gal Pressman \\& Yuval Fatael', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'matrigram', u'matrigram Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'matrigram', u'matrigram Documentation',
author, 'matrigram', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit | 2,914,533,651,014,203,000 | 27.793956 | 80 | 0.68753 | false | 3.558913 | true | false | false |
| migasfree/migasfree | migasfree/stats/views/software.py | 1 | 6246 |
# -*- coding: utf-8 -*-
import json
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.db.models import Count
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _
from ...server.models import Project, Store, Package
from ...catalog.models import Application
def application_by_category():
total = Application.objects.count()
link = '{}?_REPLACE_'.format(
reverse('admin:catalog_application_changelist')
)
data = []
for item in Application.objects.values(
'category',
).annotate(
count=Count('category')
).order_by('-count'):
percent = float(item.get('count')) / total * 100
data.append({
'name': '{}'.format(dict(Application.CATEGORIES)[item.get('category')]),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'category__exact={}'.format(item.get('category'))
),
})
return {
'title': _('Applications / Category'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
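# Illustrative numbers: with 40 applications in total and 10 of them in one category,
# that category's slice gets value 10 and y 25.0, plus a changelist URL filtered by
# category__exact.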
def application_by_level():
total = Application.objects.count()
link = '{}?_REPLACE_'.format(
reverse('admin:catalog_application_changelist')
)
data = []
for item in Application.objects.values(
'level',
).annotate(
count=Count('level')
).order_by('-count'):
percent = float(item.get('count')) / total * 100
data.append({
'name': '{}'.format(dict(Application.LEVELS)[item.get('level')]),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'level__exact={}'.format(item.get('level'))
),
})
return {
'title': _('Applications / Level'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
def package_by_store(user):
total = Package.objects.scope(user).count()
link = '{}?_REPLACE_'.format(
reverse('admin:server_package_changelist')
)
values = defaultdict(list)
for item in Package.objects.scope(user).values(
'project__id', 'store__id', 'store__name'
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values[item.get('project__id')].append(
{
'name': item.get('store__name'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&store__id__exact={}'.format(
item.get('project__id'),
item.get('store__id'),
)
),
}
)
data = []
for project in Project.objects.scope(user).all():
if project.id in values:
count = sum(item.get('value') for item in values[project.id])
percent = float(count) / total * 100
data.append(
{
'name': project.name,
'value': count,
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}'.format(project.id)
),
'data': values[project.id]
}
)
return {
'title': _('Packages / Store'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
def store_by_project(user):
total = Store.objects.scope(user).count()
link = '{}?_REPLACE_'.format(
reverse('admin:server_store_changelist')
)
data = []
for item in Store.objects.scope(user).values(
'project__name',
'project__id',
).annotate(
count=Count('id')
).order_by('-count'):
percent = float(item.get('count')) / total * 100
data.append({
'name': item.get('project__name'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}'.format(item.get('project__id'))
),
})
return {
'title': _('Stores / Project'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
@login_required
def stores_summary(request):
user = request.user.userprofile
return render(
request,
'stores_summary.html',
{
'title': _('Stores'),
'chart_options': {
'no_data': _('There are no data to show'),
'reset_zoom': _('Reset Zoom'),
},
'store_by_project': store_by_project(user),
'opts': Store._meta,
}
)
@login_required
def packages_summary(request):
user = request.user.userprofile
return render(
request,
'packages_summary.html',
{
'title': _('Packages/Sets'),
'chart_options': {
'no_data': _('There are no data to show'),
'reset_zoom': _('Reset Zoom'),
},
'package_by_store': package_by_store(user),
'opts': Package._meta,
}
)
@login_required
def applications_summary(request):
return render(
request,
'applications_summary.html',
{
'title': _('Applications'),
'chart_options': {
'no_data': _('There are no data to show'),
'reset_zoom': _('Reset Zoom'),
},
'application_by_category': application_by_category(),
'application_by_level': application_by_level(),
'opts': Application._meta,
}
)
| gpl-3.0 | 6,193,939,635,404,802,000 | 27.651376 | 84 | 0.490394 | false | 4.180723 | false | false | false |
| kapilgarg1996/mospy | mospy/GUI.py | 1 | 2367 |
import os
import pickle
from Tkinter import *
from PIL import ImageTk, Image
import tkMessageBox
import tkFileDialog
from ttk import Frame, Button, Label, Style
from random import randint
from PIL import Image
import mosaic
class MainFrame(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
self.pack(fill=BOTH, expand=0)
Button(self, text = "Select Image Dataset Directory", command = lambda: openDir(self)).grid(row=0, column=0, pady=5)
self.dirName = StringVar()
Label(self, textvariable=self.dirName).grid(row=0, column=1, columnspan=2, pady=5, sticky=W)
Button(self, text = "Select File", command = lambda: openFile(self)).grid(row=1, column=0, pady=5)
self.fileName = StringVar()
Label(self, textvariable=self.fileName).grid(row=1, column=1, columnspan=2, pady=5, sticky=W)
self.iamgelabel = Label(self)
self.iamgelabel.grid(row=1, column=3)
Label(self, text = "Enter Number of Grids: ").grid(row=2, column=0, pady=5)
self.entry = Entry(self, bd=5)
self.entry.grid(row=2, column=1, pady=5, sticky=W)
Button(self, text = "CREATE", command = lambda: startMosaic(self.dirName.get(), self.fileName.get(), self.entry.get(), self.parent)).grid(row=3, column=0, pady=5)
def openDir(app):
dirName = tkFileDialog.askdirectory(initialdir='./')
app.dirName.set(dirName)
def openFile (app):
dirName = app.dirName.get()
if not os.path.isdir(dirName):
dirName = './'
fileName = tkFileDialog.askopenfilename(initialdir = dirName)
app.fileName.set(fileName)
size = 64, 64
img = Image.open(fileName)
img.thumbnail(size)
imgtk = ImageTk.PhotoImage(img)
app.iamgelabel.configure(image=imgtk)
app.iamgelabel.image = imgtk
def startMosaic(dirName, fileName, num_grids, frame):
wind = Toplevel(frame)
try:
mosaic.build_mosaic(fileName, num_grids=int(num_grids), root=wind, datasetdir=dirName)
except ValueError:
mosaic.build_mosaic(fileName, root=wind, datasetdir=dirName)
def main():
root = Tk()
size = 220, 220
root.title('MOSPY')
app = MainFrame(root)
root.geometry("480x360")
root.mainloop()
| mit | 8,678,145,406,439,883,000 | 28.6 | 170 | 0.646388 | false | 3.273859 | false | false | false |
| pxg/pxg.github.io | slides/deployment/python/5_two_step_deploy.py | 1 | 2664 |
"""
By default the HEAD of the current branch will be pushed to the remote server:
fab stage deploy
This can be overridden by providing a hash:
fab stage deploy:2ab1c583e35c99b66079877d49e3ec03812d3e53
If you don't like all the output:
fab stage deploy --hide=stdout
"""
import os
from fabric.api import env, execute, local, parallel
from fabric.operations import run, put
from fabric.context_managers import cd, prefix
from fabric.decorators import roles
def stage():
env.roledefs = {
'web': ['ubuntu@54.228.188.132', 'ubuntu@54.228.188.133'],
'master': ['ubuntu@54.228.188.132']
}
env.user = 'ec2-user'
# Note: the site root is now /var/www/goatse.cx/current
# Previous releases can be found in /var/www/goatse.cx/releases/<hash>
env.release_dir = '/var/www/goatse.cx'
env.key_filename = ['~/.ssh/goatse.pem/']
env.git_repo_dir = '/var/www/git_goatase/'
env.venv_activate = '/var/lib/venv/goatse/bin/activate'
def deploy(id='HEAD'):
"""
Main tasks to update the server from the given commit id, will use the
HEAD of the current branch by default
"""
release_dir = prepare_deploy(id)
activate_deploy(release_dir)
def prepare_deploy(id='HEAD'):
"""
Execute all steps which can in advance of actually switching the site live
This is done to speed up activating deployments
"""
packaged_code, release_dir = _package_code(id)
execute(deploy_package, packaged_code, release_dir)
execute(install_requirements, release_dir)
execute(backup_database)
execute(collectstatic, release_dir)
_clean_up(packaged_code)
return release_dir
def activate_deploy(release_dir):
"""
Switch the deployment to being live. This is the risk zone where downtime
could potentially happen.
"""
execute(migrate_database, release_dir)
execute(switch_release, release_dir)
execute(reload_server)
def _package_code(id):
"""
Locally compress the git repo into an archive, and generate the release dir
variable
"""
hash = local('git rev-parse %s' % id, capture=True)
file = '%s.tar.gz' % hash
local('git archive --format tar.gz %s -o %s' % (id, file))
release_dir = os.path.join(env.release_dir, 'releases', hash)
return file, release_dir
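# Illustrative example (hash shortened): _package_code('HEAD') with HEAD at abc123 runs
# 'git archive' into abc123.tar.gz and returns that file name together with the release
# directory /var/www/goatse.cx/releases/abc123.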
@parallel
@roles('web')
def deploy_package(file, release_dir):
"""
Move the packaged code to the webservers
"""
run('mkdir -p %s' % release_dir)
put(file, release_dir)
with cd(release_dir):
run('tar -xf %s' % file)
def _clean_up(packaged_code):
"""
Delete the packaged code
"""
local('rm %s' % packaged_code)
| mit | 5,292,689,465,113,000,000 | 26.75 | 79 | 0.67042 | false | 3.380711 | false | false | false |
| btrent/knave | pychess/Utils/EndgameTable.py | 1 | 2387 |
from gobject import GObject, SIGNAL_RUN_FIRST
from Move import Move
from lutils.egtb_k4it import egtb_k4it
from lutils.egtb_gaviota import egtb_gaviota
providers = []
class EndgameTable(GObject):
""" Wrap the low-level providers of exact endgame knowledge. """
__gsignals__ = {
"scored": (SIGNAL_RUN_FIRST, None, (object,)),
}
def __init__ (self):
GObject.__init__(self)
global providers
if not providers:
providers = [ egtb_gaviota(), egtb_k4it() ]
self.providers = providers
def _pieceCounts (self, board):
return sorted([ bin(board.friends[i]).count("1") for i in range(2) ])
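    # Example: in a KQ-vs-K position the per-side piece counts are 2 and 1, so this
    # returns [1, 2]; provider.supports() is then checked against these sorted counts.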
def scoreGame (self, lBoard, omitDepth=False, probeSoft=False):
""" Return result and depth to mate. (Intended for engine use.)
lBoard: A low-level board structure
omitDepth: Look up only the game's outcome (may save time)
probeSoft: Fail if the probe would require disk or network access.
Return value:
game_result: Either WHITEWON, DRAW, BLACKWON, or (on failure) None
depth: Depth to mate, or (if omitDepth or the game is drawn) None
"""
        pc = self._pieceCounts(lBoard)
        # The providers are assumed to take a "need depth" flag, i.e. the inverse of omitDepth.
        needDepth = not omitDepth
        for provider in self.providers:
            if provider.supports(pc):
                result, depth = provider.scoreGame(lBoard, needDepth, probeSoft)
if result is not None:
return result, depth
return None, None
def scoreAllMoves (self, lBoard):
""" Return each move's result and depth to mate.
lBoard: A low-level board structure
Return value: a list, with best moves first, of:
move: A high-level move structure
game_result: Either WHITEWON, DRAW, BLACKWON
depth: Depth to mate
"""
pc = self._pieceCounts(lBoard)
for provider in self.providers:
if provider.supports(pc):
results = provider.scoreAllMoves(lBoard)
if results:
ret = []
for lMove, result, depth in results:
ret.append( (Move(lMove), result, depth) )
#self.emit("scored", (lBoard, ret))
return ret
return []
| gpl-3.0 | -4,503,694,866,982,983,700 | 34.626866 | 80 | 0.560117 | false | 4.217314 | false | false | false |
| martey/django-shortcodes | shortcodes/parser.py | 1 | 1822 |
import re
import shortcodes.parsers
from django.core.cache import cache
def import_parser(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def parse(value):
ex = re.compile(r'\[(.*?)\]')
groups = ex.findall(value)
pieces = {}
parsed = value
for item in groups:
if ' ' in item:
name, space, args = item.partition(' ')
args = __parse_args__(args)
# If shortcode does not use spaces as a separator, it might use equals
# signs.
elif '=' in item:
name, space, args = item.partition('=')
args = __parse_args__(args)
else:
name = item
args = {}
item = re.escape(item)
try:
if cache.get(item):
parsed = re.sub(r'\[' + item + r'\]', cache.get(item), parsed)
else:
module = import_parser('shortcodes.parsers.' + name)
function = getattr(module, 'parse')
result = function(args)
cache.set(item, result, 3600)
parsed = re.sub(r'\[' + item + r'\]', result, parsed)
except ImportError:
pass
return parsed
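# Illustrative example ("video" is a hypothetical parser module): parsing '[video id="abc"]'
# imports shortcodes.parsers.video, calls its parse({'id': 'abc'}), caches the result for
# an hour, and substitutes it back into the returned text.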
def __parse_args__(value):
ex = re.compile(r'[ ]*(\w+)=([^" ]+|"[^"]*")[ ]*(?: |$)')
groups = ex.findall(value)
kwargs = {}
for group in groups:
if group.__len__() == 2:
item_key = group[0]
item_value = group[1]
if item_value.startswith('"'):
if item_value.endswith('"'):
item_value = item_value[1:]
item_value = item_value[:item_value.__len__() - 1]
kwargs[item_key] = item_value
return kwargs
| mit | -7,133,755,439,112,442,000 | 26.606061 | 78 | 0.487925 | false | 3.986871 | false | false | false |
| edison7500/dugong | apps/photos/migrations/0004_auto_20200313_1554.py | 1 | 1198 |
# Generated by Django 2.2.11 on 2020-03-13 07:54
import apps.images.handlers
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("photos", "0003_auto_20200306_1424")]
operations = [
migrations.AlterModelOptions(
name="category",
options={"verbose_name": "分类", "verbose_name_plural": "分类"},
),
migrations.AddField(
model_name="exif",
name="shot_time",
field=models.DateTimeField(
db_index=True,
default=django.utils.timezone.now,
editable=False,
),
),
migrations.AlterField(
model_name="category",
name="image",
field=models.ImageField(
blank=True,
null=True,
upload_to=apps.images.handlers.hexdigest_filename,
),
),
migrations.AlterField(
model_name="photo",
name="file",
field=models.ImageField(
upload_to=apps.images.handlers.hexdigest_filename
),
),
]
| gpl-3.0 | -897,384,506,030,921,200 | 27.333333 | 72 | 0.529412 | false | 4.524715 | false | false | false |
| m3z/HT | openstack_dashboard/dashboards/project/instances/views.py | 1 | 7166 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instances.
"""
import logging
from django import http
from django import shortcuts
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from .forms import UpdateInstance
from .tabs import InstanceDetailTabs
from .tables import InstancesTable
from .workflows import LaunchInstance
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
table_class = InstancesTable
template_name = 'project/instances/index.html'
def get_data(self):
# Gather our instances
try:
instances = api.server_list(self.request)
except:
instances = []
exceptions.handle(self.request,
_('Unable to retrieve instances.'))
# Gather our flavors and correlate our instances to them
if instances:
try:
flavors = api.flavor_list(self.request)
except:
flavors = []
exceptions.handle(self.request, ignore=True)
full_flavors = SortedDict([(str(flavor.id), flavor)
for flavor in flavors])
# Loop through instances to get flavor info.
for instance in instances:
try:
flavor_id = instance.flavor["id"]
if flavor_id in full_flavors:
instance.full_flavor = full_flavors[flavor_id]
else:
# If the flavor_id is not in full_flavors list,
# get it via nova api.
instance.full_flavor = api.flavor_get(self.request,
flavor_id)
except:
msg = _('Unable to retrieve instance size information.')
exceptions.handle(self.request, msg)
return instances
class LaunchInstanceView(workflows.WorkflowView):
workflow_class = LaunchInstance
template_name = "project/instances/launch.html"
def get_initial(self):
initial = super(LaunchInstanceView, self).get_initial()
initial['project_id'] = self.request.user.tenant_id
initial['user_id'] = self.request.user.id
return initial
def console(request, instance_id):
try:
# TODO(jakedahn): clean this up once the api supports tailing.
tail = request.GET.get('length', None)
data = api.server_console_output(request,
instance_id,
tail_length=tail)
except:
data = _('Unable to get log for instance "%s".') % instance_id
exceptions.handle(request, ignore=True)
response = http.HttpResponse(mimetype='text/plain')
response.write(data)
response.flush()
return response
def vnc(request, instance_id):
try:
console = api.server_vnc_console(request, instance_id)
instance = api.server_get(request, instance_id)
return shortcuts.redirect(console.url +
("&title=%s(%s)" % (instance.name, instance_id)))
except:
redirect = reverse("horizon:project:instances:index")
msg = _('Unable to get VNC console for instance "%s".') % instance_id
exceptions.handle(request, msg, redirect=redirect)
class UpdateView(forms.ModalFormView):
form_class = UpdateInstance
template_name = 'project/instances/update.html'
context_object_name = 'instance'
success_url = reverse_lazy("horizon:project:instances:index")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context["instance_id"] = self.kwargs['instance_id']
return context
def get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
instance_id = self.kwargs['instance_id']
try:
self._object = api.server_get(self.request, instance_id)
except:
redirect = reverse("horizon:project:instances:index")
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
return {'instance': self.kwargs['instance_id'],
'tenant_id': self.request.user.tenant_id,
'name': getattr(self.get_object(), 'name', '')}
class DetailView(tabs.TabView):
tab_group_class = InstanceDetailTabs
template_name = 'project/instances/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["instance"] = self.get_data()
return context
def get_data(self):
if not hasattr(self, "_instance"):
try:
instance_id = self.kwargs['instance_id']
instance = api.server_get(self.request, instance_id)
instance.volumes = api.instance_volumes_list(self.request,
instance_id)
# Sort by device name
instance.volumes.sort(key=lambda vol: vol.device)
instance.full_flavor = api.flavor_get(self.request,
instance.flavor["id"])
instance.security_groups = api.server_security_groups(
self.request, instance_id)
except:
redirect = reverse('horizon:project:instances:index')
exceptions.handle(self.request,
_('Unable to retrieve details for '
'instance "%s".') % instance_id,
redirect=redirect)
self._instance = instance
return self._instance
def get_tabs(self, request, *args, **kwargs):
instance = self.get_data()
return self.tab_group_class(request, instance=instance, **kwargs)
| apache-2.0 | -4,953,366,527,320,025,000 | 37.320856 | 78 | 0.596428 | false | 4.555626 | false | false | false |
| madsmtm/fbchat | fbchat/utils.py | 1 | 8679 |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import re
import json
from time import time
from random import random
import warnings
import logging
from .models import *
try:
from urllib.parse import urlencode
basestring = (str, bytes)
except ImportError:
from urllib import urlencode
basestring = basestring
# Python 2's `input` executes the input, whereas `raw_input` just returns the input
try:
input = raw_input
except NameError:
pass
# Log settings
log = logging.getLogger("client")
log.setLevel(logging.DEBUG)
# Creates the console handler
handler = logging.StreamHandler()
log.addHandler(handler)
#: Default list of user agents
USER_AGENTS = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/601.1.10 (KHTML, like Gecko) Version/8.0.5 Safari/601.1.10",
"Mozilla/5.0 (Windows NT 6.3; WOW64; ; NCT50_AAP285C84A1328) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6"
]
LIKES = {
'large': EmojiSize.LARGE,
'medium': EmojiSize.MEDIUM,
'small': EmojiSize.SMALL,
'l': EmojiSize.LARGE,
'm': EmojiSize.MEDIUM,
's': EmojiSize.SMALL
}
MessageReactionFix = {
'😍': ('0001f60d', '%F0%9F%98%8D'),
'😆': ('0001f606', '%F0%9F%98%86'),
'😮': ('0001f62e', '%F0%9F%98%AE'),
'😢': ('0001f622', '%F0%9F%98%A2'),
'😠': ('0001f620', '%F0%9F%98%A0'),
'👍': ('0001f44d', '%F0%9F%91%8D'),
'👎': ('0001f44e', '%F0%9F%91%8E')
}
GENDERS = {
# For standard requests
0: 'unknown',
1: 'female_singular',
2: 'male_singular',
3: 'female_singular_guess',
4: 'male_singular_guess',
5: 'mixed',
6: 'neuter_singular',
7: 'unknown_singular',
8: 'female_plural',
9: 'male_plural',
10: 'neuter_plural',
11: 'unknown_plural',
# For graphql requests
'UNKNOWN': 'unknown',
'FEMALE': 'female_singular',
'MALE': 'male_singular',
#'': 'female_singular_guess',
#'': 'male_singular_guess',
#'': 'mixed',
'NEUTER': 'neuter_singular',
#'': 'unknown_singular',
#'': 'female_plural',
#'': 'male_plural',
#'': 'neuter_plural',
#'': 'unknown_plural',
}
class ReqUrl(object):
"""A class containing all urls used by `fbchat`"""
SEARCH = "https://www.facebook.com/ajax/typeahead/search.php"
LOGIN = "https://m.facebook.com/login.php?login_attempt=1"
SEND = "https://www.facebook.com/messaging/send/"
UNREAD_THREADS = "https://www.facebook.com/ajax/mercury/unread_threads.php"
UNSEEN_THREADS = "https://www.facebook.com/mercury/unseen_thread_ids/"
THREADS = "https://www.facebook.com/ajax/mercury/threadlist_info.php"
MESSAGES = "https://www.facebook.com/ajax/mercury/thread_info.php"
READ_STATUS = "https://www.facebook.com/ajax/mercury/change_read_status.php"
DELIVERED = "https://www.facebook.com/ajax/mercury/delivery_receipts.php"
MARK_SEEN = "https://www.facebook.com/ajax/mercury/mark_seen.php"
BASE = "https://www.facebook.com"
MOBILE = "https://m.facebook.com/"
STICKY = "https://0-edge-chat.facebook.com/pull"
PING = "https://0-edge-chat.facebook.com/active_ping"
UPLOAD = "https://upload.facebook.com/ajax/mercury/upload.php"
INFO = "https://www.facebook.com/chat/user_info/"
CONNECT = "https://www.facebook.com/ajax/add_friend/action.php?dpr=1"
REMOVE_USER = "https://www.facebook.com/chat/remove_participants/"
LOGOUT = "https://www.facebook.com/logout.php"
ALL_USERS = "https://www.facebook.com/chat/user_info_all"
SAVE_DEVICE = "https://m.facebook.com/login/save-device/cancel/"
CHECKPOINT = "https://m.facebook.com/login/checkpoint/"
THREAD_COLOR = "https://www.facebook.com/messaging/save_thread_color/?source=thread_settings&dpr=1"
THREAD_NICKNAME = "https://www.facebook.com/messaging/save_thread_nickname/?source=thread_settings&dpr=1"
THREAD_EMOJI = "https://www.facebook.com/messaging/save_thread_emoji/?source=thread_settings&dpr=1"
MESSAGE_REACTION = "https://www.facebook.com/webgraphql/mutation"
TYPING = "https://www.facebook.com/ajax/messaging/typ.php"
GRAPHQL = "https://www.facebook.com/api/graphqlbatch/"
ATTACHMENT_PHOTO = "https://www.facebook.com/mercury/attachments/photo/"
EVENT_REMINDER = "https://www.facebook.com/ajax/eventreminder/create"
MODERN_SETTINGS_MENU = "https://www.facebook.com/bluebar/modern_settings_menu/"
pull_channel = 0
def change_pull_channel(self, channel=None):
if channel is None:
self.pull_channel = (self.pull_channel + 1) % 5 # Pull channel will be 0-4
else:
self.pull_channel = channel
self.STICKY = "https://{}-edge-chat.facebook.com/pull".format(self.pull_channel)
self.PING = "https://{}-edge-chat.facebook.com/active_ping".format(self.pull_channel)
facebookEncoding = 'UTF-8'
def now():
return int(time()*1000)
def strip_to_json(text):
try:
return text[text.index('{'):]
except ValueError:
        raise FBchatException('No JSON object found: {}'.format(repr(text)))
def get_decoded_r(r):
return get_decoded(r._content)
def get_decoded(content):
return content.decode(facebookEncoding)
def parse_json(content):
return json.loads(content)
def get_json(r):
return json.loads(strip_to_json(get_decoded_r(r)))
def digitToChar(digit):
if digit < 10:
return str(digit)
return chr(ord('a') + digit - 10)
def str_base(number, base):
if number < 0:
return '-' + str_base(-number, base)
(d, m) = divmod(number, base)
if d > 0:
return str_base(d, base) + digitToChar(m)
return digitToChar(m)
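# Illustrative example (editor note): str_base(255, 16) == 'ff' and str_base(-10, 2) == '-1010'.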
def generateMessageID(client_id=None):
k = now()
l = int(random() * 4294967295)
return "<{}:{}-{}@mail.projektitan.com>".format(k, l, client_id)
def getSignatureID():
return hex(int(random() * 2147483648))
def generateOfflineThreadingID():
ret = now()
value = int(random() * 4294967295)
string = ("0000000000000000000000" + format(value, 'b'))[-22:]
msgs = format(ret, 'b') + string
return str(int(msgs, 2))
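# Editor note: generateOfflineThreadingID packs the current millisecond timestamp
# together with 22 random bits into a single decimal string.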
def check_json(j):
if j.get('error') is None:
return
if 'errorDescription' in j:
# 'errorDescription' is in the users own language!
raise FBchatFacebookError('Error #{} when sending request: {}'.format(j['error'], j['errorDescription']), fb_error_code=j['error'], fb_error_message=j['errorDescription'])
elif 'debug_info' in j['error'] and 'code' in j['error']:
raise FBchatFacebookError('Error #{} when sending request: {}'.format(j['error']['code'], repr(j['error']['debug_info'])), fb_error_code=j['error']['code'], fb_error_message=j['error']['debug_info'])
else:
raise FBchatFacebookError('Error {} when sending request'.format(j['error']), fb_error_code=j['error'])
def check_request(r, as_json=True):
if not r.ok:
raise FBchatFacebookError('Error when sending request: Got {} response'.format(r.status_code), request_status_code=r.status_code)
content = get_decoded_r(r)
if content is None or len(content) == 0:
raise FBchatFacebookError('Error when sending request: Got empty response')
if as_json:
content = strip_to_json(content)
try:
j = json.loads(content)
except ValueError:
raise FBchatFacebookError('Error while parsing JSON: {}'.format(repr(content)))
check_json(j)
return j
else:
return content
def get_jsmods_require(j, index):
if j.get('jsmods') and j['jsmods'].get('require'):
try:
return j['jsmods']['require'][0][index][0]
except (KeyError, IndexError) as e:
log.warning('Error when getting jsmods_require: {}. Facebook might have changed protocol'.format(j))
return None
def get_emojisize_from_tags(tags):
if tags is None:
return None
tmp = [tag for tag in tags if tag.startswith('hot_emoji_size:')]
if len(tmp) > 0:
try:
return LIKES[tmp[0].split(':')[1]]
except (KeyError, IndexError):
log.exception('Could not determine emoji size from {} - {}'.format(tags, tmp))
return None
|
bsd-3-clause
| -4,507,116,148,179,314,000
| 35.686441
| 207
| 0.647263
| false
| 3.043234
| false
| false
| false
|
Azure/azure-storage-python
|
samples/queue/encryption_usage.py
|
1
|
7709
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import uuid
from os import urandom
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import (
OAEP,
MGF1,
)
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.keywrap import (
aes_key_wrap,
aes_key_unwrap,
)
# Error message raised by the wrappers below for unsupported algorithms; defined
# locally so the sample stays self-contained.
_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
# Sample implementations of the encryption-related interfaces.
class KeyWrapper:
def __init__(self, kid):
self.kek = urandom(32)
self.backend = default_backend()
self.kid = 'local:' + kid
def wrap_key(self, key, algorithm='A256KW'):
if algorithm == 'A256KW':
return aes_key_wrap(self.kek, key, self.backend)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def unwrap_key(self, key, algorithm):
if algorithm == 'A256KW':
return aes_key_unwrap(self.kek, key, self.backend)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def get_key_wrap_algorithm(self):
return 'A256KW'
def get_kid(self):
return self.kid
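# Illustrative round-trip (editor note) for the sample wrapper above:
#   kek = KeyWrapper('demo'); cek = urandom(32)
#   assert kek.unwrap_key(kek.wrap_key(cek), 'A256KW') == cek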
class KeyResolver:
def __init__(self):
self.keys = {}
def put_key(self, key):
self.keys[key.get_kid()] = key
def resolve_key(self, kid):
return self.keys[kid]
class RSAKeyWrapper:
def __init__(self, kid):
self.private_key = generate_private_key(public_exponent=65537,
key_size=2048,
backend=default_backend())
self.public_key = self.private_key.public_key()
self.kid = 'local:' + kid
def wrap_key(self, key, algorithm='RSA'):
if algorithm == 'RSA':
return self.public_key.encrypt(key,
OAEP(
mgf=MGF1(algorithm=SHA1()),
algorithm=SHA1(),
label=None)
)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def unwrap_key(self, key, algorithm):
if algorithm == 'RSA':
return self.private_key.decrypt(key,
OAEP(
mgf=MGF1(algorithm=SHA1()),
algorithm=SHA1(),
label=None)
)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def get_key_wrap_algorithm(self):
return 'RSA'
def get_kid(self):
return self.kid
class QueueEncryptionSamples():
def __init__(self, account):
self.account = account
def run_all_samples(self):
self.service = self.account.create_queue_service()
self.put_encrypted_message()
self.peek_get_update_encrypted()
self.decrypt_with_key_encryption_key()
self.require_encryption()
self.alternate_key_algorithms()
def _get_queue_reference(self, prefix='queue'):
queue_name = '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
return queue_name
def _create_queue(self, prefix='queue'):
queue_name = self._get_queue_reference(prefix)
self.service.create_queue(queue_name)
return queue_name
def put_encrypted_message(self):
queue_name = self._create_queue()
# KeyWrapper implements the key encryption key interface
# outlined in the get/update message documentation.
# Setting the key_encryption_key property will tell these
# APIs to encrypt messages.
self.service.key_encryption_key = KeyWrapper('key1')
self.service.put_message(queue_name, 'message1')
self.service.delete_queue(queue_name)
def peek_get_update_encrypted(self):
queue_name = self._create_queue()
# The KeyWrapper is still needed for encryption
self.service.key_encryption_key = KeyWrapper('key1')
self.service.put_message(queue_name, 'message1')
# KeyResolver is used to resolve a key from its id.
# Its interface is defined in the get/peek messages documentation.
key_resolver = KeyResolver()
key_resolver.put_key(self.service.key_encryption_key)
self.service.key_resolver_function = key_resolver.resolve_key
self.service.peek_messages(queue_name)
messages = self.service.get_messages(queue_name)
self.service.update_message(queue_name,
messages[0].id,
messages[0].pop_receipt,
0,
content='encrypted_message2')
self.service.delete_queue(queue_name)
def require_encryption(self):
queue_name = self._create_queue()
self.service.put_message(queue_name, 'Not encrypted')
# Set the require_encryption property on the service to
# ensure all messages sent/received are encrypted.
self.service.require_encryption = True
# If the property is set, but no kek is specified upon
# upload, the method will throw.
try:
self.service.put_message(queue_name, 'message1')
except:
pass
        self.service.key_encryption_key = KeyWrapper('key1')
        key_resolver = KeyResolver()
        key_resolver.put_key(self.service.key_encryption_key)
        self.service.key_resolver_function = key_resolver.resolve_key
        # If encryption is required, but a retrieved message is not
        # encrypted, the method will throw.
        try:
            self.service.peek_messages(queue_name)
except:
pass
self.service.delete_queue(queue_name)
def alternate_key_algorithms(self):
queue_name = self._create_queue()
# To use an alternate method of key wrapping, simply set the
# key_encryption_key property to a wrapper that uses a different algorithm.
self.service.key_encryption_key = RSAKeyWrapper('key2')
self.service.key_resolver_function = None
self.service.put_message(queue_name, 'message')
key_resolver = KeyResolver()
key_resolver.put_key(self.service.key_encryption_key)
self.service.key_resolver_function = key_resolver.resolve_key
message = self.service.peek_messages(queue_name)
self.service.delete_queue(queue_name)
def decrypt_with_key_encryption_key(self):
queue_name = self._create_queue()
# The KeyWrapper object also defines methods necessary for
# decryption as defined in the get/peek messages documentation.
# Since the key_encryption_key property is still set, messages
# will be decrypted automatically.
kek = KeyWrapper('key1')
self.service.key_encryption_key = kek
self.service.put_message(queue_name, 'message1')
# When decrypting, if both a kek and resolver are set,
# the resolver will take precedence. Remove the resolver to just use the kek.
self.service.key_resolver_function = None
messages = self.service.peek_messages(queue_name)
self.service.delete_queue(queue_name)
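# --------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original sample): one way
# to drive the samples above. CloudStorageAccount lives in azure.storage.common in
# the split 1.x packages (older releases exposed it as azure.storage.CloudStorageAccount);
# the placeholder credentials are assumptions and must be replaced before running.
if __name__ == '__main__':
    from azure.storage.common import CloudStorageAccount
    sample_account = CloudStorageAccount(account_name='<account name>',
                                         account_key='<account key>')
    QueueEncryptionSamples(sample_account).run_all_samples()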
|
mit
| 5,391,607,162,951,025,000
| 35.192488
| 85
| 0.586587
| false
| 4.247383
| false
| false
| false
|
communityshare/communityshare
|
community_share/flask_helpers.py
|
1
|
2439
|
from functools import wraps
from flask import request
from typing import Dict
from community_share.app_exceptions import Unauthorized, Forbidden
from community_share.authorization import get_requesting_user
from community_share.models.user import User
from community_share.models.base import Serializable
def api_path(path, query_args={}):
query = []
for name, values in query_args.items():
if not isinstance(values, list):
values = [values]
query += ['{}={}'.format(name, value) for value in values]
return '{base_url}rest/{path}{query}'.format(
base_url=request.url_root,
path=path,
query='?{}'.format('&'.join(query)) if query else ''
)
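# Illustrative example (editor note): inside a request context with
# request.url_root == 'http://localhost/',
#   api_path('users', {'page': 2, 'tags': ['a', 'b']})
# returns 'http://localhost/rest/users?page=2&tags=a&tags=b'.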
def needs_auth(auth_level='user'):
def needs_auth_decorator(f):
@wraps(f)
def auth_check(*args, **kwargs):
# Don't use
#
# user = kwargs.pop('requester', get_requesting_user())
#
# here because the eager execution of get_requesting_user
# will force us to be in flask app context during any test
# that uses a @needs_auth() method, and that makes unit
# tests harder.
if 'requester' in kwargs:
user = kwargs.pop('requester')
else:
user = get_requesting_user()
if user is None:
raise Unauthorized()
if 'admin' == auth_level and not user.is_administrator:
raise Unauthorized()
return f(*args, requester=user, **kwargs)
return auth_check
return needs_auth_decorator
def needs_admin_auth():
return needs_auth('admin')
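# Hedged usage sketch (editor addition, not part of the original module): the
# decorator can wrap any callable; passing requester= explicitly, as the comment
# inside auth_check describes, bypasses get_requesting_user() in tests.
@needs_auth('admin')
def _example_admin_only(requester=None):
    return {'is_admin': requester.is_administrator}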
def serialize(user, raw_item, fields=None):
if raw_item is None:
return None
item = raw_item.serialize(user)
if item is None:
return None
if fields is None:
return item
return {key: item[key] for key in item if key in fields + ['id']}
def serialize_many(user, raw_items, fields=None):
items = [serialize(user, item, fields) for item in raw_items]
return [item for item in items if item is not None]
def make_OK_response(message='OK'):
return {'message': message}
def make_single_response(requester: User, item: Serializable) -> Dict[str, Dict]:
serialized = serialize(requester, item)
if serialized is None:
raise Forbidden()
return {'data': serialized}
|
mpl-2.0
| 6,863,411,846,151,159,000
| 25.802198
| 81
| 0.611316
| false
| 4.058236
| false
| false
| false
|
shyampurk/bluemix-parking-meter
|
testApp/app.py
|
1
|
1873
|
'''*********************************************************************************
APP - SMART PARKING LOT SYSTEM
*********************************************************************************'''
from pubnub import Pubnub
from threading import Thread
import sys
pub_key = "demo"
sub_key = "demo"
g_userData = dict()
g_myCar = dict()
g_lotNumber = sys.argv[1]
g_carNumber = sys.argv[2]
def init():
#Pubnub Key Initialization
global pubnub
pubnub = Pubnub(publish_key=pub_key,subscribe_key=sub_key)
pubnub.subscribe(channels='parkingapp-resp', callback=callback, error=callback,
connect=connect, reconnect=reconnect, disconnect=disconnect)
pubnub.subscribe(channels=g_carNumber, callback=caRcallback, error=caRcallback,
connect=connect, reconnect=reconnect, disconnect=disconnect)
def callback(message, channel):
g_userData.update(message)
def caRcallback(message, channel):
g_myCar.update(message)
def dataHandling(stdin):
l_action = int(stdin.readline().strip())
if(l_action == 1):
pubnub.publish(channel='parkingapp-req',message={"requester":"APP",
"lotNumber":0,"requestType":1,"requestValue":0})
elif(l_action == 2):
pubnub.publish(channel='parkingapp-req',
message={"requester":"APP","lotNumber":g_lotNumber,
"requestType":2,"requestValue":g_carNumber})
elif(l_action == 3):
print "\n\n", g_userData
print "\n\n", g_myCar
else:
pass
def error(message):
print("ERROR : " + str(message))
def connect(message):
print "CONNECTED"
def reconnect(message):
print("RECONNECTED")
def disconnect(message):
print("DISCONNECTED")
if __name__ == '__main__':
init()
while True:
t1 = Thread(target=dataHandling, args=(sys.stdin,))
t1.start()
t1.join()
#End of the Script
##*****************************************************************************************************##
|
mit
| -4,338,057,441,368,052,000
| 26.558824
| 105
| 0.595836
| false
| 3.320922
| false
| false
| false
|
sambandi/eMonitor
|
emonitor/lib/location.py
|
2
|
5124
|
import re
from math import cos, sqrt, tan, sin, atan, trunc, radians, degrees
def getFloat(value):
try:
return float(value)
except ValueError:
value = str(value).replace('B', '8').replace('O', '0').replace(',', '.')
_errcount = 0
for i in value:
            if not re.match(r'[0-9\.]', i):
_errcount += 1
if _errcount == 0:
return float(value)
return None
class Location:
"""
Location class for position calculation and conversion, can handle GK and Wgs84 notation - default Wgs84
"""
earthRadius = 6378137.0 # Earth radius in m
aBessel = 6377397.155
eeBessel = 0.0066743722296294277832
ScaleFactor = 0.00000982
RotXRad = -7.16069806998785E-06
RotYRad = 3.56822869296619E-07
RotZRad = 7.06858347057704E-06
ShiftXMeters = 591.28
ShiftYMeters = 81.35
ShiftZMeters = 396.39
def __init__(self, x, y, geotype='wgs84'): # wgs84 (default), gk
self.x = getFloat(x)
self.y = getFloat(y)
self.geotype = geotype.lower()
def __repr__(self):
return u"<location: {}, {} ({})>".format(self.x, self.y, self.geotype)
def getLatLng(self, use_wgs84=None):
if self.geotype == 'gk': # gauss kruger
(x, y) = self._gk_transformation()
return Location.seven_parameter_helmert_transf(x, y, use_wgs84)
else:
return self.x, self.y
def getDistance(self, lat, lng):
"""
get distance in meters
"""
(lat1, lng1) = self.getLatLng()
x = ((radians(lng - lng1)) * cos(0.5 * (radians(lat + lat1))))**2
return Location.earthRadius * sqrt(x + (radians(lat - lat1))**2)
def _gk_transformation(self): # transformation for gauss kruger
# Check for invalid Parameters
if not ((self.x > 1000000) and (self.y > 1000000)) and self.geotype != 'gk':
raise ValueError("No valid Gauss-Kruger-Code.")
# Variables to prepare the geovalues
bii = (self.y / 10000855.7646)**2
bf = (325632.08677 * (self.y / 10000855.7646) * (((((0.00000562025 * bii + 0.00022976983) * bii - 0.00113566119) * bii + 0.00424914906) * bii - 0.00831729565) * bii + 1)) / degrees(3600)
g2 = 0.0067192188 * cos(bf)**2
fa = (self.x - trunc(self.x / 1000000) * 1000000 - 500000) / (6398786.849 / sqrt(1 + g2))
geo_dez_right = degrees(bf - fa**2 * tan(bf) * (1 + g2) / 2 + fa**4 * tan(bf) * (5 + 3 * tan(bf)**2 + 6 * g2 - 6 * g2 * tan(bf)**2) / 24)
geo_dez_height = degrees(fa - fa**3 * (1 + 2 * tan(bf)**2 + g2) / 6 + fa**5 * (1 + 28 * tan(bf)**2 + 24 * tan(bf)**4) / 120) / cos(bf) + trunc(self.x / 1000000) * 3
return geo_dez_right, geo_dez_height
@staticmethod
def seven_parameter_helmert_transf(x, y, use_wgs84=False):
# calculate coordinates with helmert transformation
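        # Outline (editor note): convert the Bessel-ellipsoid latitude/longitude to
        # cartesian XYZ, apply the seven Helmert parameters (shift, rotation, scale)
        # defined on the class, then iterate back to geodetic latitude until two
        # successive estimates differ by less than 1e-15 (radians).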
latitudeit = 99999999
if use_wgs84:
ee = 0.0066943799
else:
ee = 0.00669438002290
n = Location.aBessel / sqrt(1 - (Location.eeBessel * sin(radians(x))**2))
cartesian_x_meters = n * cos(radians(x)) * cos(radians(y))
cartesian_y_meters = n * cos(radians(x)) * sin(radians(y))
cartesian_z_meters = n * (1 - Location.eeBessel) * sin(radians(x))
cart_output_x_meters = (1 + Location.ScaleFactor) * cartesian_x_meters + Location.RotZRad * cartesian_y_meters - Location.RotYRad * cartesian_z_meters + Location.ShiftXMeters
cart_output_y_meters = -1 * Location.RotZRad * cartesian_x_meters + (1 + Location.ScaleFactor) * cartesian_y_meters + Location.RotXRad * cartesian_z_meters + Location.ShiftYMeters
cart_output_z_meters = Location.RotYRad * cartesian_x_meters - Location.RotXRad * cartesian_y_meters + (1 + Location.ScaleFactor) * cartesian_z_meters + Location.ShiftZMeters
geo_dez_height = atan(cart_output_y_meters / cart_output_x_meters)
latitude = atan(cart_output_z_meters / sqrt((cart_output_x_meters * cart_output_x_meters) + (cart_output_y_meters * cart_output_y_meters)))
while abs(latitude - latitudeit) >= 0.000000000000001:
latitudeit = latitude
n = Location.earthRadius / sqrt(1 - ee * sin(latitude)**2)
latitude = atan((cart_output_z_meters + ee * n * sin(latitudeit)) / sqrt(cart_output_x_meters**2 + cart_output_y_meters * cart_output_y_meters))
return degrees(latitude), degrees(geo_dez_height)
if __name__ == "__main__":
# test values
# location1 (48.124570, 11.582328)
lkx1 = 4469012.74
lky1 = 5331920.84
# location2 (48.1103206, 11.7233732)
lkx2 = 4479507.160
lky2 = "53302B9,O32" # test value with error
l1 = Location(lkx1, lky1, geotype='gk')
l2 = Location(lkx2, lky2, geotype='gk')
l3 = Location(48.1103206, 11.7233732) # test coordinates (imprecision)
print "l1: {}\nl2: {}\nl3: {}".format(l1, l2, l3)
print "\nl2->l3 {:8.2f} m (precision)".format(l2.getDistance(*l3.getLatLng()))
print "l2->l1 {:8.2f} m".format(l2.getDistance(*l1.getLatLng()))
|
bsd-3-clause
| 2,488,118,184,217,061,400
| 42.423729
| 194
| 0.598946
| false
| 2.949914
| false
| false
| false
|
rzhxeo/youtube-dl
|
youtube_dl/extractor/youtube.py
|
1
|
76832
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
ExtractorError,
get_element_by_attribute,
get_element_by_id,
int_or_none,
OnDemandPagedList,
orderedSet,
unescapeHTML,
unified_strdate,
uppercase_escape,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
(username, password) = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
login_page, 'Login GALX parameter')
# Log in
login_form_strs = {
'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
'Email': username,
'GALX': galx,
'Passwd': password,
'PersistentCookie': 'yes',
'_utf8': '霱',
'bgresponse': 'js_disabled',
'checkConnection': '',
'checkedDomains': 'youtube',
'dnConn': '',
'pstMsg': '0',
'rmShown': '1',
'secTok': '',
'signIn': 'Sign in',
'timeStmp': '',
'service': 'youtube',
'uilel': '3',
'hl': 'en_US',
}
# Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
# chokes on unicode
login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
req, None,
note='Logging in', errnote='unable to log in', fatal=False)
if login_results is False:
return False
if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
# Two-Factor
# TODO add SMS and phone call support - these require making a request and then prompting the user
if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
tfa_code = self._get_tfa_info()
if tfa_code is None:
self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
# Unlike the first login form, secTok and timeStmp are both required for the TFA form
            match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning('Failed to get secTok - did the page structure change?')
                return False
            secTok = match.group(1)
            match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
                return False
            timeStmp = match.group(1)
tfa_form_strs = {
'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
'smsToken': '',
'smsUserPin': tfa_code,
'smsVerifyPin': 'Verify',
'PersistentCookie': 'yes',
'checkConnection': '',
'checkedDomains': 'youtube',
'pstMsg': '1',
'secTok': secTok,
'timeStmp': timeStmp,
'service': 'youtube',
'hl': 'en_US',
}
tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
tfa_req, None,
note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
if tfa_results is False:
return False
if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
self._downloader.report_warning('unable to log in - did the page structure change?')
return False
if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username or password')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))
|youtu\.be/ # just youtu.be/xxxx
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240},
'6': {'ext': 'flv', 'width': 450, 'height': 270},
'13': {'ext': '3gp'},
'17': {'ext': '3gp', 'width': 176, 'height': 144},
'18': {'ext': 'mp4', 'width': 640, 'height': 360},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720},
'34': {'ext': 'flv', 'width': 640, 'height': 360},
'35': {'ext': 'flv', 'width': 854, 'height': 480},
'36': {'ext': '3gp', 'width': 320, 'height': 240},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
'43': {'ext': 'webm', 'width': 640, 'height': 360},
'44': {'ext': 'webm', 'width': 854, 'height': 480},
'45': {'ext': 'webm', 'width': 1280, 'height': 720},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080},
# 3d videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},
# Apple HTTP Live Streaming
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
'172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
'250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
'251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'like_count': int,
'dislike_count': int,
}
},
{
'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia'
}
},
{
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:2acfda1b285bdd478ccec22f9918199d',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'upload_date': '20100909',
'uploader': 'The Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'upload_date': '20140605',
},
},
# Age-gate video with encrypted signature
{
'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'upload_date': '20110629',
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'upload_date': '20120731',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫艾倫',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
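    # Illustrative example (editor note): a signature 'abc.defgh.ij' maps to '3.5.2',
    # i.e. the dot-separated lengths of its parts.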
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note='Downloading %s player %s' % (player_type, player_id),
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note='Downloading %s player %s' % (player_type, player_id),
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
if cache_spec is None:
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_available_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
params = compat_urllib_parse.urlencode({
'lang': lang,
'v': video_id,
'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
'name': track.attrib['name'].encode('utf-8'),
})
url = 'https://www.youtube.com/api/timedtext?' + params
sub_lang_list[lang] = url
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_available_automatic_caption(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
sub_format = self._downloader.params.get('subtitlesformat', 'srt')
self.to_screen('%s: Looking for automatic captions' % video_id)
mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if mobj is None:
self._downloader.report_warning(err_msg)
return {}
player_config = json.loads(mobj.group(1))
try:
args = player_config['args']
caption_url = args['ttsurl']
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse.urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
params = compat_urllib_parse.urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': sub_format,
'ts': timestamp,
'kind': caption_kind,
})
sub_lang_list[sub_lang] = caption_url + '&' + params
return sub_lang_list
        # An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
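    # Illustrative example (editor note): extract_id('https://www.youtube.com/watch?v=BaW_jenozKc')
    # and extract_id('BaW_jenozKc') both return 'BaW_jenozKc'.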
def _extract_from_m3u8(self, manifest_url, video_id):
url_map = {}
def _get_urls(_manifest):
lines = _manifest.split('\n')
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
url_map[itag] = format_url
return url_map
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
def _parse_dash_manifest(
self, video_id, dash_manifest_url, player_url, age_gate):
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
dash_doc = self._download_xml(
dash_manifest_url, video_id,
note='Downloading DASH manifest',
errnote='Could not download DASH manifest')
formats = []
for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
if url_el is None:
continue
format_id = r.attrib['id']
video_url = url_el.text
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
f = {
'format_id': format_id,
'url': video_url,
'width': int_or_none(r.attrib.get('width')),
'height': int_or_none(r.attrib.get('height')),
'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
'asr': int_or_none(r.attrib.get('audioSamplingRate')),
'filesize': filesize,
'fps': int_or_none(r.attrib.get('frameRate')),
}
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == format_id)
except StopIteration:
f.update(self._formats.get(format_id, {}).items())
formats.append(f)
else:
existing_format.update(f)
return formats
def _real_extract(self, url):
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
# Get video info
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse.urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
else:
age_gate = False
try:
# Try looking directly into the video webpage
mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
if not mobj:
raise ValueError('Could not find ytplayer.config') # caught below
json_code = uppercase_escape(mobj.group(1))
ytplayer_config = json.loads(json_code)
args = ytplayer_config['args']
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
if 'url_encoded_fmt_stream_map' not in args:
raise ValueError('No stream_map present') # caught below
except ValueError:
# We fallback to the get_video_info pages (used by the embed page)
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = (
'%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (proto, video_id, el_type))
video_info_webpage = self._download_webpage(
video_info_url,
video_id, note=False,
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
if 'token' in video_info:
break
if 'token' not in video_info:
if 'reason' in video_info:
raise ExtractorError(
'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
view_count = None
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
raise ExtractorError('Unable to extract uploader name')
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
# uploader_id
video_uploader_id = None
mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning('unable to extract uploader nickname')
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = None
mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
if mobj is None:
mobj = re.search(
r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
video_webpage)
if mobj is not None:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
''', r'\1', video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
def _extract_count(count_name):
count = self._search_regex(
r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
video_webpage, count_name, default=None)
if count is not None:
return int(count.replace(',', ''))
return None
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, video_webpage)
return
if 'length_seconds' not in video_info:
self._downloader.report_warning('unable to extract video duration')
video_duration = None
else:
video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
'format_id': itag,
'url': video_real_url,
'player_url': player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
url_map = {}
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
jsplayer_url_json = self._search_regex(
r'"assets":.+?"js":\s*("[^"]+")',
embed_webpage if age_gate else video_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
r'html5player-([^/]+?)(?:/html5player)?\.js',
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
url_map[format_id] = url
formats = _map_to_format_list(url_map)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd = video_info.get('dashmpd')
if dash_mpd:
dash_manifest_url = dash_mpd[0]
try:
dash_formats = self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate)
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
else:
# Hide the formats we found through non-DASH
dash_keys = set(df['format_id'] for df in dash_formats)
for f in formats:
if f['format_id'] in dash_keys:
f['format_id'] = 'nondash-%s' % f['format_id']
f['preference'] = f.get('preference', 0) - 10000
formats.extend(dash_formats)
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': upload_date,
'title': video_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'subtitles': video_subtitles,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'formats': formats,
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
\? (?:.*?&)*? (?:p|a|list)=
| p/
)
(
(?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
},
'playlist_count': 2,
}, {
'note': 'embedded',
'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
}
}, {
'note': 'Embedded SWF player',
'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
},
'playlist_mincount': 21,
}]
def _real_initialize(self):
self._login()
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading Youtube mix')
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, title)
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
if 'v' in query_dict:
video_id = query_dict['v'][0]
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
if playlist_id.startswith('RD'):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
more_widget_html = content_html = page
# Check if the playlist exists or is private
if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
raise ExtractorError(
'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
# Extract the video ids from the playlist pages
ids = []
for page_num in itertools.count(1):
matches = re.finditer(self._VIDEO_RE, content_html)
# We remove the duplicates and the link with index 0
# (it's not the first video of the playlist)
new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
ids.extend(new_ids)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
page, 'title')
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, playlist_title)
class YoutubeChannelIE(InfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
}]
def extract_videos_from_page(self, page):
ids_in_page = []
for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1))
return ids_in_page
def _real_extract(self, url):
channel_id = self._match_id(url)
video_ids = []
url = 'https://www.youtube.com/channel/%s/videos' % channel_id
channel_page = self._download_webpage(url, channel_id)
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
video_ids = self.extract_videos_from_page(channel_page)
entries = [
self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in video_ids]
return self.playlist_result(entries, channel_id)
def _entries():
more_widget_html = content_html = channel_page
for pagenum in itertools.count(1):
ids_in_page = self.extract_videos_from_page(content_html)
for video_id in ids_in_page:
yield self.url_result(
video_id, 'Youtube', video_id=video_id)
mobj = re.search(
r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), channel_id,
'Downloading page #%s' % (pagenum + 1),
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(_entries(), channel_id)
class YoutubeUserIE(InfoExtractor):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
_GDATA_PAGE_SIZE = 50
_GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'title': 'TheLinuxFoundation',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
# Don't return True if the url can be extracted with another youtube
# extractor; the regex is too permissive and it would match.
other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _real_extract(self, url):
username = self._match_id(url)
# Download video ids using the YouTube Data API. Result size per
# query is limited (currently to 50 videos), so we query
# page by page until no more video ids are returned - that means
# we got all of them.
def download_page(pagenum):
start_index = pagenum * self._GDATA_PAGE_SIZE + 1
gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
page = self._download_webpage(
gdata_url, username,
'Downloading video ids from %d to %d' % (
start_index, start_index + self._GDATA_PAGE_SIZE))
try:
response = json.loads(page)
except ValueError as err:
raise ExtractorError('Invalid JSON in API response: ' + compat_str(err))
if 'entry' not in response['feed']:
return
# Extract video identifiers
entries = response['feed']['entry']
for entry in entries:
title = entry['title']['$t']
video_id = entry['id']['$t'].split('/')[-1]
yield {
'_type': 'url',
'url': video_id,
'ie_key': 'Youtube',
'id': video_id,
'title': title,
}
url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)
return self.playlist_result(url_results, playlist_title=username)
class YoutubeSearchIE(SearchInfoExtractor):
IE_DESC = 'YouTube.com searches'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
_MAX_RESULTS = 1000
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
video_ids = []
pagenum = 0
limit = n
PAGE_SIZE = 50
while (PAGE_SIZE * pagenum) < limit:
result_url = self._API_URL % (
compat_urllib_parse.quote_plus(query.encode('utf-8')),
(PAGE_SIZE * pagenum) + 1)
data_json = self._download_webpage(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % (pagenum + 1),
errnote='Unable to download API page')
data = json.loads(data_json)
api_response = data['data']
if 'items' not in api_response:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids
limit = min(n, api_response['totalItems'])
pagenum += 1
if len(video_ids) > n:
video_ids = video_ids[:n]
videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in video_ids]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
class YoutubeSearchURLIE(InfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse.unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
[r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
part_url_snippet = self._html_search_regex(
r'(?s)href="([^"]+)"', part_code, 'item URL')
part_url = compat_urlparse.urljoin(
'https://www.youtube.com/', part_url_snippet)
entries.append({
'_type': 'url',
'url': part_url,
'title': part_title,
})
return {
'_type': 'playlist',
'entries': entries,
'title': query,
}
class YoutubeShowIE(InfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'http://www.youtube.com/show/airdisasters',
'playlist_mincount': 3,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(
url, playlist_id, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
entries = [
self.url_result(
'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
for season in m_seasons
]
title = self._og_search_title(webpage, fatal=False)
return {
'_type': 'playlist',
'id': playlist_id,
'title': title,
'entries': entries,
}
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for extractors that fetch info from
http://www.youtube.com/feed_ajax
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
# use action_load_personal_feed instead of action_load_system_feed
_PERSONAL_FEED = False
@property
def _FEED_TEMPLATE(self):
action = 'action_load_system_feed'
if self._PERSONAL_FEED:
action = 'action_load_personal_feed'
return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _real_extract(self, url):
feed_entries = []
paging = 0
for i in itertools.count(1):
info = self._download_json(
self._FEED_TEMPLATE % paging,
'%s feed' % self._FEED_NAME,
'Downloading page %s' % i,
transform_source=uppercase_escape)
feed_html = info.get('feed_html') or info.get('content_html')
load_more_widget_html = info.get('load_more_widget_html') or feed_html
m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
ids = orderedSet(m.group(1) for m in m_ids)
feed_entries.extend(
self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in ids)
mobj = re.search(
r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
load_more_widget_html)
if mobj is None:
break
paging = mobj.group('paging')
return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
_FEED_NAME = 'watch_later'
_PLAYLIST_TITLE = 'Youtube Watch Later'
_PERSONAL_FEED = True
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PERSONAL_FEED = True
_PLAYLIST_TITLE = 'Youtube Watch History'
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeSubscriptionsIE(YoutubePlaylistIE):
IE_NAME = 'youtube:subscriptions'
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_TESTS = []
def _real_extract(self, url):
title = 'Youtube Subscriptions'
page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
new_ids = orderedSet(matches)
ids.extend(new_ids)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), title,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return {
'_type': 'playlist',
'title': title,
'entries': self._ids_to_results(ids),
}
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
unlicense
| -1,893,902,528,345,365,200
| 43.208525
| 226
| 0.517656
| false
| 3.68369
| false
| false
| false
|
stuser/temp
|
web_crawler/thread_sample.py
|
1
|
2625
|
import threading
import time
import requests
import datetime as dt
import NTUH_clinic as nc
# Clinic query parameter file (.CSV file)
ParamaterFileName = "NTUH_params"
# Time interval between query runs
interval = 300 # sec
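# Annotation (editor's summary of the function below; not in the original file):
# query() fetches one clinic query result within the given requests session,
# converts it to a DataFrame, exports it as a CSV file under `directory`, and
# closes the session.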
def query(sess, classname, directory, url, hosp, dept, ampm, querydate):
bs = nc.BsObject(url, hosp, dept, ampm, querydate, sess)
soup = bs.getQueryResult()
df = bs.convertDataToDataFrame(soup)
nc.exportDataToCSVfile(df, classname, directory,
hosp, dept, ampm, querydate)
sess.close()
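# Annotation (editor's summary of the function below; not in the original file):
# getAmPmFlag() maps the current time to a session flag: 1, 2 or 3 for the
# morning, afternoon or evening clinic windows checked below, and 0 otherwise
# (before 09:00 or after the 23:00 cutoff).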
def getAmPmFlag():
# Clinic hours: morning clinic 09:00~12:00, afternoon clinic 14:00~17:00, evening clinic 18:30~20:30
curr = dt.datetime.now()
am_start = dt.datetime(curr.year, curr.month, curr.day, 9, 0)
pm_start = dt.datetime(curr.year, curr.month, curr.day, 14, 0)
evn_start = dt.datetime(curr.year, curr.month, curr.day, 18, 30)
clinic_end = dt.datetime(curr.year, curr.month,
curr.day, 23, 0)  # cutoff time for the query script
ampm_flag = 0
if pm_start > curr >= am_start:
ampm_flag = 1 # Morning clinic
elif evn_start > curr >= pm_start:
ampm_flag = 2 # Afternoon clinic
elif clinic_end > curr >= evn_start:
ampm_flag = 3 # Evening clinic
else:
pass  # print("outside clinic hours")
return ampm_flag
def demo():
AmPmFlag = getAmPmFlag()
# AmPmFlag = 1 #test code
if AmPmFlag != 0:
all_param_set = nc.loadParamaterFile(ParamaterFileName)
# Pick the query conditions for the current clinic session
param_set = all_param_set[all_param_set['ampm'] == str(AmPmFlag)]
# *the index use in for-loop, subset need reset index.
param_set = param_set.reset_index()
query_set_nums = len(param_set)
else:
query_set_nums = 0  # outside clinic hours: set the number of query conditions to 0 so no queries are executed
for num in range(query_set_nums):
sess = requests.Session()
# print("param_set",param_set)
t = threading.Thread(target=query,
args=[sess,
param_set.classname[num],
param_set.directory[num],
param_set.url[num],
param_set.hosp[num],
param_set.dept[num],
param_set.ampm[num],
dt.datetime.now().strftime('%Y/%m/%d')])
t.start()
while True:
threading.Thread(target=demo).start()
time.sleep(interval)
|
mit
| 2,986,205,221,865,317,400
| 32.689189
| 106
| 0.561974
| false
| 2.875433
| false
| false
| false
|
kyokley/MediaConverter
|
main.py
|
1
|
1082
|
from tv_runner import TvRunner
from movie_runner import MovieRunner
from settings import (MEDIAVIEWER_INFER_SCRAPERS_URL,
SEND_EMAIL,
CELERY_VHOST,
)
from utils import postData, send_email
from celery import Celery
from log import LogFile
log = LogFile().getLogger()
app = Celery('tasks', broker='amqp://guest@localhost/%s' % CELERY_VHOST)
@app.task(name='main.main')
def main():
all_errors = []
tvRunner = TvRunner()
tv_errors = tvRunner.run()
movieRunner = MovieRunner()
movie_errors = movieRunner.run()
postData({}, MEDIAVIEWER_INFER_SCRAPERS_URL)
all_errors.extend(tv_errors)
all_errors.extend(movie_errors)
if all_errors:
log.error('Errors occured in the following files:')
for error in all_errors:
log.error(error)
if SEND_EMAIL:
subject = 'MC: Got some errors'
message = '\n'.join(all_errors)
send_email(subject, message)
log.info('All done')
if __name__ == '__main__':
main.delay()
|
mit
| -8,782,212,688,426,282,000
| 24.761905
| 72
| 0.609982
| false
| 3.547541
| false
| false
| false
|
procrastinatio/mapproxy
|
mapproxy/test/unit/test_image.py
|
2
|
22774
|
# -:- encoding: utf8 -:-
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
from io import BytesIO
from mapproxy.compat.image import Image, ImageDraw
from mapproxy.image import ImageSource, ReadBufWrapper, is_single_color_image
from mapproxy.image import peek_image_format
from mapproxy.image.merge import merge_images
from mapproxy.image import _make_transparent as make_transparent, SubImageSource, img_has_transparency, quantize
from mapproxy.image.opts import ImageOptions
from mapproxy.image.tile import TileMerger, TileSplitter
from mapproxy.image.transform import ImageTransformer
from mapproxy.test.image import is_png, is_jpeg, is_tiff, create_tmp_image_file, check_format, create_debug_img, create_image
from mapproxy.srs import SRS
from nose.tools import eq_
from mapproxy.test.image import assert_img_colors_eq
from nose.plugins.skip import SkipTest
PNG_FORMAT = ImageOptions(format='image/png')
JPEG_FORMAT = ImageOptions(format='image/jpeg')
TIFF_FORMAT = ImageOptions(format='image/tiff')
class TestImageSource(object):
def setup(self):
self.tmp_filename = create_tmp_image_file((100, 100))
def teardown(self):
os.remove(self.tmp_filename)
def test_from_filename(self):
ir = ImageSource(self.tmp_filename, PNG_FORMAT)
assert is_png(ir.as_buffer())
assert ir.as_image().size == (100, 100)
def test_from_file(self):
with open(self.tmp_filename, 'rb') as tmp_file:
ir = ImageSource(tmp_file, 'png')
assert ir.as_buffer() == tmp_file
assert ir.as_image().size == (100, 100)
def test_from_image(self):
img = Image.new('RGBA', (100, 100))
ir = ImageSource(img, (100, 100), PNG_FORMAT)
assert ir.as_image() == img
assert is_png(ir.as_buffer())
def test_from_non_seekable_file(self):
with open(self.tmp_filename, 'rb') as tmp_file:
data = tmp_file.read()
class FileLikeDummy(object):
# "file" without seek, like urlopen response
def read(self):
return data
ir = ImageSource(FileLikeDummy(), 'png')
assert ir.as_buffer(seekable=True).read() == data
assert ir.as_image().size == (100, 100)
assert ir.as_buffer().read() == data
def test_output_formats(self):
img = Image.new('RGB', (100, 100))
for format in ['png', 'gif', 'tiff', 'jpeg', 'GeoTIFF', 'bmp']:
ir = ImageSource(img, (100, 100), image_opts=ImageOptions(format=format))
yield check_format, ir.as_buffer(), format
def test_converted_output(self):
ir = ImageSource(self.tmp_filename, (100, 100), PNG_FORMAT)
assert is_png(ir.as_buffer())
assert is_jpeg(ir.as_buffer(JPEG_FORMAT))
assert is_jpeg(ir.as_buffer())
assert is_tiff(ir.as_buffer(TIFF_FORMAT))
assert is_tiff(ir.as_buffer())
def test_output_formats_greyscale_png(self):
img = Image.new('L', (100, 100))
ir = ImageSource(img, image_opts=PNG_FORMAT)
img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
assert img.mode == 'P'
assert img.getpixel((0, 0)) == 255
def test_output_formats_greyscale_alpha_png(self):
img = Image.new('LA', (100, 100))
ir = ImageSource(img, image_opts=PNG_FORMAT)
img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
assert img.mode == 'LA'
assert img.getpixel((0, 0)) == (0, 0)
def test_output_formats_png8(self):
img = Image.new('RGBA', (100, 100))
ir = ImageSource(img, image_opts=PNG_FORMAT)
img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
assert img.mode == 'P'
assert img.getpixel((0, 0)) == 255
def test_output_formats_png24(self):
img = Image.new('RGBA', (100, 100))
image_opts = PNG_FORMAT.copy()
image_opts.colors = 0 # TODO image_opts
ir = ImageSource(img, image_opts=image_opts)
img = Image.open(ir.as_buffer())
eq_(img.mode, 'RGBA')
assert img.getpixel((0, 0)) == (0, 0, 0, 0)
class TestSubImageSource(object):
def test_full(self):
sub_img = create_image((100, 100), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_larger(self):
sub_img = create_image((150, 150), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_negative_offset(self):
sub_img = create_image((150, 150), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(-50, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_overlap_right(self):
sub_img = create_image((50, 50), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(75, 25), image_opts=ImageOptions(transparent=True)).as_image()
eq_(sorted(img.getcolors()), [(25*50, (100, 120, 130, 140)), (100*100-25*50, (255, 255, 255, 0))])
def test_outside(self):
sub_img = create_image((50, 50), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(200, 0), image_opts=ImageOptions(transparent=True)).as_image()
eq_(img.getcolors(), [(100*100, (255, 255, 255, 0))])
class ROnly(object):
def __init__(self):
self.data = [b'Hello World!']
def read(self):
if self.data:
return self.data.pop()
return b''
def __iter__(self):
it = iter(self.data)
self.data = []
return it
class TestReadBufWrapper(object):
def setup(self):
rbuf = ROnly()
self.rbuf_wrapper = ReadBufWrapper(rbuf)
def test_read(self):
assert self.rbuf_wrapper.read() == b'Hello World!'
self.rbuf_wrapper.seek(0)
eq_(self.rbuf_wrapper.read(), b'')
def test_seek_read(self):
self.rbuf_wrapper.seek(0)
assert self.rbuf_wrapper.read() == b'Hello World!'
self.rbuf_wrapper.seek(0)
assert self.rbuf_wrapper.read() == b'Hello World!'
def test_iter(self):
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [])
def test_seek_iter(self):
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
def test_hasattr(self):
assert hasattr(self.rbuf_wrapper, 'seek')
assert hasattr(self.rbuf_wrapper, 'readline')
class TestMergeAll(object):
def setup(self):
self.cleanup_tiles = []
def test_full_merge(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions()
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
def test_one(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100))]
self.tiles = [ImageSource(self.cleanup_tiles[0])]
m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
img_opts = ImageOptions(transparent=True)
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (100, 100))
eq_(img.mode, 'RGBA')
def test_missing_tiles(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100))]
self.tiles = [ImageSource(self.cleanup_tiles[0])]
self.tiles.extend([None]*8)
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions()
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
eq_(img.getcolors(), [(80000, (255, 255, 255)), (10000, (0, 0, 0)), ])
def test_invalid_tile(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
invalid_tile = self.tiles[0].source
with open(invalid_tile, 'wb') as tmp:
tmp.write(b'invalid')
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions(bgcolor=(200, 0, 50))
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
eq_(img.getcolors(), [(10000, (200, 0, 50)), (80000, (0, 0, 0))])
assert not os.path.isfile(invalid_tile)
def test_none_merge(self):
tiles = [None]
m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
img_opts = ImageOptions(mode='RGBA', bgcolor=(200, 100, 30, 40))
result = m.merge(tiles, img_opts)
img = result.as_image()
eq_(img.size, (100, 100))
eq_(img.getcolors(), [(100*100, (200, 100, 30, 40))])
def teardown(self):
for tile_fname in self.cleanup_tiles:
if tile_fname and os.path.isfile(tile_fname):
os.remove(tile_fname)
class TestGetCrop(object):
def setup(self):
self.tmp_file = create_tmp_image_file((100, 100), two_colored=True)
self.img = ImageSource(self.tmp_file,
image_opts=ImageOptions(format='image/png'), size=(100, 100))
def teardown(self):
if os.path.exists(self.tmp_file):
os.remove(self.tmp_file)
def test_perfect_match(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (100, 100), bbox, image_opts=None)
assert self.img == result
def test_simple_resize_nearest(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (200, 200), bbox,
image_opts=ImageOptions(resampling='nearest'))
img = result.as_image()
eq_(img.size, (200, 200))
eq_(len(img.getcolors()), 2)
def test_simple_resize_bilinear(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (200, 200), bbox,
image_opts=ImageOptions(resampling='bilinear'))
img = result.as_image()
eq_(img.size, (200, 200))
# some shades of grey with bilinear
assert len(img.getcolors()) >= 4
class TestLayerMerge(object):
def test_opacity_merge(self):
img1 = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)),
image_opts=ImageOptions(opacity=0.5))
result = merge_images([img1, img2], ImageOptions(transparent=False))
img = result.as_image()
eq_(img.getpixel((0, 0)), (127, 127, 255))
def test_opacity_merge_mixed_modes(self):
img1 = ImageSource(Image.new('RGBA', (10, 10), (255, 0, 255, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)).convert('P'),
image_opts=ImageOptions(opacity=0.5))
result = merge_images([img1, img2], ImageOptions(transparent=True))
img = result.as_image()
assert_img_colors_eq(img, [
(10*10, (127, 127, 255, 255)),
])
def test_paletted_merge(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
# generate RGBA images with a transparent rectangle in the lower right
img1 = ImageSource(Image.new('RGBA', (50, 50), (0, 255, 0, 255))).as_image()
draw = ImageDraw.Draw(img1)
draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))
paletted_img = quantize(img1, alpha=True)
assert img_has_transparency(paletted_img)
assert paletted_img.mode == 'P'
rgba_img = Image.new('RGBA', (50, 50), (255, 0, 0, 255))
draw = ImageDraw.Draw(rgba_img)
draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))
img1 = ImageSource(paletted_img)
img2 = ImageSource(rgba_img)
# generate base image and merge the others above
img3 = ImageSource(Image.new('RGBA', (50, 50), (0, 0, 255, 255)))
result = merge_images([img3, img1, img2], ImageOptions(transparent=True))
img = result.as_image()
assert img.mode == 'RGBA'
eq_(img.getpixel((49, 49)), (0, 0, 255, 255))
eq_(img.getpixel((0, 0)), (255, 0, 0, 255))
def test_solid_merge(self):
img1 = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)))
result = merge_images([img1, img2], ImageOptions(transparent=False))
img = result.as_image()
eq_(img.getpixel((0, 0)), (0, 255, 255))
class TestLayerCompositeMerge(object):
def test_composite_merge(self):
# http://stackoverflow.com/questions/3374878
if not hasattr(Image, 'alpha_composite'):
raise SkipTest()
img1 = Image.new('RGBA', size=(100, 100), color=(255, 0, 0, 255))
draw = ImageDraw.Draw(img1)
draw.rectangle((33, 0, 66, 100), fill=(255, 0, 0, 128))
draw.rectangle((67, 0, 100, 100), fill=(255, 0, 0, 0))
img1 = ImageSource(img1)
img2 = Image.new('RGBA', size=(100, 100), color=(0, 255, 0, 255))
draw = ImageDraw.Draw(img2)
draw.rectangle((0, 33, 100, 66), fill=(0, 255, 0, 128))
draw.rectangle((0, 67, 100, 100), fill=(0, 255, 0, 0))
img2 = ImageSource(img2)
result = merge_images([img2, img1], ImageOptions(transparent=True))
img = result.as_image()
eq_(img.mode, 'RGBA')
assert_img_colors_eq(img, [
(1089, (0, 255, 0, 255)),
(1089, (255, 255, 255, 0)),
(1122, (0, 255, 0, 128)),
(1122, (128, 126, 0, 255)),
(1122, (255, 0, 0, 128)),
(1156, (170, 84, 0, 191)),
(3300, (255, 0, 0, 255))])
def test_composite_merge_opacity(self):
if not hasattr(Image, 'alpha_composite'):
raise SkipTest()
bg = Image.new('RGBA', size=(100, 100), color=(255, 0, 255, 255))
bg = ImageSource(bg)
fg = Image.new('RGBA', size=(100, 100), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(fg)
draw.rectangle((10, 10, 89, 89), fill=(0, 255, 255, 255))
fg = ImageSource(fg, image_opts=ImageOptions(opacity=0.5))
result = merge_images([bg, fg], ImageOptions(transparent=True))
img = result.as_image()
eq_(img.mode, 'RGBA')
assert_img_colors_eq(img, [
(3600, (255, 0, 255, 255)),
(6400, (128, 127, 255, 255))])
class TestTransform(object):
def setup(self):
self.src_img = ImageSource(create_debug_img((200, 200), transparent=False))
self.src_srs = SRS(31467)
self.dst_size = (100, 150)
self.dst_srs = SRS(4326)
self.dst_bbox = (0.2, 45.1, 8.3, 53.2)
self.src_bbox = self.dst_srs.transform_bbox_to(self.src_srs, self.dst_bbox)
def test_transform(self, mesh_div=4):
transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=mesh_div)
result = transformer.transform(self.src_img, self.src_bbox, self.dst_size, self.dst_bbox,
image_opts=ImageOptions(resampling='nearest'))
assert isinstance(result, ImageSource)
assert result.as_image() != self.src_img
assert result.size == (100, 150)
def _test_compare_mesh_div(self):
"""
Create transformations with different div values.
"""
for div in [1, 2, 4, 6, 8, 12, 16]:
transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=div)
result = transformer.transform(self.src_img, self.src_bbox,
self.dst_size, self.dst_bbox)
result.as_image().save('/tmp/transform-%d.png' % (div,))
class TestSingleColorImage(object):
def test_one_point(self):
img = Image.new('RGB', (100, 100), color='#ff0000')
draw = ImageDraw.Draw(img)
draw.point((99, 99))
del draw
assert not is_single_color_image(img)
def test_solid(self):
img = Image.new('RGB', (100, 100), color='#ff0102')
eq_(is_single_color_image(img), (255, 1, 2))
def test_solid_w_alpha(self):
img = Image.new('RGBA', (100, 100), color='#ff0102')
eq_(is_single_color_image(img), (255, 1, 2, 255))
def test_solid_paletted_image(self):
img = Image.new('P', (100, 100), color=20)
palette = []
for i in range(256):
palette.extend((i, i//2, i%3))
img.putpalette(palette)
eq_(is_single_color_image(img), (20, 10, 2))
class TestMakeTransparent(object):
def _make_test_image(self):
img = Image.new('RGB', (50, 50), (130, 140, 120))
draw = ImageDraw.Draw(img)
draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120))
return img
def _make_transp_test_image(self):
img = Image.new('RGBA', (50, 50), (130, 140, 120, 100))
draw = ImageDraw.Draw(img)
draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120, 120))
return img
def test_result(self):
img = self._make_test_image()
img = make_transparent(img, (130, 150, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))]
def test_with_color_fuzz(self):
img = self._make_test_image()
img = make_transparent(img, (128, 154, 121), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))]
def test_no_match(self):
img = self._make_test_image()
img = make_transparent(img, (130, 160, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 255))]
def test_from_paletted(self):
img = self._make_test_image().quantize(256)
img = make_transparent(img, (130, 150, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
eq_(colors, [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))])
def test_from_transparent(self):
img = self._make_transp_test_image()
draw = ImageDraw.Draw(img)
draw.rectangle((0, 0, 4, 4), fill=(130, 100, 120, 0))
draw.rectangle((5, 5, 9, 9), fill=(130, 150, 120, 255))
img = make_transparent(img, (130, 150, 120, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = sorted(img.getcolors(), reverse=True)
eq_(colors, [(1550, (130, 140, 120, 100)), (900, (130, 150, 120, 0)),
(25, (130, 150, 120, 255)), (25, (130, 100, 120, 0))])
class TestTileSplitter(object):
def test_background_larger_crop(self):
img = ImageSource(Image.new('RGB', (356, 266), (130, 140, 120)))
img_opts = ImageOptions('RGB')
splitter = TileSplitter(img, img_opts)
tile = splitter.get_tile((0, 0), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(colors, [(256*256, (130, 140, 120))])
tile = splitter.get_tile((256, 256), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(sorted(colors), [(10*100, (130, 140, 120)), (256*256-10*100, (255, 255, 255))])
def test_background_larger_crop_with_transparent(self):
img = ImageSource(Image.new('RGBA', (356, 266), (130, 140, 120, 255)))
img_opts = ImageOptions('RGBA', transparent=True)
splitter = TileSplitter(img, img_opts)
tile = splitter.get_tile((0, 0), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(colors, [(256*256, (130, 140, 120, 255))])
tile = splitter.get_tile((256, 256), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(sorted(colors), [(10*100, (130, 140, 120, 255)), (256*256-10*100, (255, 255, 255, 0))])
class TestHasTransparency(object):
def test_rgb(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
img = Image.new('RGB', (10, 10))
assert not img_has_transparency(img)
img = quantize(img, alpha=False)
assert not img_has_transparency(img)
def test_rbga(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
img = Image.new('RGBA', (10, 10), (100, 200, 50, 255))
img.paste((255, 50, 50, 0), (3, 3, 7, 7))
assert img_has_transparency(img)
img = quantize(img, alpha=True)
assert img_has_transparency(img)
class TestPeekImageFormat(object):
def test_peek(self):
yield self.check, 'png', 'png'
yield self.check, 'tiff', 'tiff'
yield self.check, 'gif', 'gif'
yield self.check, 'jpeg', 'jpeg'
yield self.check, 'bmp', None
def check(self, format, expected_format):
buf = BytesIO()
Image.new('RGB', (100, 100)).save(buf, format)
eq_(peek_image_format(buf), expected_format)
|
apache-2.0
| 8,792,674,897,666,819,000
| 38.469671
| 125
| 0.584438
| false
| 3.285343
| true
| false
| false
|
daskol/mipt-classifier
|
setup.py
|
1
|
1380
|
#!/usr/bin/env python3
# encoding: utf8
# setup.py
"""MIPT Student Classifier
"""
from setuptools import setup, find_packages
DOCLINES = (__doc__ or '').split('\n')
CLASSIFIERS = """\
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Developers
Intended Audience :: End Users/Desktop
Intended Audience :: Information Technology
Intended Audience :: Other Audience
License :: OSI Approved :: MIT License
Natural Language :: Russian
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3.5
Topic :: Internet
Topic :: Office/Business
Topic :: Utilities
"""
PLATFORMS = [
'Linux',
]
MAJOR = 0
MINOR = 0
PATCH = 0
VERSION = '{0:d}.{1:d}.{2:d}'.format(MAJOR, MINOR, PATCH)
def setup_package():
setup(name='miptclass',
version=VERSION,
description = DOCLINES[0],
long_description = '\n'.join(DOCLINES[2:]),
author='Daniel Bershatsky',
author_email='daniel.bershatsky@skolkovotech.ru',
license='MIT',
platforms=PLATFORMS,
classifiers=[line for line in CLASSIFIERS.split('\n') if line],
packages=find_packages(),
entry_points={
'console_scripts': [
'mipt-classifier=miptclass.cli:main',
],
},
)
if __name__ == '__main__':
setup_package()
|
mit
| -7,745,480,850,932,025,000
| 21.622951
| 72
| 0.62029
| false
| 3.650794
| false
| false
| false
|
tensorflow/models
|
research/audioset/yamnet/params.py
|
1
|
1847
|
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameters for YAMNet."""
from dataclasses import dataclass
# The following hyperparameters (except patch_hop_seconds) were used to train YAMNet,
# so expect some variability in performance if you change these. The patch hop can
# be changed arbitrarily: a smaller hop should give you more patches from the same
# clip and possibly better performance at a larger computational cost.
@dataclass(frozen=True) # Instances of this class are immutable.
class Params:
sample_rate: float = 16000.0
stft_window_seconds: float = 0.025
stft_hop_seconds: float = 0.010
mel_bands: int = 64
mel_min_hz: float = 125.0
mel_max_hz: float = 7500.0
log_offset: float = 0.001
patch_window_seconds: float = 0.96
patch_hop_seconds: float = 0.48
@property
def patch_frames(self):
return int(round(self.patch_window_seconds / self.stft_hop_seconds))
@property
def patch_bands(self):
return self.mel_bands
num_classes: int = 521
conv_padding: str = 'same'
batchnorm_center: bool = True
batchnorm_scale: bool = False
batchnorm_epsilon: float = 1e-4
classifier_activation: str = 'sigmoid'
tflite_compatible: bool = False
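# Illustrative usage (editor's addition, not part of the upstream file). The
# derived patch shape follows directly from the hyperparameters above:
#   params = Params()
#   params.patch_frames  # int(round(0.96 / 0.010)) == 96 STFT frames per patch
#   params.patch_bands   # == mel_bands == 64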
|
apache-2.0
| -6,389,886,753,464,823,000
| 35.215686
| 85
| 0.708175
| false
| 3.784836
| false
| false
| false
|
marco-mariotti/selenoprofiles
|
libraries/networkx/algorithms/tests/test_core.py
|
1
|
2003
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestCore:
def setUp(self):
# G is the example graph in Figure 1 from Batagelj and
# Zaversnik's paper titled An O(m) Algorithm for Cores
# Decomposition of Networks, 2003,
# http://arXiv.org/abs/cs/0310049. With nodes labeled as
# shown, the 3-core is given by nodes 1-8, the 2-core by nodes
# 9-16, the 1-core by nodes 17-20 and node 21 is in the
# 0-core.
t1=nx.convert_node_labels_to_integers(nx.tetrahedral_graph(),1)
t2=nx.convert_node_labels_to_integers(t1,5)
G=nx.union(t1,t2)
G.add_edges_from( [(3,7), (2,11), (11,5), (11,12), (5,12), (12,19),
(12,18), (3,9), (7,9), (7,10), (9,10), (9,20),
(17,13), (13,14), (14,15), (15,16), (16,13)])
G.add_node(21)
self.G=G
# Create the graph H resulting from the degree sequence
# [0,1,2,2,2,2,3] when using the Havel-Hakimi algorithm.
degseq=[0,1,2,2,2,2,3]
self.H=nx.havel_hakimi_graph(degseq)
def test_trivial(self):
"""Empty graph"""
G = nx.Graph()
assert_equal(nx.find_cores(G),{})
def find_cores(self):
cores = nx.find_cores(self.G)
nodes_by_core=[]
for val in [0,1,2,3]:
nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
assert_equal(nodes_by_core[0],[21])
assert_equal(nodes_by_core[1],[17, 18, 19, 20])
assert_equal(nodes_by_core[2],[9, 10, 11, 12, 13, 14, 15, 16])
assert_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8])
def find_cores2(self):
cores = nx.find_cores(self.H)
nodes_by_core=[]
for val in [0,1,2]:
nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
assert_equal(nodes_by_core[0],[0])
assert_equal(nodes_by_core[1],[1, 3])
assert_equal(nodes_by_core[2],[2, 4, 5, 6])
|
gpl-2.0
| 6,964,820,110,641,476,000
| 36.092593
| 78
| 0.545182
| false
| 2.84517
| false
| false
| false
|
kedz/cuttsum
|
wp-scripts/test-wtmf.py
|
1
|
12192
|
import corenlp as cnlp
from sklearn.metrics.pairwise import cosine_similarity
import re
import os
import gzip
import wtmf
from sklearn.externals import joblib
import cuttsum.events
import cuttsum.judgements
import pandas as pd
from collections import defaultdict
matches_df = cuttsum.judgements.get_merged_dataframe()
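# Annotation (editor's summary of the function below; not in the original file):
# heal_text() cleans update/nugget sentences - it strips leading newswire
# datelines and bylines, re-attaches punctuation and quote characters that
# tokenization split off, repairs number, a.m./p.m. and U.S./U.N. spacing,
# drops photo credits, and returns the result re-encoded as UTF-8.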
def heal_text(sent_text):
sent_text = sent_text.decode("utf-8")
sent_text = re.sub(
ur"[a-z ]+, [a-z][a-z ]+\( [a-z]+ \) [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z ]+, [a-z][a-z]+ [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z ]+\([^\)]+\) [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z]+ +[-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(r"\([^)]+\)", r" ", sent_text)
sent_text = re.sub(ur"^ *[-\u2014_]+", r"", sent_text)
sent_text = re.sub(u" ([,.;?!]+)([\"\u201c\u201d'])", r"\1\2", sent_text)
sent_text = re.sub(r" ([:-]) ", r"\1", sent_text)
sent_text = re.sub(r"([^\d]\d{1,3}) , (\d\d\d)([^\d]|$)", r"\1,\2\3", sent_text)
sent_text = re.sub(r"^(\d{1,3}) , (\d\d\d)([^\d]|$)", r"\1,\2\3", sent_text)
sent_text = re.sub(ur" ('|\u2019) ([a-z]|ll|ve|re)( |$)", r"\1\2 ", sent_text)
sent_text = re.sub(r" ([',.;?!]+) ", r"\1 ", sent_text)
sent_text = re.sub(r" ([',.;?!]+)$", r"\1", sent_text)
sent_text = re.sub(r"(\d\.) (\d)", r"\1\2", sent_text)
sent_text = re.sub(r"(a|p)\. m\.", r"\1.m.", sent_text)
sent_text = re.sub(r"u\. (s|n)\.", r"u.\1.", sent_text)
sent_text = re.sub(
ur"\u201c ([^\s])",
ur"\u201c\1", sent_text)
sent_text = re.sub(
ur"([^\s]) \u201d",
ur"\1\u201d", sent_text)
sent_text = re.sub(
ur"\u2018 ([^\s])",
ur"\u2018\1", sent_text)
sent_text = re.sub(
ur"([^\s]) \u2019",
ur"\1\u2019", sent_text)
sent_text = re.sub(
ur"\u00e2",
ur"'", sent_text)
sent_text = re.sub(
r"^photo:reuters|^photo:ap",
r"", sent_text)
sent_text = sent_text.replace("\n", " ")
return sent_text.encode("utf-8")
nuggets = cuttsum.judgements.get_nuggets()
updates = pd.concat([
cuttsum.judgements.get_2013_updates(),
cuttsum.judgements.get_2014_sampled_updates()
])
#updates["text"] = updates["text"].apply(heal_text)
dom2type = {
"accidents": set(["accident"]),
"natural-disasters": set(["earthquake", "storm", "impact event"]),
"social-unrest": set(["protest", "riot"]),
"terrorism": set(["shooting", "bombing", "conflict", "hostage"]),
}
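# Annotation (editor's summary of the function below; not in the original file):
# tokenize() returns one space-joined token string per annotated document. With
# ne=True, PERSON / LOCATION / ORGANIZATION tokens become __PER__ / __LOC__ /
# __ORG__ placeholders (__CPER__ / __CLOC__ / __CORG__ when they match the
# event's central entity); other tokens are lowercased and optionally lemmatized
# or stemmed, with stopwords and tokens of 50+ characters dropped.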
def tokenize(docs, norm, stop, ne, central_per=None, central_loc=None, central_org=None):
if stop:
with open("stopwords.txt", "r") as f:
sw = set([word.strip().decode("utf-8").lower() for word in f])
if norm == "stem":
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
all_toks = []
for doc in docs:
toks = []
for sent in doc:
if ne:
for tok in sent:
if tok.ne == "PERSON":
if unicode(tok.lem).lower() == central_per:
toks.append(u"__CPER__")
else:
toks.append(u"__PER__")
elif tok.ne == "LOCATION":
if unicode(tok.lem).lower() == central_loc:
toks.append(u"__CLOC__")
else:
toks.append(u"__LOC__")
elif tok.ne == "ORGANIZATION":
if unicode(tok.lem).lower() == central_org:
toks.append(u"__CORG__")
else:
toks.append(u"__ORG__")
else:
if norm == "lemma":
form = unicode(tok.lem).lower()
elif norm == "stem":
form = stemmer.stem(unicode(tok).lower())
else:
form = unicode(tok).lower()
if stop:
if form not in sw and len(form) < 50:
toks.append(form)
else:
if len(form) < 50:
toks.append(form)
else:
if norm == "lemma":
stoks = [unicode(tok.lem).lower() for tok in sent]
elif norm == "stem":
stoks = [stemmer.stem(unicode(tok).lower())
for tok in sent]
else:
stoks = [unicode(tok).lower() for tok in sent]
if stop:
toks.extend([tok for tok in stoks if tok not in sw])
else:
toks.extend(stoks)
toks = [tok for tok in toks if len(tok) < 50]
#if len(toks) == 0: continue
string = u" ".join(toks).encode("utf-8")
#print string
all_toks.append(string)
return all_toks
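# Annotation (editor's summary of the function below; not in the original file):
# find_central_nes() counts PERSON, LOCATION and ORGANIZATION lemmas across all
# documents and returns the most frequent entity of each type (or None when a
# type never occurs).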
def find_central_nes(docs):
per_counts = defaultdict(int)
org_counts = defaultdict(int)
loc_counts = defaultdict(int)
for doc in docs:
for sent in doc:
for tok in sent:
if tok.ne == "PERSON":
per_counts[unicode(tok.lem).lower()] += 1
elif tok.ne == "LOCATION":
loc_counts[unicode(tok.lem).lower()] += 1
elif tok.ne == "ORGANIZATION":
org_counts[unicode(tok.lem).lower()] += 1
if len(per_counts) > 0:
central_per = max(per_counts.items(), key=lambda x: x[1])[0]
else:
central_per = None
if len(org_counts) > 0:
central_org = max(org_counts.items(), key=lambda x: x[1])[0]
else:
central_org = None
if len(loc_counts) > 0:
central_loc = max(loc_counts.items(), key=lambda x: x[1])[0]
else:
central_loc = None
return central_per, central_loc, central_org
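# Annotation (editor's summary of the function below; not in the original file):
# for each event in the domain, main() loads the pre-trained vectorizer,
# annotates nugget and update text with CoreNLP, projects both into the latent
# space, ranks updates by cosine similarity to every nugget, and records
# P@100, optimal P@100 and normalized P@100 against the matching judgements.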
def main(input_path, output_path, norm, stop, ne, lam, port):
dirname, domain = os.path.split(input_path)
input_path = os.path.join(
dirname,
"{}.norm-{}{}{}.lam{:0.3f}.pkl".format(
domain, norm, ".stop" if stop else "", ".ne" if ne else "", lam))
print "Domain: {}".format(domain)
print "Model Path: {}".format(input_path)
events = [event for event in cuttsum.events.get_events()
if event.type in dom2type[domain] and event.query_num < 26 and event.query_num != 7]
if ne is True:
annotators = ["tokenize", "ssplit", "pos", "lemma", "ner"]
elif norm == "lemma":
annotators = ["tokenize", "ssplit", "pos", "lemma"]
else:
annotators = ["tokenize", "ssplit"]
results = []
vec = joblib.load(input_path)
modelname = "{}.norm_{}.stop_{}.ne_{}.lam_{}".format(domain, norm, stop, ne, lam)
with cnlp.Server(annotators=annotators, mem="6G", port=port,
max_message_len=1000000) as client:
for event in events:
print event
event_nuggets = nuggets.loc[nuggets["query id"] == event.query_id]
print "processing nugget text"
nugget_docs = [client.annotate(text)
for text in event_nuggets["text"].tolist()]
#for doc in nugget_docs:
# print doc
#print
if ne:
central_per, central_loc, central_org = find_central_nes(
nugget_docs)
else:
central_per = None
central_loc = None
central_org = None
X_nug_txt = tokenize(nugget_docs, norm, stop, ne,
central_per=central_per, central_loc=central_loc,
central_org=central_org)
nuggets.loc[nuggets["query id"] == event.query_id, "X"] = X_nug_txt
event_nuggets = nuggets[nuggets["query id"] == event.query_id]
event_nuggets = event_nuggets[event_nuggets["X"].apply(lambda x: len(x.split(" ")) < 50 and len(x.split(" ")) > 0)]
X_nug_txt = event_nuggets["X"].tolist()
#for txt in X_nug_txt:
# print txt
#print
print "transforming nugget text"
X_nug = vec.transform(X_nug_txt)
assert X_nug.shape[0] == len(event_nuggets)
print "getting updates"
updates.loc[updates["query id"] == event.query_id, "text"] = \
updates.loc[updates["query id"] == event.query_id, "text"].apply(heal_text)
event_updates = updates[(updates["query id"] == event.query_id) & (updates["text"].apply(len) < 1000)]
print "processing update text"
docs = [client.annotate(text) for text in event_updates["text"].tolist()]
X_upd_txt = tokenize(docs, norm, stop, ne,
central_per=central_per, central_loc=central_loc,
central_org=central_org)
print "transforming update text"
X_upd = vec.transform(X_upd_txt)
for i, (index, nugget) in enumerate(event_nuggets.iterrows()):
boolean = (matches_df["query id"] == event.query_id) & (matches_df["nugget id"] == nugget["nugget id"])
match_ids = set(matches_df.loc[boolean, "update id"].tolist())
if len(match_ids) == 0: continue
#print index, nugget["nugget id"], nugget["text"]
#print X_nug[i]
if (X_nug[i] == 0).all(): continue
n_matches = 0
K = cosine_similarity(X_nug[i], X_upd)
for j in K.ravel().argsort()[::-1][:100]:
#print K[0,j],
#print event_updates.iloc[j]["text"]
if event_updates.iloc[j]["update id"] in match_ids:
n_matches += 1
#print
P100 = n_matches / 100.
optP100 = min(1., len(match_ids) / 100.)
nP100 = P100 / optP100
results.append(
{"model": modelname,
"nugget id": nugget["nugget id"],
"P@100": P100,
"opt P@100": optP100,
"normP@100":nP100
})
df = pd.DataFrame(results)
print df
print df["normP@100"].mean()
df["model"] = modelname
return results
# print len(event_updates)
#print event_updates["text"].apply(len).mean()
#print event_updates["text"].apply(heal_text).apply(len).max()
#print event_updates["text"].apply(heal_text).apply(len).median()
if __name__ == u"__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, required=True)
parser.add_argument("--output", type=str, required=False, default=None)
parser.add_argument("--port", type=int, required=True)
# parser.add_argument("--norm", choices=["stem", "lemma", "none"], type=str, required=True)
# parser.add_argument("--stop", action="store_true")
# parser.add_argument("--ne", action="store_true")
# parser.add_argument("--lam", type=float, required=True)
args = parser.parse_args()
dirname = os.path.dirname(args.output)
if dirname != "" and not os.path.exists(dirname):
os.makedirs(dirname)
data = []
for norm in ["none", "lemma", "stem"]:
for stop in [True, False]:
for ne in [True, False]:
for lam in [20., 10., 1., .1]:
data.extend(
main(args.input, args.output, norm, stop, ne, lam, args.port))
df = pd.DataFrame(data)
with open(args.output, "w") as f:
df.to_csv(f, sep="\t", index=False)
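# Example invocation (sketch; the script and file names below are hypothetical):
#   python nugget_match_sweep.py --input tfidf_vectorizer.pkl \
#       --output results/precision_at_100.tsv --port 9000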
|
apache-2.0
| -4,383,275,878,599,725,000
| 36.170732
| 127
| 0.485482
| false
| 3.50748
| false
| false
| false
|
liosha2007/temporary-groupdocs-python-sdk
|
groupdocs/ApiClient.py
|
1
|
11192
|
#!/usr/bin/env python
"""Wordnik.com's Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates."""
from __future__ import print_function
import sys
import os
import re
import urllib
import urllib2
import httplib
import json
import datetime
import mimetypes
import base64
from models import *
from groupdocs.FileStream import FileStream
from groupdocs import version
class RequestSigner(object):
def __init__(self):
if type(self) == RequestSigner:
raise Exception("RequestSigner is an abstract class and cannot be instantiated.")
def signUrl(self, url):
raise NotImplementedError
def signContent(self, requestBody, headers):
raise NotImplementedError
class DefaultRequestSigner(RequestSigner):
def signUrl(self, url):
return url
def signContent(self, requestBody, headers):
return requestBody
class ApiClient(object):
"""Generic API client for Swagger client library builds"""
def __init__(self, requestSigner=None):
self.signer = requestSigner if requestSigner != None else DefaultRequestSigner()
self.cookie = None
self.headers = {'Groupdocs-Referer': '/'.join((version.__pkgname__, version.__version__))}
self.__debug = False
def setDebug(self, flag, logFilepath=None):
self.__debug = flag
self.__logFilepath = logFilepath
def addHeaders(self, **headers):
self.headers = headers
def callAPI(self, apiServer, resourcePath, method, queryParams, postData,
headerParams=None, returnType=str):
if self.__debug and self.__logFilepath:
stdOut = sys.stdout
logFile = open(self.__logFilepath, 'a')
sys.stdout = logFile
url = apiServer + resourcePath
headers = {}
if self.headers:
for param, value in self.headers.iteritems():
headers[param] = value
if headerParams:
for param, value in headerParams.iteritems():
headers[param] = value
isFileUpload = False
if not postData:
headers['Content-type'] = 'text/html'
elif isinstance(postData, FileStream):
isFileUpload = True
if postData.contentType:
headers['Content-type'] = postData.contentType
if postData.size:
headers['Content-Length'] = str(postData.size)
else:
headers['Content-type'] = 'application/json'
if self.cookie:
headers['Cookie'] = self.cookie
data = None
if queryParams:
# Need to remove None values, these should not be sent
sentQueryParams = {}
for param, value in queryParams.items():
if value != None:
sentQueryParams[param] = value
if sentQueryParams:
url = url + '?' + urllib.urlencode(sentQueryParams)
if method in ['POST', 'PUT', 'DELETE']:
if isFileUpload:
data = postData.inputStream
elif not postData:
data = ""
elif type(postData) not in [unicode, str, int, float, bool]:
data = self.signer.signContent(json.dumps(self.sanitizeForSerialization(postData)), headers)
else:
data = self.signer.signContent(postData, headers)
if self.__debug:
handler = urllib2.HTTPSHandler(debuglevel=1) if url.lower().startswith('https') else urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
request = MethodRequest(method=method, url=self.encodeURI(self.signer.signUrl(url)), headers=headers,
data=data)
try:
# Make the request
response = urllib2.urlopen(request)
if 'Set-Cookie' in response.headers:
self.cookie = response.headers['Set-Cookie']
if response.code == 200 or response.code == 201 or response.code == 202:
if returnType == FileStream:
fs = FileStream.fromHttp(response)
if self.__debug: print(">>>stream info: fileName=%s contentType=%s size=%s" % (fs.fileName, fs.contentType, fs.size))
return fs if 'Transfer-Encoding' in response.headers or (fs.size != None and int(fs.size) > 0) else None
else:
string = response.read()
if self.__debug: print(string)
try:
data = json.loads(string)
except ValueError: # PUT requests don't return anything
data = None
return data
elif response.code == 404:
return None
else:
string = response.read()
try:
msg = json.loads(string)['error_message']
except ValueError:
msg = string
raise ApiException(response.code, msg)
except urllib2.HTTPError, e:
raise ApiException(e.code, e.msg)
finally:
if isFileUpload:
try:
postData.inputStream.close()
except Exception, e:
sys.exc_clear()
if self.__debug and self.__logFilepath:
sys.stdout = stdOut
logFile.close()
def toPathValue(self, obj):
"""Serialize a list to a CSV string, if necessary.
Args:
obj -- data object to be serialized
Returns:
string -- json serialization of object"""
if type(obj) == list:
return ','.join(obj)
else:
return obj
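        # Illustrative example (sketch): toPathValue(["pdf", "doc"]) -> "pdf,doc",
        # while a plain string such as toPathValue("pdf") is returned unchanged.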
def sanitizeForSerialization(self, obj):
"""Dump an object into JSON for POSTing."""
if not obj:
return None
elif type(obj) in [unicode, str, int, long, float, bool]:
return obj
elif type(obj) == list:
return [self.sanitizeForSerialization(subObj) for subObj in obj]
elif type(obj) == datetime.datetime:
return obj.isoformat()
else:
if type(obj) == dict:
objDict = obj
else:
objDict = obj.__dict__
ret_dict = {}
for (key, val) in objDict.iteritems():
if key != 'swaggerTypes' and val != None:
ret_dict[key] = self.sanitizeForSerialization(val)
return ret_dict
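        # Illustrative example (sketch): sanitizeForSerialization(datetime.datetime(2014, 1, 1))
        # -> '2014-01-01T00:00:00'; lists and model objects are sanitized recursively.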
def deserialize(self, obj, objClass):
"""Derialize a JSON string into an object.
Args:
obj -- string or object to be deserialized
            objClass -- class literal for deserialized object, or string of class name
Returns:
object -- deserialized object"""
if not obj:
return None
# Have to accept objClass as string or actual type. Type could be a
# native Python type, or one of the model classes.
if type(objClass) == str:
if 'list[' in objClass:
match = re.match('list\[(.*)\]', objClass)
subClass = match.group(1)
return [self.deserialize(subObj, subClass) for subObj in obj]
if (objClass in ['int', 'float', 'long', 'dict', 'list', 'str']):
objClass = eval(objClass)
else: # not a native type, must be model class
objClass = eval(objClass + '.' + objClass)
if objClass in [unicode, str, int, long, float, bool]:
return objClass(obj)
elif objClass == datetime:
# Server will always return a time stamp in UTC, but with
# trailing +0000 indicating no offset from UTC. So don't process
# last 5 characters.
return datetime.datetime.strptime(obj[:-5],
"%Y-%m-%dT%H:%M:%S.%f")
instance = objClass()
for attr, attrType in instance.swaggerTypes.iteritems():
lc_attr = attr[0].lower() + attr[1:]
uc_attr = attr[0].upper() + attr[1:]
real_attr = None
if attr in obj:
real_attr = attr
elif lc_attr in obj:
real_attr = lc_attr
elif uc_attr in obj:
real_attr = uc_attr
if real_attr != None:
value = obj[real_attr]
if not value:
setattr(instance, real_attr, None)
elif attrType in ['str', 'int', 'long', 'float', 'bool']:
attrType = eval(attrType)
try:
value = attrType(value)
except UnicodeEncodeError:
value = unicode(value)
setattr(instance, real_attr, value)
elif 'list[' in attrType:
match = re.match('list\[(.*)\]', attrType)
subClass = match.group(1)
subValues = []
for subValue in value:
subValues.append(self.deserialize(subValue,
subClass))
setattr(instance, real_attr, subValues)
else:
setattr(instance, real_attr, self.deserialize(value,
attrType))
return instance
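        # Illustrative sketch (the 'FileInfo' model name is hypothetical):
        #   client.deserialize({'id': 42, 'name': 'doc.pdf'}, 'FileInfo')
        # would instantiate FileInfo and populate its attributes, matching each key
        # against the attribute name with a lower- or upper-cased first letter as above.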
@staticmethod
def encodeURI(url):
encoded = urllib.quote(url, safe='~@#$&()*!=:;,.?/\'').replace("%25", "%")
return encoded
@staticmethod
def encodeURIComponent(url):
return urllib.quote(url, safe='~()*!.\'')
@staticmethod
def readAsDataURL(filePath):
mimetype = mimetypes.guess_type(filePath, False)[0] or "application/octet-stream"
filecontents = open(filePath, 'rb').read()
return 'data:' + mimetype + ';base64,' + base64.b64encode(filecontents).decode()
class MethodRequest(urllib2.Request):
def __init__(self, *args, **kwargs):
"""Construct a MethodRequest. Usage is the same as for
`urllib2.Request` except it also takes an optional `method`
keyword argument. If supplied, `method` will be used instead of
the default."""
if 'method' in kwargs:
self.method = kwargs.pop('method')
return urllib2.Request.__init__(self, *args, **kwargs)
def get_method(self):
return getattr(self, 'method', urllib2.Request.get_method(self))
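# Illustrative example (sketch; the URL is hypothetical): issuing a PUT via
# MethodRequest, as described in the docstring above.
#   req = MethodRequest("http://example.com/resource", data='{}', method='PUT')
#   urllib2.urlopen(req)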
class ApiException(Exception):
def __init__(self, code, *args):
super(Exception, self).__init__((code, ) + args)
self.code = code
|
apache-2.0
| 1,865,152,210,899,522,800
| 34.871795
| 137
| 0.536455
| false
| 4.620974
| false
| false
| false
|
naoliv/osmose-backend
|
analysers/analyser_merge_public_transport_FR_transgironde.py
|
1
|
3717
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from Analyser_Merge import Analyser_Merge, Source, Load, Mapping, Select, Generate
class Analyser_Merge_Public_Transport_FR_TransGironde(Analyser_Merge):
def __init__(self, config, logger = None):
self.missing_official = {"item":"8040", "class": 41, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop not integrated") }
self.possible_merge = {"item":"8041", "class": 43, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop, integration suggestion") }
Analyser_Merge.__init__(self, config, logger,
Source(
url = "http://www.datalocale.fr/drupal7/dataset/ig_transgironde_pa",
name = u"Localisation des points d'arrêts des lignes régulières du réseau TransGironde",
file = "public_transport_FR_transgironde.csv.bz2"),
Load("LON", "LAT", table = "transgironde"),
Mapping(
select = Select(
types = ["nodes", "ways"],
tags = {"highway": "bus_stop"}),
osmRef = "ref:FR:TransGironde",
conflationDistance = 100,
generate = Generate(
static = {
"source": u"Conseil général de la Gironde - 03/2013",
"highway": "bus_stop",
"public_transport": "stop_position",
"bus": "yes",
"network": "TransGironde"},
mapping = {
"ref:FR:TransGironde": "NUMERO_PEG",
"name": lambda res: res['NOM'].split(' - ')[1] if len(res['NOM'].split(' - ')) > 1 else None},
text = lambda tags, fields: {"en": u"TransGironde stop of %s" % fields["NOM"], "fr": u"Arrêt TransGironde de %s" % fields["NOM"]} )))
def replace(self, string):
for s in self.replacement.keys():
string = string.replace(s, self.replacement[s])
return string
replacement = {
u'Coll.': u'Collège',
u'Pl.': u'Place',
u'Eglise': u'Église',
u'Rte ': u'Route ',
        u'Bld ': u'Boulevard ',
        u'St ': u'Saint ',
        u'Av. ': u'Avenue ',
u'Hôp.': u'Hôpital',
}
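    # Illustrative example (sketch): self.replace(u"Pl. de l'Eglise")
    # -> u"Place de l'Église", applying the substitutions above.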
|
gpl-3.0
| -8,028,650,922,773,014,000
| 53.470588
| 169
| 0.461123
| false
| 4.213879
| false
| false
| false
|
darogan/ParticleStats
|
scripts/ParticleStats_Compare.py
|
1
|
27381
|
#!/usr/bin/env python
###############################################################################
# ____ _ _ _ ____ _ _ #
# | _ \ __ _ _ __| |_(_) ___| | ___/ ___|| |_ __ _| |_ ___ #
# | |_) / _` | '__| __| |/ __| |/ _ \___ \| __/ _` | __/ __| #
# | __/ (_| | | | |_| | (__| | __/___) | || (_| | |_\__ \ #
# |_| \__,_|_| \__|_|\___|_|\___|____/ \__\__,_|\__|___/ #
# #
###############################################################################
# ParticleStats: Open source software for the analysis of particle #
# motility and cytoskelteal polarity #
#                motility and cytoskeletal polarity                          #
# Contact: rsh46@cam.ac.uk #
# http://www.ParticleStats.com #
# Centre for Trophoblast Research #
# University of Cambridge #
# Copyright (C) 2017 Russell S. Hamilton #
# #
# Please cite: #
# Hamilton, R.S. et al (2010) Nucl. Acids Res. Web Server Edition #
# http://dx.doi.org/10.1093/nar/gkq542 #
###############################################################################
# GNU Licence Details: #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import os, sys, math
import os.path
from optparse import OptionParser
###############################################################################
# PARSE IN THE USER OPTIONS
###############################################################################
parser = OptionParser(usage="%prog [--xls1=ExcelFile1] [--xls2=ExcelFile2]",
version="%prog 2.001")
parser.add_option("-a", "--xls1", metavar="EXCELFILE1",
dest="ExcelFile1",
help="Name of first Excel File")
parser.add_option("-b", "--xls2", metavar="EXCELFILE2",
dest="ExcelFile2",
help="Name of second Excel File")
parser.add_option("-o", metavar="OUTPUTTYPE",
dest="OutputType", default="text",
help="print text ot html style output: DEFAULT=text")
parser.add_option("--outdir", metavar="OUTPUTDIR",
dest="OutputDir",
help="Specify a directory for the output files")
parser.add_option("--outhtml", metavar="OUTPUTHTML",
dest="OutputHTML",
help="Specify a web location for the HTML output")
parser.add_option("--trackingtype", metavar="TrackingType",
dest="TrackingType", default="999",
help="Source of tracked coords: DEFAULT=metamorph")
parser.add_option("-g", "--graphs",
dest="graphs", action="store_true",
help="print graphs")
parser.add_option("-t", "--trails",
dest="trails", action="store_true",
help="print trails")
parser.add_option("-r", "--regression",
dest="regression", action="store_true",
help="Run linear regression analysis")
parser.add_option("-d", "--debug",
dest="debug", action="store_true",
help="print full debug output")
parser.add_option("--timestart",
dest="TimeStart", metavar="TIMESTART",
default="0",
help="Provide a time point start point for movement calculations")
parser.add_option("--timeend",
dest="TimeEnd", metavar="TIMEEND",
default="90",
help="Provide a time point end point for movement calculations")
parser.add_option("--pausedefinition", metavar="PAUSEDEF",
dest="PauseDef", default="distance",
help="Pause definition: speed or distance DEFAULT=distance")
parser.add_option("--rundistance", metavar="RUNDISTANCE",
dest="RunDistance", default="1.1",
help="Run Distance in nm: DEFAULT=1.1")
parser.add_option("--runframes", metavar="RUNFRAMES",
dest="RunFrames", default="0",
help="Run Frames: DEFAULT=0")
parser.add_option("--pausedistance", metavar="PAUSEDISTANCE",
dest="PauseDistance", default="10",
help="Pause Distance in nm: DEFAULT=10")
parser.add_option("--pauseduration", metavar="PAUSEDURATION",
dest="PauseDuration", default="2000",
help="Pause Duration in miliseconds: DEFAULT=2000")
parser.add_option("--pausespeed", metavar="PAUSESPEED",
dest="PauseSpeed", default="0.25",
help="Pause Speed: DEFAULT=0.25")
parser.add_option("--pauseframes", metavar="PAUSEFRAMES",
dest="PauseFrames", default="3",
help="Pause Frames: DEFAULT=3")
parser.add_option("--reverseframes", metavar="REVERSEFRAMES",
dest="ReverseFrames", default="2",
help="Reverse Frames: DEFAULT=2")
parser.add_option("--flipY", metavar="FLIPY",
dest="FlipY", action="store_true",
help="Changes the default orientation for the Y axis. \
Default y=0 is at the top of the image")
parser.add_option("--imagesize", metavar="IMAGESIZE",
dest="ImageSize", default="512",
help="Image size to define the range of the coordinates DEFAULT=512")
parser.add_option("--pixelratio", metavar="PIXELRATIO",
dest="PixelRatio", default="1.00",
help="Pixel Ratio (nm per pixel): DEFAULT=1.00")
parser.add_option("--pixelratiomethod", metavar="PIXELRATIOMETHOD",
dest="PixelRatioMethod", default="multiply",
help="Pixel Ratio calculation method <multiply/divide>: \
DEFAULT=multiply")
parser.add_option("--dimensions", metavar="DIMENSIONS",
dest="Dimensions", default="2D",
help="Number of dimensions (1DX, 1DY, 2D): DEFAULT=2D")
(options, args) = parser.parse_args()
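# Example invocation (sketch; the Excel file names are hypothetical):
#   ParticleStats_Compare.py --xls1=wildtype.xls --xls2=mutant.xls -o html \
#       --graphs --trails --outdir=Results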
options.RunDistance = float(options.RunDistance)
options.PauseDuration = float(options.PauseDuration)
options.PauseDistance = float(options.PauseDistance)
options.ReverseFrames = float(options.ReverseFrames)
options.PixelRatio = float(options.PixelRatio)
options.TimeStart = int(options.TimeStart)
options.TimeEnd = int(options.TimeEnd)
if (options.OutputType != "html") and (options.OutputType != "text"):
print "Error with input parameters (run -h flag for help)"
print "--output must be html or text"
sys.exit()
if options.ExcelFile1 and options.ExcelFile2:
XLS1 = options.ExcelFile1
XLS2 = options.ExcelFile2
else:
print "Error with input parameters (run -h flag for help)"
print "Two Excel Files must be provided"
sys.exit()
if options.graphs: Graphs = "graphs"
else: Graphs = "nographs"
if options.PauseDef != "speed" and options.PauseDef != "distance":
print "Error with input parameters (run -h flag for help)"
print "Pause Definition must be either speed or distance"
sys.exit()
else:
if options.PauseDef == "speed": PauseDef = 1
elif options.PauseDef == "distance": PauseDef = 2
###############################################################################
# LOAD IN THE REQUIRED MODULES ONLY AFTER MAIN USER OPTIONS CHECKED
###############################################################################
print "\nLoading External Modules..."
import ParticleStats.ParticleStats_Inputs as PS_Inputs
import ParticleStats.ParticleStats_Outputs as PS_Outputs
import ParticleStats.ParticleStats_Maths as PS_Maths
import ParticleStats.ParticleStats_Plots as PS_Plots
import numpy as na
import re
print "Loading complete\n\n"
#Print the welcome logo plus data and run mode
FontSize_Titles = 2
FontSize_Text = 1
#if(options.OutputType == "html"):
#BaseDir = "http://idcws.bioch.ox.ac.uk/~rhamilto/ParticleStats2/"
# BaseDir = ""
#else:
if(options.OutputHTML):
BaseDir = options.OutputHTML
else:
BaseDir = ""
#DirGraphs = BaseDir+"GraphOutput/"
if(options.OutputDir):
DirGraphs = options.OutputDir
else:
DirGraphs = os.getcwd()
DirTrails = BaseDir
ImageFileSearchPath = os.getcwd()
if(options.OutputType == "html"):
PS_Outputs.Print_HTMLHeaders()
PS_Outputs.Print_Welcome(options.OutputType,FontSize_Text)
###############################################################################
# READ IN THE EXCEL FILES
###############################################################################
if options.FlipY:
FlipYImgSize = int(options.ImageSize)
else:
FlipYImgSize = 0
FDs = []
#READ IN EXCEL FILE 1 AND EXTRACT INFO
(InputDirName1, InputFileName1) = os.path.split(XLS1)
Coords1,Corrections1,Axes1 = PS_Inputs.ReadExcelCoords(XLS1,options.PixelRatio,\
options.PixelRatioMethod,\
options.TimeStart,options.TimeEnd,\
FlipYImgSize)
FDs.append({'InputDirName':InputDirName1,'InputFileName':InputFileName1,\
'Coords':Coords1, 'Corrections':Corrections1, 'Axes':Axes1 })
#READ IN EXCEL FILE 2 AND EXTRACT INFO
(InputDirName2, InputFileName2) = os.path.split(XLS2)
Coords2,Corrections2,Axes2 = PS_Inputs.ReadExcelCoords(XLS2,options.PixelRatio,\
options.PixelRatioMethod,\
options.TimeStart,options.TimeEnd,\
FlipYImgSize)
FDs.append({'InputDirName':InputDirName2,'InputFileName':InputFileName2,\
'Coords':Coords2, 'Corrections':Corrections2, 'Axes':Axes2 })
del(InputDirName1,InputDirName2,InputFileName1,InputFileName2)
del(Coords1,Coords2,Corrections1,Corrections2,Axes1,Axes2)
if((options.OutputType == 'html') and \
((len(FDs[0]['Coords']) > 200) or (len(FDs[1]['Coords']) > 200) )):
print len(FDs[0]['Coords'])
print len(FDs[1]['Coords'])
print PS_Inputs.Colourer("### Too many particles in input files - limit = 200 ###",\
"red",options.OutputType,"bold",FontSize_Titles)
sys.exit()
PS_Outputs.Print_Parameters( FDs[0]['InputFileName'],FDs[0]['Coords'], \
FDs[1]['InputFileName'],FDs[1]['Coords'], \
options.OutputType,FontSize_Text )
Colours = ["red","blue","green","purple","orange","yellow",\
"silver","cyan","brown","magenta","silver","gold"]
Colours = Colours * 100
Separator = "" + ("+"*90)
#sys.exit(0)
###############################################################################
# DRAW IMAGE STACK
###############################################################################
#print "\tReading Image Stack"
#ImageStack = ParticleStats_Inputs.ReadImageFiles("")
#ParticleStats_Inputs.DrawCoords(ImageStack,Coords,"geo",Colours)
###############################################################################
# RUN FUNCTIONS ON COORD DATA READ IN - MAIN PROGRAM LOOP
###############################################################################
RealAllRuns = []
RealAllRunsX = []
RunsHash = []
RunsHash2 = []
RunsHashAll = []
coordset = 0
while coordset < len(FDs):
print PS_Inputs.Colourer(("### Running Coords Set "+str(coordset+1)+" ###"),"black",\
options.OutputType,"bold",FontSize_Titles)
print PS_Inputs.Colourer(" Excel File = "+FDs[coordset]['InputFileName'],"black",\
options.OutputType,"bold",FontSize_Text)
AllRuns = []
AllRuns_X = []
Stats_Global_AveLen = []
Stats_Global_AveSpeed = []
FileOut = ""
RunCounter = 0;
i = 0
while i < len(FDs[coordset]['Coords']): #cycle through sheets
j = 0
while j < len(FDs[coordset]['Coords'][i]): #cycle through
print PS_Inputs.Colourer(Separator,"grey",options.OutputType,"",FontSize_Text)
# Sort out the coordinate alignment corrections
if len(FDs[coordset]['Corrections']) != 4:
print PS_Inputs.Colourer(" Applying correction coordinates ",\
"black",options.OutputType,"bold",FontSize_Text)
FDs[coordset]['Coords'][i][j] = PS_Maths.CorrectCoordinates(\
FDs[coordset]['Coords'][i][j],\
FDs[coordset]['Corrections'])
# Perform Cummulative Distance plotting
if(options.graphs):
DistanceCummulativePlot = PS_Plots.PlotDistanceVsTimeCummulative(\
FDs[coordset]['Coords'][i][j],i,j,\
("Coords"+str(coordset+1)),"msecs",\
"nm",DirGraphs)
if(options.OutputType=="html"):
IMG_Particle = "<A HREF='"+ BaseDir + DirGraphs+\
str(DistanceCummulativePlot)+".png'"+\
" TARGET=_blank><IMG WIDTH=200 "+\
"SRC='"+ BaseDir + DirGraphs + "/" +\
str(DistanceCummulativePlot)+".png' BORDER=0></A>"
elif(options.OutputType=="text"):
IMG_Particle = " Graph: Cummulative Distance vs Time "+\
str(DistanceCummulativePlot)+".png"
else:
if(options.OutputType=="html"):
IMG_Particle = " <FONT SIZE=1>Graph:<BR>NO IMAGE AVAILABLE</FONT>"
else:
IMG_Particle = " Graph: NO IMAGE AVAILABLE"
# Perform the linear regression but not the graph just yet
if(options.regression):
Regression = PS_Maths.KymoRegression(FDs[coordset]['Coords'][i][j],4,5)
#Regression = PS_Maths.Regression_CVersion(FDs[coordset]['Coords'][i][j],4,5)
print " Regression=[X=%6.3f,"%Regression['X'],\
"Intercept=%6.0f,"%Regression['Intercept'],\
"R2=%6.3f,"%Regression['R2'],"]"
#"aR2=%6.3f"%Regression['aR2'],"]"
else:
Regression = ""
# Perform Trail drawing
IMG_Trails = ""
if( options.trails):
ImageFiles = PS_Inputs.FindImageFiles(\
FDs[coordset]['Coords'][i][j][0][0],ImageFileSearchPath)
else:
ImageFiles = []
if( len(ImageFiles) > 0) and (options.trails):
PatternN = re.compile(r'.*020.tif')
k = 0
while k < len(ImageFiles):
IMG_Trails = " Trail Image: NO IMAGE AVAILABLE"
if (PatternN.match(os.path.basename(ImageFiles[k]))):
FirstImage = ImageFiles[k]
TrailImage = PS_Inputs.DrawTrailsOnImageFile(FirstImage,i,\
FDs[coordset]['InputFileName'],Colours[i][j],\
FDs[coordset]['Coords'][i][j],\
options.PixelRatio,Regression)
if( (options.OutputType == "html") and ( options.trails) ):
IMG_Trails = "<A HREF='"+ DirTrails + TrailImage + \
"' TARGET=_blank><IMG WIDTH=200 " + \
"HEIGHT=187 " + "SRC='" + DirTrails + \
TrailImage + "' BORDER=0></A>"
elif( (options.OutputType == "text") and ( options.trails) ):
IMG_Trails = " Trail Image:"+TrailImage
break
k += 1
else:
if(options.OutputType == "html"):
IMG_Trails = "<FONT SIZE=1>Trail Image:<BR>NO IMAGE AVAILABLE</FONT>"
else:
IMG_Trails = " Trail Image: NO IMAGE AVAILABLE"
Runs = []
Runs = PS_Maths.FindLongMovementsAndPausesRaquel( \
FDs[coordset]['Coords'][i][j], Regression,\
FDs[coordset]['Axes'],PauseDef, \
options.RunDistance,options.RunFrames, \
options.PauseDistance,options.PauseSpeed, \
options.PauseFrames,options.PauseDuration,\
options.ReverseFrames,options.PixelRatio,\
options.Dimensions,\
options.TimeStart, options.TimeEnd,\
options.debug)
Stats_Particle = PS_Maths.Stats_Particle(Runs)
Stats_Standards = PS_Maths.Stats_Standards(Runs)
RunsHash.append({'CoordsSet':coordset,'Sheet':i,'Particle':j,'Runs':Runs})
RunsHashAll.append({'CoordsSet':coordset,'Sheet':i,'Particle':j,'Runs':Runs})
AllRuns.append(Runs)
print "Runs for particle %4d sheet %2d" % (j, i),
print " (Excel Row=", FDs[coordset]['Coords'][i][j][0][6], \
" File=", FDs[coordset]['InputFileName'], ")"
print " No Coords =", len(FDs[coordset]['Coords'][i][j]), \
" No +ve Runs = ", Stats_Particle['No_Runs_P'], \
" No -ve Runs = ", Stats_Particle['No_Runs_N'], \
" No Pauses = ", Stats_Particle['No_Runs_0']
RunLine = ""
StatsLine = ""
Header = " Event Start End Dir Dist SDist" +\
" RDist Angle Speed SSpeed RSpeed Time"
print PS_Inputs.Colourer(Header,"grey",options.OutputType,"italics",FontSize_Text)
k = 0
while k < len(Runs):
AllRuns_X.append(Runs[k])
Error = ""
if( Runs[k][2] > 0): Event = "Run "; Colour = "red"
elif( Runs[k][2] < 0): Event = "Run "; Colour = "blue"
elif( Runs[k][2] == 0): Event = "Pause"; Colour = "green"
#if(abs(Runs[j][5]) <= 200 and abs(Runs[j][6]) <= 200 \
# and Runs[j][2] != 0):
# Colour = "purple"; Error = "ERROR? " + Event
#elif( abs(Runs[j][3]) > 300 and Runs[j][2] == 0):
# Colour = "cyan"; Error = "? " + Event
RunLine = PS_Outputs.Print_RunLine(Runs,k,Event,Error)
print PS_Inputs.Colourer(RunLine,Colour,options.OutputType,\
"",FontSize_Text)
RunCounter += 1
FileOut += PS_Outputs.Print_FileOut(Runs, RunCounter, i, k)
k += 1
StatsLine = PS_Outputs.Print_ParticleStatsLine(Stats_Particle,Stats_Standards)
print StatsLine
# Perform Linear Regression Graph drawing
if(options.regression):
#Regression = PS_Maths.KymoRegression(FDs[coordset]['Coords'][i][j],4,5)
#print " Regression=[X=%6.3f,"%Regression['X'],\
# "Intercept=%6.0f,"%Regression['Intercept'],\
# "R2=%6.3f,"%Regression['R2'],\
# "aR2=%6.3f"%Regression['aR2'],"]"
RegressionGraph = PS_Plots.RegressionGraph(\
FDs[coordset]['Coords'][i][j],(coordset+1),i,j,\
Regression,FDs[coordset]['Axes'],Runs,DirGraphs)
if( options.OutputType=="html"):
IMG_Regression = "<A HREF='"+BaseDir+DirGraphs+RegressionGraph+".png" + \
"' TARGET=_blank><IMG WIDTH=200 HEIGHT=187 " + \
"SRC='"+BaseDir+DirGraphs+RegressionGraph+".png' " + \
"BORDER=0></A>"
elif( options.OutputType=="text"):
IMG_Regression = " Regression Image: "+RegressionGraph+".png"
else:
Regression = ""
if(options.OutputType=="html"):
IMG_Regression = " <FONT SIZE=1>Regression Image:"+\
"<BR>NO IMAGE AVAILABLE</FONT"
else:
IMG_Regression = " Regression Image: NO IMAGE AVAILABLE"
if(options.OutputType == "text"):
print IMG_Particle
print IMG_Trails
print IMG_Regression
elif(options.OutputType == "html"):
print "<TABLE WIDTH=100%><TR><TD>"+IMG_Particle+"</TD>"+\
"<TD VALIGN=middle>"+IMG_Trails+"</TD>"+\
"<TD VALIGN=middle>"+IMG_Regression+"</TD>"+\
"</TR></TABLE>"
j += 1
if(options.graphs and j == (len(FDs[coordset]['Coords'][i]))):
RoseDiagram = PS_Plots.PlotCompareRoseDiagram(RunsHash,500,coordset,i,DirGraphs)
convert = "inkscape --export-png="+DirGraphs+"/"+RoseDiagram+\
".png --export-dpi=125 "+DirGraphs+"/"+RoseDiagram+".svg 2>/dev/null"
os.popen(convert)
if(options.OutputType=="html"):
IMG_Rose = "<B>Rose Diagram For Sheet "+str(i)+"</B><BR>" + \
"<A HREF='"+BaseDir+DirGraphs+RoseDiagram+".png" + \
"' TARGET=_blank><IMG WIDTH=200 HEIGHT=187 " + \
"SRC='"+BaseDir+DirGraphs+RoseDiagram+".png' " + \
"BORDER=0></A>"
else:
IMG_Rose = " RoseDiagram = "+RoseDiagram+".svg\n"+\
" RoseDiagram = "+RoseDiagram+".png\n"
print IMG_Rose
RunsHash2.append(RunsHash)
RunsHash = []
i += 1
print PS_Inputs.Colourer(Separator,"grey",options.OutputType,"",FontSize_Text)
# Print Out some Global stats
print PS_Inputs.Colourer("### Global Statistics ###","green",\
options.OutputType,"bold",FontSize_Titles)
Stats_Global = {}
Stats_Global = PS_Maths.Stats_Particle(AllRuns_X)
Stats_Standards = {}
Stats_Standards = PS_Maths.Stats_Standards(AllRuns_X)
GlobalStats = PS_Outputs.Print_GlobalStats ( AllRuns_X, Stats_Global, Stats_Standards )
print GlobalStats
Stats_Global_AveLen.append( [Stats_Global['Ave_RunLen_P'],Stats_Global['Ave_RunLen_N'],\
Stats_Standards['D_P_E'],Stats_Standards['D_N_E'] ] )
Stats_Global_AveSpeed.append( [Stats_Global['Ave_Speed_P'],Stats_Global['Ave_Speed_N'],\
Stats_Standards['S_P_E'],Stats_Standards['S_N_E'] ] )
# Call the graph drawing functions
print PS_Inputs.Colourer("### Produce Output Files ###","green",\
options.OutputType,"bold",FontSize_Titles)
if( options.graphs):
print PS_Inputs.Colourer((" Creating Runs graph for Coords Set "+\
str(coordset+1)+"..."),"black",\
options.OutputType,"",FontSize_Text)
PS_Plots.PlotRuns(AllRuns,options.PixelRatio,Colours,\
("Coords"+str(coordset+1)),DirGraphs)
#PS_Plots.PlotRunsFreq(AllRuns,Colours,("Coords"+str(coordset+1)),DirGraphs)
#Write Run Data Out to the Data File
print PS_Inputs.Colourer(" Creating Output Table for Coords Set "+str(coordset+1)+\
"...","black",options.OutputType,"",FontSize_Text)
PS_Outputs.Print_OutputFile((DirGraphs+"/ParticleStats_Coords"+str(coordset+1)+"_Output.text"),FileOut)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
RealAllRuns.append(AllRuns)
RealAllRunsX.append(AllRuns_X)
coordset += 1
RoseDiagram = PS_Plots.PlotCompareRoseDiagram(RunsHashAll,500,0,99,DirGraphs)
convert = "inkscape --export-png="+DirGraphs+"/"+RoseDiagram+".png --export-dpi=125 "+\
          DirGraphs+"/"+RoseDiagram+".svg 2>/dev/null"
os.popen(convert)
print "RoseDiagram (coordsset=0) =", RoseDiagram
RoseDiagram = PS_Plots.PlotCompareRoseDiagram(RunsHashAll,500,1,99,DirGraphs)
convert = "inkscape --export-png="+DirGraphs+"/"+RoseDiagram+".png --export-dpi=125 "+\
          DirGraphs+"/"+RoseDiagram+".svg 2>/dev/null"
os.popen(convert)
print "RoseDiagram (coordsset=1) =", RoseDiagram
ThreeFrameResults = PS_Maths.ThreeFrameRunAnalysis(RunsHashAll,FDs,DirGraphs)
print "3 Frame Results =", len(ThreeFrameResults)
ThreeFrameGraph = PS_Plots.PlotThreeFrameResults(ThreeFrameResults,0,DirGraphs)
print "3 Frame Graph (coordsset=0) =", ThreeFrameGraph
ThreeFrameGraph = PS_Plots.PlotThreeFrameResults(ThreeFrameResults,1,DirGraphs)
print "3 Frame Graph (coordsset=1) =", ThreeFrameGraph
ThreeFrameMaxResults = PS_Maths.ThreeFrameMaxRunAnalysis(RunsHashAll,FDs,DirGraphs)
print "3 Frame Max Results =", len(ThreeFrameMaxResults)
ThreeFrameMaxGraph = PS_Plots.PlotThreeFrameMaxResults(ThreeFrameMaxResults,0,DirGraphs)
print "3 Frame Max Graph (coordsset=0) =", ThreeFrameMaxGraph
ThreeFrameMaxGraph = PS_Plots.PlotThreeFrameMaxResults(ThreeFrameMaxResults,1,DirGraphs)
print "3 Frame Max Graph (coordsset=1) =", ThreeFrameMaxGraph
DirChangeResults = PS_Maths.DirectionChangesAnalysis(RunsHashAll,0,DirGraphs)
print "Direction Change Results (coordsset=0) =", len(DirChangeResults)
DirChangeGraph = PS_Plots.PlotDirChangeResults(DirChangeResults,0,DirGraphs)
print "Direction Changes Graph (coordsset=0) =", DirChangeGraph
DirChangeResults = PS_Maths.DirectionChangesAnalysis(RunsHashAll,1,DirGraphs)
print "Direction Change Results (coordsset=1) =", len(DirChangeResults)
DirChangeGraph = PS_Plots.PlotDirChangeResults(DirChangeResults,1,DirGraphs)
print "Direction Changes Graph (coordsset=0) =", DirChangeGraph
# OK Lets do some stats comparisons between the two excel files
print PS_Inputs.Colourer("### Comparison Statistics ###","green",\
options.OutputType,"bold",FontSize_Titles)
print PS_Inputs.Colourer(" Comparing Coords Set 1 to Coords Set 2","black",\
options.OutputType,"",FontSize_Text)
print PS_Inputs.Colourer(" "+FDs[0]['InputFileName']+" vs "+FDs[1]['InputFileName'],\
"black",options.OutputType,"",FontSize_Text)
Output = PS_Maths.Stats_TTests(RealAllRunsX[0],RealAllRunsX[1])
print Output
# Plot Average Run Length for the 2 coords sets
if( options.graphs):
print ""
#PS_Plots.PlotAveRunLength(Stats_Global_AveLen)
#PS_Plots.PlotSpeed(Stats_Global_AveSpeed)
print PS_Inputs.Colourer("### FIN ###","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer(Separator,"grey",options.OutputType,"",FontSize_Text)
if(options.OutputType == "html"):
PS_Outputs.Print_HTMLTails(BaseDir, DirGraphs, options.ExcelFile1, options.ExcelFile2 )
#------------------------------------------------------------------------------
# FIN
#------------------------------------------------------------------------------
|
gpl-3.0
| 5,150,384,486,752,012,000
| 44.635
| 113
| 0.552463
| false
| 3.644483
| false
| false
| false
|
ArcherSys/ArcherSys
|
Lib/encodings/cp500.py
|
1
|
39503
|
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
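# Illustrative example (sketch), once this codec is registered by the encodings package:
#   import codecs
#   codecs.lookup('cp500').encode('Hello')                  # -> (b'\xc8\x85\x93\x93\x96', 5)
#   codecs.lookup('cp500').decode(b'\xc8\x85\x93\x93\x96')  # -> ('Hello', 5)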
=======
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
mit
| 6,183,082,987,881,244,000
| 41.613808
| 116
| 0.51948
| false
| 3.141641
| false
| false
| false
|
againer/supercda
|
utils/clinic_pickler.py
|
1
|
3611
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Alex Gainer (superrawr@gmail.com)"
__copyright__ = "Copyright 2014, Health Records For Everyone (HR4E)"
import cPickle as pickle
import datetime
import os
class PickleNotFoundException(Exception):
"""Missing Pickle Exception"""
class ClinicPickle(object):
"""Word dictionary utilities for pickling GRE words."""
_PICKLE_FOLDER = os.path.join('data', 'clinics')
_MISSING_PICKLE = 'Pickle {0} File Missing.'
def __init__(self, name):
self.name = name
self.date_created = datetime.datetime.now()
@classmethod
def create(cls, name):
"""Creates a clinic object and pickles it."""
try:
pickle_file_name = '{0}.pkl'.format(name)
path_to_pickle = os.path.join(cls._PICKLE_FOLDER,
pickle_file_name)
path = os.path.isfile(path_to_pickle)
if not path:
pickle.dump(cls(name).__dict__, file(path_to_pickle, 'wb'))
except IOError:
            raise PickleNotFoundException, cls._MISSING_PICKLE.format(name)
def delete(self):
"""Deletes a Clinic Pickle File."""
try:
pickle_file_name = '{0}.pkl'.format(self.name)
path_to_pickle = os.path.join(self._PICKLE_FOLDER,
pickle_file_name)
os.remove(path_to_pickle)
except IOError:
missing_pickle_error = self._MISSING_PICKLE.format(self.name)
raise PickleNotFoundException, missing_pickle_error
@classmethod
def get_all(cls):
return filter(lambda x: x != None,
                      [cls.load(name) for name in cls.get_all_clinic_names()])
@classmethod
def get_all_clinic_names(cls):
pkl_files = [f for f in os.listdir(cls._PICKLE_FOLDER)
if os.path.isfile(os.path.join(cls._PICKLE_FOLDER,f))]
        return [os.path.splitext(f)[0] for f in pkl_files]
@classmethod
def load(cls, name):
"""Loads up a pickled clinic as a clinic object."""
try:
pickle_file_name = '{0}.pkl'.format(name)
path_to_pickle = os.path.join(cls._PICKLE_FOLDER,
pickle_file_name)
if os.path.isfile(path_to_pickle):
clinic = cls(name)
clinic.__dict__ = pickle.load(file(path_to_pickle, 'r+b'))
else:
clinic = None
return clinic
except IOError:
return None
def update(self, post_data):
"""Updates a clinic given the post_data dictionary."""
        self.__dict__.update(post_data)
try:
pickle_file_name = '{0}.pkl'.format(self.name)
path_to_pickle = os.path.join(self._PICKLE_FOLDER,
pickle_file_name)
if os.path.isfile(path_to_pickle):
                pickle.dump(self.__dict__, file(path_to_pickle, 'wb'))
except IOError:
            raise PickleNotFoundException, self._MISSING_PICKLE.format(self.name)
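# Editor's sketch (not part of the original module): a typical round trip,
# assuming the pickle folder 'data/clinics' already exists under the working
# directory.
if __name__ == '__main__':
    ClinicPickle.create('demo_clinic')           # writes data/clinics/demo_clinic.pkl
    clinic = ClinicPickle.load('demo_clinic')    # rebuilds the object from disk
    print clinic.name, clinic.date_created
    print ClinicPickle.get_all_clinic_names()    # ['demo_clinic', ...]
    clinic.delete()                              # removes the pickle file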
|
apache-2.0
| -7,934,011,113,419,530,000
| 36.226804
| 76
| 0.585987
| false
| 3.94214
| false
| false
| false
|
creotiv/django-fuzzytest
|
django_fuzzytest/utils.py
|
1
|
8312
|
# -*- coding: utf-8 -*-
import itertools
from sre_constants import *
import sre_parse
import string
import random
import re
from django.conf import settings
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver, LocaleRegexURLResolver
from django.utils import translation
from django.core.exceptions import ViewDoesNotExist
from django.contrib.admindocs.views import simplify_regex
class RegexpInverter(object):
category_chars = {
CATEGORY_DIGIT : string.digits,
CATEGORY_SPACE : string.whitespace,
CATEGORY_WORD : string.digits + string.letters + '_'
}
def _unique_extend(self, res_list, list):
for item in list:
if item not in res_list:
res_list.append(item)
def _handle_any(self, val):
"""
This is different from normal regexp matching. It only matches
printable ASCII characters.
"""
return string.printable
def _handle_branch(self, (tok, val)):
all_opts = []
for toks in val:
opts = self._permute_toks(toks)
self._unique_extend(all_opts, opts)
return all_opts
def _handle_category(self, val):
return list(self.category_chars[val])
def _handle_in(self, val):
out = []
for tok, val in val:
out += self._handle_tok(tok, val)
return out
def _handle_literal(self, val):
return [unichr(val)]
def _handle_max_repeat(self, (min, max, val)):
"""
Handle a repeat token such as {x,y} or ?.
"""
subtok, subval = val[0]
if max > 5000:
# max is the number of cartesian join operations needed to be
# carried out. More than 5000 consumes way to much memory.
raise ValueError("To many repetitions requested (%d)" % max)
optlist = self._handle_tok(subtok, subval)
iterlist = []
for x in range(min, max + 1):
joined = self._join([optlist] * x)
iterlist.append(joined)
return (''.join(it) for it in itertools.chain(*iterlist))
def _handle_range(self, val):
lo, hi = val
return (chr(x) for x in range(lo, hi + 1))
def _handle_subpattern(self, val):
return list(self._permute_toks(val[1]))
def _handle_tok(self, tok, val):
"""
Returns a list of strings of possible permutations for this regexp
token.
"""
handlers = {
ANY : self._handle_any,
BRANCH : self._handle_branch,
CATEGORY : self._handle_category,
LITERAL : self._handle_literal,
IN : self._handle_in,
MAX_REPEAT : self._handle_max_repeat,
RANGE : self._handle_range,
SUBPATTERN : self._handle_subpattern}
try:
return handlers[tok](val)
except KeyError, e:
fmt = "Unsupported regular expression construct: %s"
raise ValueError(fmt % tok)
def _permute_toks(self, toks):
"""
Returns a generator of strings of possible permutations for this
regexp token list.
"""
lists = [self._handle_tok(tok, val) for tok, val in toks]
return (''.join(it) for it in self._join(lists))
def _join(self, iterlist):
"""
Cartesian join as an iterator of the supplied sequences. Borrowed
from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302478
"""
def rloop(seqin, comb):
if seqin:
for item in seqin[0]:
newcomb = comb + [item]
for item in rloop(seqin[1:], newcomb):
yield item
else:
yield comb
return rloop(iterlist, [])
########## PUBLIC API ####################
def ipermute(self, p):
toks = [tok_n_val for tok_n_val in sre_parse.parse(p)]
return self._permute_toks(toks)
def permute(self, p):
return list(self.ipermute(p))
def random(self, p, length):
res = self.permute(p)
return ''.join(random.choice(res) for i in xrange(length))
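# Editor's sketch (not part of the original module): the public API expands a
# small, finite regular expression into the strings it can match (Python 2).
#
#     inv = RegexpInverter()
#     inv.permute('[ab]c')         # -> [u'ac', u'bc']
#     inv.permute('x{1,2}')        # -> [u'x', u'xx']
#     inv.random('[0-9]{2}', 1)    # -> one random two-digit string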
class UrlFinder(object):
# TODO: Look at https://github.com/ierror/django-js-reverse
def get_urls(self, exclude=None):
if getattr(settings, 'ADMIN_FOR', None):
settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
decorator = ['login_required']
urlconf = "ROOT_URLCONF"
views = []
for settings_mod in settings_modules:
try:
urlconf = __import__(getattr(settings_mod, urlconf), {}, {}, [''])
            except Exception as e:
                import traceback
                traceback.print_exc()
                print("Error occurred while trying to load %s: %s" % (getattr(settings_mod, urlconf), str(e)))
                continue
view_functions = self.extract_views_from_urlpatterns(urlconf.urlpatterns)
for (func, regex, url_name) in view_functions:
if hasattr(func, '__globals__'):
func_globals = func.__globals__
elif hasattr(func, 'func_globals'):
func_globals = func.func_globals
else:
func_globals = {}
if hasattr(func, '__name__'):
func_name = func.__name__
elif hasattr(func, '__class__'):
func_name = '%s()' % func.__class__.__name__
else:
func_name = re.sub(r' at 0x[0-9a-f]+', '', repr(func))
views.append({
"module":func.__module__,
"method":func_name,
"name":url_name,
"regexp": regex,
"url":simplify_regex(regex)
                })
        return views
def extract_views_from_urlpatterns(self, urlpatterns, base='', namespace=None):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if isinstance(p, RegexURLPattern):
try:
if not p.name:
name = p.name
elif namespace:
name = '{0}:{1}'.format(namespace, p.name)
else:
name = p.name
views.append((p.callback, base + p.regex.pattern, name))
except ViewDoesNotExist:
continue
elif isinstance(p, RegexURLResolver):
try:
patterns = p.url_patterns
except ImportError:
continue
if namespace and p.namespace:
_namespace = '{0}:{1}'.format(namespace, p.namespace)
else:
_namespace = (p.namespace or namespace)
                if isinstance(p, LocaleRegexURLResolver):
                    for language in getattr(settings, 'LANGUAGES', []):
                        with translation.override(language[0]):
                            views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=_namespace))
else:
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=_namespace))
elif hasattr(p, '_get_callback'):
try:
views.append((p._get_callback(), base + p.regex.pattern, p.name))
except ViewDoesNotExist:
continue
elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=namespace))
else:
raise TypeError("%s does not appear to be a urlpattern object" % p)
return views
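# Editor's sketch (not part of the original module): with DJANGO_SETTINGS_MODULE
# configured, UrlFinder walks the root urlconf and returns one dict per view:
#
#     finder = UrlFinder()
#     finder.get_urls()
#     # -> [{'module': 'app.views', 'method': 'index', 'name': 'index',
#     #      'regexp': '^$', 'url': '/'}, ...]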
|
bsd-3-clause
| 3,566,915,466,897,581,600
| 34.827586
| 133
| 0.522859
| false
| 4.400212
| false
| false
| false
|
davibe/pygobject
|
gi/_signalhelper.py
|
1
|
9887
|
# -*- Mode: Python; py-indent-offset: 4 -*-
# pygobject - Python bindings for the GObject library
# Copyright (C) 2012 Simon Feltman
#
# gi/_signalhelper.py: GObject signal binding decorator object
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import sys
import inspect
from ._gi import _gobject
# Callable went away in python 3.0 and came back in 3.2.
# Use versioning to figure out when to define it, otherwise we have to deal with
# the complexity of using __builtin__ or builtin between python versions to
# check if callable exists which PyFlakes will also complain about.
if (3, 0) <= sys.version_info < (3, 2):
def callable(fn):
return hasattr(fn, '__call__')
class Signal(str):
"""
Object which gives a nice API for creating and binding signals.
Example:
class Spam(GObject.GObject):
velocity = 0
@GObject.Signal
def pushed(self):
self.velocity += 1
@GObject.Signal(flags=GObject.SignalFlags.RUN_LAST)
def pulled(self):
self.velocity -= 1
stomped = GObject.Signal('stomped', arg_types=(int,))
@GObject.Signal
def annotated_signal(self, a:int, b:str):
"Python3 annotation support for parameter types.
def on_pushed(obj):
print(obj)
spam = Spam()
spam.pushed.connect(on_pushed)
spam.pushed.emit()
"""
class BoundSignal(str):
"""
Temporary binding object which can be used for connecting signals
without specifying the signal name string to connect.
"""
def __new__(cls, name, *args, **kargs):
return str.__new__(cls, name)
def __init__(self, signal, gobj):
str.__init__(self)
self.signal = signal
self.gobj = gobj
def __repr__(self):
return 'BoundSignal("%s")' % self
def __call__(self, *args, **kargs):
"""Call the signals closure."""
return self.signal.func(self.gobj, *args, **kargs)
def connect(self, callback, *args, **kargs):
"""Same as GObject.GObject.connect except there is no need to specify
the signal name."""
return self.gobj.connect(self, callback, *args, **kargs)
def connect_detailed(self, callback, detail, *args, **kargs):
"""Same as GObject.GObject.connect except there is no need to specify
the signal name. In addition concats "::<detail>" to the signal name
when connecting; for use with notifications like "notify" when a property
changes.
"""
return self.gobj.connect(self + '::' + detail, callback, *args, **kargs)
def disconnect(self, handler_id):
"""Same as GObject.GObject.disconnect."""
            self.gobj.disconnect(handler_id)
def emit(self, *args, **kargs):
"""Same as GObject.GObject.emit except there is no need to specify
the signal name."""
return self.gobj.emit(str(self), *args, **kargs)
def __new__(cls, name='', *args, **kargs):
if callable(name):
name = name.__name__
return str.__new__(cls, name)
def __init__(self, name='', func=None, flags=_gobject.SIGNAL_RUN_FIRST,
return_type=None, arg_types=None, doc='', accumulator=None, accu_data=None):
"""
@param name: name of signal or closure method when used as direct decorator.
@type name: string or callable
@param func: closure method.
@type func: callable
@param flags: flags specifying when to run closure
@type flags: GObject.SignalFlags
@param return_type: return type
@type return_type: type
@param arg_types: list of argument types specifying the signals function signature
@type arg_types: None
@param doc: documentation of signal object
@type doc: string
@param accumulator: accumulator method with the signature:
func(ihint, return_accu, handler_return, accu_data) -> boolean
@type accumulator: function
@param accu_data: user data passed to the accumulator
@type accu_data: object
"""
if func and not name:
name = func.__name__
elif callable(name):
func = name
name = func.__name__
if func and not doc:
doc = func.__doc__
str.__init__(self)
if func and not (return_type or arg_types):
return_type, arg_types = get_signal_annotations(func)
if arg_types is None:
arg_types = tuple()
self.func = func
self.flags = flags
self.return_type = return_type
self.arg_types = arg_types
self.__doc__ = doc
self.accumulator = accumulator
self.accu_data = accu_data
def __get__(self, instance, owner=None):
"""Returns a BoundSignal when accessed on an object instance."""
if instance is None:
return self
return self.BoundSignal(self, instance)
def __call__(self, obj, *args, **kargs):
"""Allows for instantiated Signals to be used as a decorator or calling
of the underlying signal method."""
# If obj is a GObject, than we call this signal as a closure otherwise
# it is used as a re-application of a decorator.
if isinstance(obj, _gobject.GObject):
self.func(obj, *args, **kargs)
else:
# If self is already an allocated name, use it otherwise create a new named
# signal using the closure name as the name.
if str(self):
name = str(self)
else:
name = obj.__name__
# Return a new value of this type since it is based on an immutable string.
return type(self)(name=name, func=obj, flags=self.flags,
return_type=self.return_type, arg_types=self.arg_types,
doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
def copy(self, newName=None):
"""Returns a renamed copy of the Signal."""
if newName is None:
            newName = str(self)
return type(self)(name=newName, func=self.func, flags=self.flags,
return_type=self.return_type, arg_types=self.arg_types,
doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
def get_signal_args(self):
"""Returns a tuple of: (flags, return_type, arg_types, accumulator, accu_data)"""
return (self.flags, self.return_type, self.arg_types, self.accumulator, self.accu_data)
class SignalOverride(Signal):
"""Specialized sub-class of signal which can be used as a decorator for overriding
existing signals on GObjects.
Example:
class MyWidget(Gtk.Widget):
@GObject.SignalOverride
def configure_event(self):
pass
"""
def get_signal_args(self):
"""Returns the string 'override'."""
return 'override'
def get_signal_annotations(func):
"""Attempt pulling python 3 function annotations off of 'func' for
use as a signals type information. Returns an ordered nested tuple
of (return_type, (arg_type1, arg_type2, ...)). If the given function
does not have annotations then (None, tuple()) is returned.
"""
arg_types = tuple()
return_type = None
if hasattr(func, '__annotations__'):
spec = inspect.getfullargspec(func)
arg_types = tuple(spec.annotations[arg] for arg in spec.args
if arg in spec.annotations)
if 'return' in spec.annotations:
return_type = spec.annotations['return']
return return_type, arg_types
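# Editor's sketch (not part of pygobject): what the helper above extracts from a
# Python 3 annotated closure.
#
#     def pushed(self, count: int) -> bool:
#         ...
#
#     get_signal_annotations(pushed)   # -> (bool, (int,))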
def install_signals(cls):
"""Adds Signal instances on a GObject derived class into the '__gsignals__'
dictionary to be picked up and registered as real GObject signals.
"""
gsignals = cls.__dict__.get('__gsignals__', {})
newsignals = {}
for name, signal in cls.__dict__.items():
if isinstance(signal, Signal):
signalName = str(signal)
# Fixup a signal which is unnamed by using the class variable name.
# Since Signal is based on string which immutable,
# we must copy and replace the class variable.
if not signalName:
signalName = name
signal = signal.copy(name)
setattr(cls, name, signal)
if signalName in gsignals:
raise ValueError('Signal "%s" has already been registered.' % name)
newsignals[signalName] = signal
gsignals[signalName] = signal.get_signal_args()
cls.__gsignals__ = gsignals
# Setup signal closures by adding the specially named
# method to the class in the form of "do_<signal_name>".
for name, signal in newsignals.items():
if signal.func is not None:
funcName = 'do_' + name.replace('-', '_')
if not hasattr(cls, funcName):
setattr(cls, funcName, signal.func)
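# Editor's sketch (not part of pygobject): install_signals() is normally invoked
# by the GObject metaclass; conceptually it turns Signal attributes into
# __gsignals__ entries (plus do_<name> closures for decorated signals):
#
#     class Spam(GObject.GObject):
#         stomped = Signal('stomped', arg_types=(int,))
#
#     # after install_signals(Spam):
#     #   Spam.__gsignals__['stomped'] == (SIGNAL_RUN_FIRST, None, (int,), None, None)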
|
lgpl-2.1
| -5,544,120,678,858,116,000
| 37.321705
| 103
| 0.607363
| false
| 4.20187
| false
| false
| false
|
cmoutard/mne-python
|
mne/io/array/array.py
|
2
|
1869
|
"""Tools for creating Raw objects from numpy arrays"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from ..base import _BaseRaw
from ...utils import verbose, logger
class RawArray(_BaseRaw):
"""Raw object from numpy array
Parameters
----------
data : array, shape (n_channels, n_times)
The channels' time series.
info : instance of Info
Info dictionary. Consider using `create_info` to populate
this structure. This may be modified in place by the class.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
EpochsArray, EvokedArray, create_info
"""
@verbose
def __init__(self, data, info, verbose=None):
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 2:
            raise ValueError('Data must be a 2D array of shape (n_channels, '
                             'n_samples)')
logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s'
% (dtype.__name__, data.shape[0], data.shape[1]))
if len(data) != len(info['ch_names']):
raise ValueError('len(data) does not match len(info["ch_names"])')
assert len(info['ch_names']) == info['nchan']
if info.get('buffer_size_sec', None) is None:
info['buffer_size_sec'] = 1. # reasonable default
super(RawArray, self).__init__(info, data, verbose=verbose)
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % (
self.first_samp, self.last_samp,
float(self.first_samp) / info['sfreq'],
float(self.last_samp) / info['sfreq']))
logger.info('Ready.')
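# Editor's sketch (not part of the original module): building a RawArray from an
# in-memory numpy array, assuming mne.create_info is available.
if __name__ == '__main__':
    from mne import create_info
    data = np.random.randn(2, 1000)              # 2 channels, 1000 samples
    info = create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=100., ch_types='eeg')
    raw = RawArray(data, info)                   # ~10 seconds of fake EEG at 100 Hz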
|
bsd-3-clause
| 7,648,877,112,066,561,000
| 34.942308
| 79
| 0.578919
| false
| 3.686391
| false
| false
| false
|
bcorbet/SickRage
|
sickbeard/tv.py
|
1
|
109275
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import datetime
import threading
import re
import glob
import stat
import traceback
import shutil
import sickbeard
import xml.etree.cElementTree as etree
from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from lib import subliminal
try:
from lib.send2trash import send2trash
except ImportError:
pass
from lib.imdb import imdb
from sickbeard import db
from sickbeard import helpers, exceptions, logger
from sickbeard.exceptions import ex
from sickbeard import image_cache
from sickbeard import notifiers
from sickbeard import postProcessor
from sickbeard import subtitles
from sickbeard import history
from sickbeard import encodingKludge as ek
from common import Quality, Overview, statusStrings
from common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, ARCHIVED, IGNORED, UNAIRED, WANTED, SKIPPED, \
UNKNOWN, FAILED
from common import NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_SEPARATED_REPEAT, \
NAMING_LIMITED_EXTEND_E_PREFIXED
def dirty_setter(attr_name):
def wrapper(self, val):
if getattr(self, attr_name) != val:
setattr(self, attr_name, val)
self.dirty = True
return wrapper
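# Editor's sketch (not part of SickRage): dirty_setter() pairs with property() so
# that assigning a new value both stores it and marks the owner as dirty, e.g.
#
#     class _Demo(object):
#         _name = ""
#         dirty = False
#         name = property(lambda self: self._name, dirty_setter("_name"))
#
#     d = _Demo()
#     d.name = "Show Name"    # d._name == "Show Name", d.dirty is True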
class TVShow(object):
def __init__(self, indexer, indexerid, lang=""):
self._indexerid = int(indexerid)
self._indexer = int(indexer)
self._name = ""
self._imdbid = ""
self._network = ""
self._genre = ""
self._classification = ""
self._runtime = 0
self._imdb_info = {}
self._quality = int(sickbeard.QUALITY_DEFAULT)
self._flatten_folders = int(sickbeard.FLATTEN_FOLDERS_DEFAULT)
self._status = "Unknown"
self._airs = ""
self._startyear = 0
self._paused = 0
self._air_by_date = 0
self._subtitles = int(sickbeard.SUBTITLES_DEFAULT)
self._dvdorder = 0
self._archive_firstmatch = 0
self._lang = lang
self._last_update_indexer = 1
self._sports = 0
self._anime = 0
self._scene = 0
self._rls_ignore_words = ""
self._rls_require_words = ""
self._default_ep_status = SKIPPED
self.dirty = True
self._location = ""
self.lock = threading.Lock()
self.isDirGood = False
self.episodes = {}
self.nextaired = ""
otherShow = helpers.findCertainShow(sickbeard.showList, self.indexerid)
if otherShow != None:
raise exceptions.MultipleShowObjectsException("Can't create a show if it already exists")
self.loadFromDB()
name = property(lambda self: self._name, dirty_setter("_name"))
indexerid = property(lambda self: self._indexerid, dirty_setter("_indexerid"))
indexer = property(lambda self: self._indexer, dirty_setter("_indexer"))
# location = property(lambda self: self._location, dirty_setter("_location"))
imdbid = property(lambda self: self._imdbid, dirty_setter("_imdbid"))
network = property(lambda self: self._network, dirty_setter("_network"))
genre = property(lambda self: self._genre, dirty_setter("_genre"))
classification = property(lambda self: self._classification, dirty_setter("_classification"))
runtime = property(lambda self: self._runtime, dirty_setter("_runtime"))
imdb_info = property(lambda self: self._imdb_info, dirty_setter("_imdb_info"))
quality = property(lambda self: self._quality, dirty_setter("_quality"))
flatten_folders = property(lambda self: self._flatten_folders, dirty_setter("_flatten_folders"))
status = property(lambda self: self._status, dirty_setter("_status"))
airs = property(lambda self: self._airs, dirty_setter("_airs"))
startyear = property(lambda self: self._startyear, dirty_setter("_startyear"))
paused = property(lambda self: self._paused, dirty_setter("_paused"))
air_by_date = property(lambda self: self._air_by_date, dirty_setter("_air_by_date"))
subtitles = property(lambda self: self._subtitles, dirty_setter("_subtitles"))
dvdorder = property(lambda self: self._dvdorder, dirty_setter("_dvdorder"))
archive_firstmatch = property(lambda self: self._archive_firstmatch, dirty_setter("_archive_firstmatch"))
lang = property(lambda self: self._lang, dirty_setter("_lang"))
last_update_indexer = property(lambda self: self._last_update_indexer, dirty_setter("_last_update_indexer"))
sports = property(lambda self: self._sports, dirty_setter("_sports"))
anime = property(lambda self: self._anime, dirty_setter("_anime"))
scene = property(lambda self: self._scene, dirty_setter("_scene"))
rls_ignore_words = property(lambda self: self._rls_ignore_words, dirty_setter("_rls_ignore_words"))
rls_require_words = property(lambda self: self._rls_require_words, dirty_setter("_rls_require_words"))
default_ep_status = property(lambda self: self._default_ep_status, dirty_setter("_default_ep_status"))
@property
def is_anime(self):
if int(self.anime) > 0:
return True
else:
return False
@property
def is_sports(self):
if int(self.sports) > 0:
return True
else:
return False
@property
def is_scene(self):
if int(self.scene) > 0:
return True
else:
return False
def _getLocation(self):
# no dir check needed if missing show dirs are created during post-processing
if sickbeard.CREATE_MISSING_SHOW_DIRS:
return self._location
if ek.ek(os.path.isdir, self._location):
return self._location
else:
raise exceptions.ShowDirNotFoundException("Show folder doesn't exist, you shouldn't be using it")
def _setLocation(self, newLocation):
logger.log(u"Setter sets location to " + newLocation, logger.DEBUG)
# Don't validate dir if user wants to add shows without creating a dir
if sickbeard.ADD_SHOWS_WO_DIR or ek.ek(os.path.isdir, newLocation):
dirty_setter("_location")(self, newLocation)
self._isDirGood = True
else:
raise exceptions.NoNFOException("Invalid folder for the show!")
location = property(_getLocation, _setLocation)
# delete references to anything that's not in the internal lists
def flushEpisodes(self):
for curSeason in self.episodes:
for curEp in self.episodes[curSeason]:
myEp = self.episodes[curSeason][curEp]
self.episodes[curSeason][curEp] = None
del myEp
def getAllEpisodes(self, season=None, has_location=False):
sql_selection = "SELECT season, episode, "
# subselection to detect multi-episodes early, share_location > 0
sql_selection = sql_selection + " (SELECT COUNT (*) FROM tv_episodes WHERE showid = tve.showid AND season = tve.season AND location != '' AND location = tve.location AND episode != tve.episode) AS share_location "
sql_selection = sql_selection + " FROM tv_episodes tve WHERE showid = " + str(self.indexerid)
if season is not None:
sql_selection = sql_selection + " AND season = " + str(season)
if has_location:
sql_selection = sql_selection + " AND location != '' "
# need ORDER episode ASC to rename multi-episodes in order S01E01-02
sql_selection = sql_selection + " ORDER BY season ASC, episode ASC"
myDB = db.DBConnection()
results = myDB.select(sql_selection)
ep_list = []
for cur_result in results:
cur_ep = self.getEpisode(int(cur_result["season"]), int(cur_result["episode"]))
if not cur_ep:
continue
cur_ep.relatedEps = []
if cur_ep.location:
# if there is a location, check if it's a multi-episode (share_location > 0) and put them in relatedEps
if cur_result["share_location"] > 0:
related_eps_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND location = ? AND episode != ? ORDER BY episode ASC",
[self.indexerid, cur_ep.season, cur_ep.location, cur_ep.episode])
for cur_related_ep in related_eps_result:
related_ep = self.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep and related_ep not in cur_ep.relatedEps:
cur_ep.relatedEps.append(related_ep)
ep_list.append(cur_ep)
return ep_list
def getEpisode(self, season=None, episode=None, file=None, noCreate=False, absolute_number=None, forceUpdate=False):
# if we get an anime get the real season and episode
if self.is_anime and absolute_number and not season and not episode:
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ? AND absolute_number = ? AND season != 0"
sqlResults = myDB.select(sql, [self.indexerid, absolute_number])
if len(sqlResults) == 1:
episode = int(sqlResults[0]["episode"])
season = int(sqlResults[0]["season"])
logger.log(
"Found episode by absolute_number:" + str(absolute_number) + " which is " + str(season) + "x" + str(
episode), logger.DEBUG)
elif len(sqlResults) > 1:
logger.log("Multiple entries for absolute number: " + str(
absolute_number) + " in show: " + self.name + " found ", logger.ERROR)
return None
else:
logger.log(
"No entries for absolute number: " + str(absolute_number) + " in show: " + self.name + " found.",
logger.DEBUG)
return None
if not season in self.episodes:
self.episodes[season] = {}
if not episode in self.episodes[season] or self.episodes[season][episode] is None:
if noCreate:
return None
logger.log(str(self.indexerid) + u": An object for episode " + str(season) + "x" + str(
episode) + " didn't exist in the cache, trying to create it", logger.DEBUG)
if file:
ep = TVEpisode(self, season, episode, file)
else:
ep = TVEpisode(self, season, episode)
if ep != None:
self.episodes[season][episode] = ep
return self.episodes[season][episode]
def should_update(self, update_date=datetime.date.today()):
# if show is not 'Ended' always update (status 'Continuing')
if self.status == 'Continuing':
return True
# run logic against the current show latest aired and next unaired data to see if we should bypass 'Ended' status
graceperiod = datetime.timedelta(days=30)
last_airdate = datetime.date.fromordinal(1)
# get latest aired episode to compare against today - graceperiod and today + graceperiod
myDB = db.DBConnection()
sql_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status > '1' ORDER BY airdate DESC LIMIT 1",
[self.indexerid])
if sql_result:
last_airdate = datetime.date.fromordinal(sql_result[0]['airdate'])
if last_airdate >= (update_date - graceperiod) and last_airdate <= (update_date + graceperiod):
return True
# get next upcoming UNAIRED episode to compare against today + graceperiod
sql_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status = '1' ORDER BY airdate ASC LIMIT 1",
[self.indexerid])
if sql_result:
next_airdate = datetime.date.fromordinal(sql_result[0]['airdate'])
if next_airdate <= (update_date + graceperiod):
return True
last_update_indexer = datetime.date.fromordinal(self.last_update_indexer)
# in the first year after ended (last airdate), update every 30 days
if (update_date - last_airdate) < datetime.timedelta(days=450) and (
update_date - last_update_indexer) > datetime.timedelta(days=30):
return True
return False
def writeShowNFO(self):
result = False
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return False
logger.log(str(self.indexerid) + u": Writing NFOs for show")
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_show_metadata(self) or result
return result
def writeMetadata(self, show_only=False):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
self.getImages()
self.writeShowNFO()
if not show_only:
self.writeEpisodeNFOs()
def writeEpisodeNFOs(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
logger.log(str(self.indexerid) + u": Writing NFOs for all episodes")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.indexerid])
for epResult in sqlResults:
logger.log(str(self.indexerid) + u": Retrieving/creating episode " + str(epResult["season"]) + "x" + str(
epResult["episode"]), logger.DEBUG)
curEp = self.getEpisode(epResult["season"], epResult["episode"])
if not curEp:
continue
curEp.createMetaFiles()
def updateMetadata(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
self.updateShowNFO()
def updateShowNFO(self):
result = False
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return False
logger.log(str(self.indexerid) + u": Updating NFOs for show with new indexer info")
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.update_show_indexer_metadata(self) or result
return result
# find all media files in the show folder and create episodes for as many as possible
def loadEpisodesFromDir(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, not loading episodes from disk")
return
logger.log(str(self.indexerid) + u": Loading all episodes from the show directory " + self._location)
# get file list
mediaFiles = helpers.listMediaFiles(self._location)
# create TVEpisodes from each media file (if possible)
sql_l = []
for mediaFile in mediaFiles:
parse_result = None
curEpisode = None
logger.log(str(self.indexerid) + u": Creating episode from " + mediaFile, logger.DEBUG)
try:
curEpisode = self.makeEpFromFile(ek.ek(os.path.join, self._location, mediaFile))
except (exceptions.ShowNotFoundException, exceptions.EpisodeNotFoundException), e:
logger.log(u"Episode " + mediaFile + " returned an exception: " + ex(e), logger.ERROR)
continue
except exceptions.EpisodeDeletedException:
logger.log(u"The episode deleted itself when I tried making an object for it", logger.DEBUG)
if curEpisode is None:
continue
# see if we should save the release name in the db
ep_file_name = ek.ek(os.path.basename, curEpisode.location)
ep_file_name = ek.ek(os.path.splitext, ep_file_name)[0]
try:
parse_result = None
np = NameParser(False, showObj=self, tryIndexers=True)
parse_result = np.parse(ep_file_name)
except (InvalidNameException, InvalidShowException):
pass
if not ' ' in ep_file_name and parse_result and parse_result.release_group:
logger.log(
u"Name " + ep_file_name + u" gave release group of " + parse_result.release_group + ", seems valid",
logger.DEBUG)
curEpisode.release_name = ep_file_name
# store the reference in the show
if curEpisode != None:
if self.subtitles:
try:
curEpisode.refreshSubtitles()
except:
logger.log(str(self.indexerid) + ": Could not refresh subtitles", logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
sql_l.append(curEpisode.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def loadEpisodesFromDB(self):
logger.log(u"Loading all episodes from the DB")
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ?"
sqlResults = myDB.select(sql, [self.indexerid])
scannedEps = {}
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
cachedShow = t[self.indexerid]
cachedSeasons = {}
for curResult in sqlResults:
deleteEp = False
curSeason = int(curResult["season"])
curEpisode = int(curResult["episode"])
if curSeason not in cachedSeasons:
try:
cachedSeasons[curSeason] = cachedShow[curSeason]
except sickbeard.indexer_seasonnotfound, e:
logger.log(u"Error when trying to load the episode from " + sickbeard.indexerApi(
self.indexer).name + ": " + e.message, logger.WARNING)
deleteEp = True
if not curSeason in scannedEps:
scannedEps[curSeason] = {}
logger.log(u"Loading episode " + str(curSeason) + "x" + str(curEpisode) + " from the DB", logger.DEBUG)
try:
curEp = self.getEpisode(curSeason, curEpisode)
if not curEp:
raise exceptions.EpisodeNotFoundException
# if we found out that the ep is no longer on TVDB then delete it from our database too
if deleteEp:
curEp.deleteEpisode()
curEp.loadFromDB(curSeason, curEpisode)
curEp.loadFromIndexer(tvapi=t, cachedSeason=cachedSeasons[curSeason])
scannedEps[curSeason][curEpisode] = True
except exceptions.EpisodeDeletedException:
logger.log(u"Tried loading an episode from the DB that should have been deleted, skipping it",
logger.DEBUG)
continue
return scannedEps
def loadEpisodesFromIndexer(self, cache=True):
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
try:
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
showObj = t[self.indexerid]
except sickbeard.indexer_error:
logger.log(u"" + sickbeard.indexerApi(
self.indexer).name + " timed out, unable to update episodes from " + sickbeard.indexerApi(
self.indexer).name, logger.ERROR)
return None
logger.log(
str(self.indexerid) + u": Loading all episodes from " + sickbeard.indexerApi(self.indexer).name + "..")
scannedEps = {}
sql_l = []
for season in showObj:
scannedEps[season] = {}
for episode in showObj[season]:
# need some examples of wtf episode 0 means to decide if we want it or not
if episode == 0:
continue
try:
ep = self.getEpisode(season, episode)
if not ep:
raise exceptions.EpisodeNotFoundException
except exceptions.EpisodeNotFoundException:
logger.log(
str(self.indexerid) + ": " + sickbeard.indexerApi(self.indexer).name + " object for " + str(
season) + "x" + str(episode) + " is incomplete, skipping this episode")
continue
else:
try:
ep.loadFromIndexer(tvapi=t)
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted, skipping the rest of the load")
continue
with ep.lock:
logger.log(str(self.indexerid) + u": Loading info from " + sickbeard.indexerApi(
self.indexer).name + " for episode " + str(season) + "x" + str(episode), logger.DEBUG)
ep.loadFromIndexer(season, episode, tvapi=t)
sql_l.append(ep.get_sql())
scannedEps[season][episode] = True
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
# Done updating save last update date
self.last_update_indexer = datetime.date.today().toordinal()
self.saveToDB()
return scannedEps
def getImages(self, fanart=None, poster=None):
fanart_result = poster_result = banner_result = False
season_posters_result = season_banners_result = season_all_poster_result = season_all_banner_result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
# FIXME: Needs to not show this message if the option is not enabled?
logger.log(u"Running metadata routines for " + cur_provider.name, logger.DEBUG)
fanart_result = cur_provider.create_fanart(self) or fanart_result
poster_result = cur_provider.create_poster(self) or poster_result
banner_result = cur_provider.create_banner(self) or banner_result
season_posters_result = cur_provider.create_season_posters(self) or season_posters_result
season_banners_result = cur_provider.create_season_banners(self) or season_banners_result
season_all_poster_result = cur_provider.create_season_all_poster(self) or season_all_poster_result
season_all_banner_result = cur_provider.create_season_all_banner(self) or season_all_banner_result
return fanart_result or poster_result or banner_result or season_posters_result or season_banners_result or season_all_poster_result or season_all_banner_result
# make a TVEpisode object from a media file
def makeEpFromFile(self, file):
if not ek.ek(os.path.isfile, file):
logger.log(str(self.indexerid) + u": That isn't even a real file dude... " + file)
return None
logger.log(str(self.indexerid) + u": Creating episode object from " + file, logger.DEBUG)
try:
myParser = NameParser(showObj=self, tryIndexers=True)
parse_result = myParser.parse(file)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + file + " into a valid episode", logger.DEBUG)
return None
except InvalidShowException:
logger.log(u"Unable to parse the filename " + file + " into a valid show", logger.DEBUG)
return None
if not len(parse_result.episode_numbers):
logger.log("parse_result: " + str(parse_result))
logger.log(u"No episode number found in " + file + ", ignoring it", logger.ERROR)
return None
# for now lets assume that any episode in the show dir belongs to that show
season = parse_result.season_number if parse_result.season_number != None else 1
episodes = parse_result.episode_numbers
rootEp = None
sql_l = []
for curEpNum in episodes:
episode = int(curEpNum)
logger.log(
str(self.indexerid) + ": " + file + " parsed to " + self.name + " " + str(season) + "x" + str(episode),
logger.DEBUG)
checkQualityAgain = False
same_file = False
curEp = self.getEpisode(season, episode)
if not curEp:
try:
curEp = self.getEpisode(season, episode, file)
if not curEp:
raise exceptions.EpisodeNotFoundException
except exceptions.EpisodeNotFoundException:
logger.log(str(self.indexerid) + u": Unable to figure out what this file is, skipping",
logger.ERROR)
continue
else:
# if there is a new file associated with this ep then re-check the quality
if curEp.location and ek.ek(os.path.normpath, curEp.location) != ek.ek(os.path.normpath, file):
logger.log(
u"The old episode had a different file associated with it, I will re-check the quality based on the new filename " + file,
logger.DEBUG)
checkQualityAgain = True
with curEp.lock:
old_size = curEp.file_size
curEp.location = file
# if the sizes are the same then it's probably the same file
if old_size and curEp.file_size == old_size:
same_file = True
else:
same_file = False
curEp.checkForMetaFiles()
if rootEp == None:
rootEp = curEp
else:
if curEp not in rootEp.relatedEps:
with rootEp.lock:
rootEp.relatedEps.append(curEp)
# if it's a new file then
if not same_file:
with curEp.lock:
curEp.release_name = ''
# if they replace a file on me I'll make some attempt at re-checking the quality unless I know it's the same file
if checkQualityAgain and not same_file:
newQuality = Quality.nameQuality(file, self.is_anime)
logger.log(u"Since this file has been renamed, I checked " + file + " and found quality " +
Quality.qualityStrings[newQuality], logger.DEBUG)
if newQuality != Quality.UNKNOWN:
with curEp.lock:
curEp.status = Quality.compositeStatus(DOWNLOADED, newQuality)
# check for status/quality changes as long as it's a new file
elif not same_file and sickbeard.helpers.isMediaFile(file) and curEp.status not in Quality.DOWNLOADED + [
ARCHIVED, IGNORED]:
oldStatus, oldQuality = Quality.splitCompositeStatus(curEp.status)
newQuality = Quality.nameQuality(file, self.is_anime)
if newQuality == Quality.UNKNOWN:
newQuality = Quality.assumeQuality(file)
newStatus = None
# if it was snatched and now exists then set the status correctly
if oldStatus == SNATCHED and oldQuality <= newQuality:
logger.log(u"STATUS: this ep used to be snatched with quality " + Quality.qualityStrings[
oldQuality] + u" but a file exists with quality " + Quality.qualityStrings[
newQuality] + u" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
# if it was snatched proper and we found a higher quality one then allow the status change
elif oldStatus == SNATCHED_PROPER and oldQuality < newQuality:
logger.log(u"STATUS: this ep used to be snatched proper with quality " + Quality.qualityStrings[
oldQuality] + u" but a file exists with quality " + Quality.qualityStrings[
newQuality] + u" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
elif oldStatus not in (SNATCHED, SNATCHED_PROPER):
newStatus = DOWNLOADED
if newStatus is not None:
with curEp.lock:
logger.log(u"STATUS: we have an associated file, so setting the status from " + str(
curEp.status) + u" to DOWNLOADED/" + str(Quality.statusFromName(file, anime=self.is_anime)),
logger.DEBUG)
curEp.status = Quality.compositeStatus(newStatus, newQuality)
with curEp.lock:
sql_l.append(curEp.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
# creating metafiles on the root should be good enough
if rootEp:
with rootEp.lock:
rootEp.createMetaFiles()
return rootEp
def loadFromDB(self, skipNFO=False):
logger.log(str(self.indexerid) + u": Loading show info from database")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows WHERE indexer_id = ?", [self.indexerid])
if len(sqlResults) > 1:
raise exceptions.MultipleDBShowsException()
elif len(sqlResults) == 0:
logger.log(str(self.indexerid) + ": Unable to find the show in the database")
return
else:
self.indexer = int(sqlResults[0]["indexer"] or 0)
if not self.name:
self.name = sqlResults[0]["show_name"]
if not self.network:
self.network = sqlResults[0]["network"]
if not self.genre:
self.genre = sqlResults[0]["genre"]
if not self.classification:
self.classification = sqlResults[0]["classification"]
self.runtime = sqlResults[0]["runtime"]
self.status = sqlResults[0]["status"]
if self.status is None:
self.status = "Unknown"
self.airs = sqlResults[0]["airs"]
if self.airs is None:
self.airs = ""
self.startyear = int(sqlResults[0]["startyear"] or 0)
self.air_by_date = int(sqlResults[0]["air_by_date"] or 0)
self.anime = int(sqlResults[0]["anime"] or 0)
self.sports = int(sqlResults[0]["sports"] or 0)
self.scene = int(sqlResults[0]["scene"] or 0)
self.subtitles = int(sqlResults[0]["subtitles"] or 0)
self.dvdorder = int(sqlResults[0]["dvdorder"] or 0)
self.archive_firstmatch = int(sqlResults[0]["archive_firstmatch"] or 0)
self.quality = int(sqlResults[0]["quality"] or UNKNOWN)
self.flatten_folders = int(sqlResults[0]["flatten_folders"] or 0)
self.paused = int(sqlResults[0]["paused"] or 0)
try:
self.location = sqlResults[0]["location"]
except Exception:
dirty_setter("_location")(self, sqlResults[0]["location"])
self._isDirGood = False
if not self.lang:
self.lang = sqlResults[0]["lang"]
self.last_update_indexer = sqlResults[0]["last_update_indexer"]
self.rls_ignore_words = sqlResults[0]["rls_ignore_words"]
self.rls_require_words = sqlResults[0]["rls_require_words"]
self.default_ep_status = int(sqlResults[0]["default_ep_status"] or SKIPPED)
if not self.imdbid:
self.imdbid = sqlResults[0]["imdb_id"]
# Get IMDb_info from database
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM imdb_info WHERE indexer_id = ?", [self.indexerid])
if len(sqlResults) == 0:
logger.log(str(self.indexerid) + ": Unable to find IMDb show info in the database")
return
else:
self.imdb_info = dict(zip(sqlResults[0].keys(), sqlResults[0]))
self.dirty = False
return True
def loadFromIndexer(self, cache=True, tvapi=None, cachedSeason=None):
logger.log(str(self.indexerid) + u": Loading show info from " + sickbeard.indexerApi(self.indexer).name)
# There's gotta be a better way of doing this but we don't wanna
# change the cache value elsewhere
if tvapi is None:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
else:
t = tvapi
myEp = t[self.indexerid]
try:
self.name = myEp['seriesname'].strip()
except AttributeError:
raise sickbeard.indexer_attributenotfound(
"Found %s, but attribute 'seriesname' was empty." % (self.indexerid))
self.classification = getattr(myEp, 'classification', 'Scripted')
self.genre = getattr(myEp, 'genre', '')
self.network = getattr(myEp, 'network', '')
self.runtime = getattr(myEp, 'runtime', '')
self.imdbid = getattr(myEp, 'imdb_id', '')
if getattr(myEp, 'airs_dayofweek', None) is not None and getattr(myEp, 'airs_time', None) is not None:
self.airs = myEp["airs_dayofweek"] + " " + myEp["airs_time"]
if self.airs is None:
self.airs = ''
if getattr(myEp, 'firstaired', None) is not None:
self.startyear = int(str(myEp["firstaired"]).split('-')[0])
self.status = getattr(myEp, 'status', 'Unknown')
def loadIMDbInfo(self, imdbapi=None):
imdb_info = {'imdb_id': self.imdbid,
'title': '',
'year': '',
'akas': [],
'runtimes': '',
'genres': [],
'countries': '',
'country_codes': [],
'certificates': [],
'rating': '',
'votes': '',
'last_update': ''
}
i = imdb.IMDb()
if not self.imdbid:
self.imdbid = i.title2imdbID(self.name, kind='tv series')
if self.imdbid:
logger.log(str(self.indexerid) + u": Loading show info from IMDb")
imdbTv = i.get_movie(str(re.sub("[^0-9]", "", self.imdbid)))
for key in filter(lambda x: x.replace('_', ' ') in imdbTv.keys(), imdb_info.keys()):
# Store only the first value for string type
if type(imdb_info[key]) == type('') and type(imdbTv.get(key)) == type([]):
imdb_info[key] = imdbTv.get(key.replace('_', ' '))[0]
else:
imdb_info[key] = imdbTv.get(key.replace('_', ' '))
# Filter only the value
if imdb_info['runtimes']:
imdb_info['runtimes'] = re.search('\d+', imdb_info['runtimes']).group(0)
else:
imdb_info['runtimes'] = self.runtime
if imdb_info['akas']:
imdb_info['akas'] = '|'.join(imdb_info['akas'])
else:
imdb_info['akas'] = ''
# Join all genres in a string
if imdb_info['genres']:
imdb_info['genres'] = '|'.join(imdb_info['genres'])
else:
imdb_info['genres'] = ''
# Get only the production country certificate if any
if imdb_info['certificates'] and imdb_info['countries']:
dct = {}
try:
for item in imdb_info['certificates']:
dct[item.split(':')[0]] = item.split(':')[1]
imdb_info['certificates'] = dct[imdb_info['countries']]
except:
imdb_info['certificates'] = ''
else:
imdb_info['certificates'] = ''
if imdb_info['country_codes']:
imdb_info['country_codes'] = '|'.join(imdb_info['country_codes'])
else:
imdb_info['country_codes'] = ''
imdb_info['last_update'] = datetime.date.today().toordinal()
# Rename dict keys without spaces for DB upsert
self.imdb_info = dict(
                (k.replace(' ', '_'), str(v) if hasattr(v, 'keys') else v) for k, v in imdb_info.items())
logger.log(str(self.indexerid) + u": Obtained info from IMDb ->" + str(self.imdb_info), logger.DEBUG)
def nextEpisode(self):
logger.log(str(self.indexerid) + ": Finding the episode which airs next", logger.DEBUG)
curDate = datetime.date.today().toordinal()
if not self.nextaired or self.nextaired and curDate > self.nextaired:
myDB = db.DBConnection()
sqlResults = myDB.select(
"SELECT airdate, season, episode FROM tv_episodes WHERE showid = ? AND airdate >= ? AND status IN (?,?) ORDER BY airdate ASC LIMIT 1",
[self.indexerid, datetime.date.today().toordinal(), UNAIRED, WANTED])
if sqlResults == None or len(sqlResults) == 0:
logger.log(str(self.indexerid) + u": No episode found... need to implement a show status",
logger.DEBUG)
self.nextaired = ""
else:
logger.log(str(self.indexerid) + u": Found episode " + str(sqlResults[0]["season"]) + "x" + str(
sqlResults[0]["episode"]), logger.DEBUG)
self.nextaired = sqlResults[0]['airdate']
return self.nextaired
def deleteShow(self, full=False):
sql_l = [["DELETE FROM tv_episodes WHERE showid = ?", [self.indexerid]],
["DELETE FROM tv_shows WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM imdb_info WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM xem_refresh WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM scene_numbering WHERE indexer_id = ?", [self.indexerid]]]
myDB = db.DBConnection()
myDB.mass_action(sql_l)
action = ('delete', 'trash')[sickbeard.TRASH_REMOVE_SHOW]
# remove self from show list
sickbeard.showList = [x for x in sickbeard.showList if int(x.indexerid) != self.indexerid]
# clear the cache
image_cache_dir = ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images')
for cache_file in ek.ek(glob.glob, ek.ek(os.path.join, image_cache_dir, str(self.indexerid) + '.*')):
logger.log(u'Attempt to %s cache file %s' % (action, cache_file))
try:
if sickbeard.TRASH_REMOVE_SHOW:
send2trash(cache_file)
else:
os.remove(cache_file)
except OSError, e:
logger.log(u'Unable to %s %s: %s / %s' % (action, cache_file, repr(e), str(e)), logger.WARNING)
# remove entire show folder
if full:
try:
logger.log(u'Attempt to %s show folder %s' % (action, self._location))
# check first the read-only attribute
file_attribute = ek.ek(os.stat, self.location)[0]
if (not file_attribute & stat.S_IWRITE):
# File is read-only, so make it writeable
logger.log('Attempting to make writeable the read only folder %s' % self._location, logger.DEBUG)
try:
ek.ek(os.chmod, self.location, stat.S_IWRITE)
except:
logger.log(u'Unable to change permissions of %s' % self._location, logger.WARNING)
if sickbeard.TRASH_REMOVE_SHOW:
send2trash(self.location)
else:
ek.ek(shutil.rmtree, self.location)
logger.log(u'%s show folder %s' %
(('Deleted', 'Trashed')[sickbeard.TRASH_REMOVE_SHOW],
self._location))
except exceptions.ShowDirNotFoundException:
logger.log(u"Show folder does not exist, no need to %s %s" % (action, self._location), logger.WARNING)
except OSError, e:
logger.log(u'Unable to %s %s: %s / %s' % (action, self._location, repr(e), str(e)), logger.WARNING)
def populateCache(self):
cache_inst = image_cache.ImageCache()
logger.log(u"Checking & filling cache for show " + self.name)
cache_inst.fill_cache(self)
def refreshDir(self):
# make sure the show dir is where we think it is unless dirs are created on the fly
if not ek.ek(os.path.isdir, self._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS:
return False
# load from dir
self.loadEpisodesFromDir()
# run through all locations from DB, check that they exist
logger.log(str(self.indexerid) + u": Loading all episodes with a location from the database")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.indexerid])
sql_l = []
for ep in sqlResults:
curLoc = os.path.normpath(ep["location"])
season = int(ep["season"])
episode = int(ep["episode"])
try:
curEp = self.getEpisode(season, episode)
if not curEp:
raise exceptions.EpisodeDeletedException
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted while we were refreshing it, moving on to the next one",
logger.DEBUG)
continue
# if the path doesn't exist or if it's not in our show dir
if not ek.ek(os.path.isfile, curLoc) or not os.path.normpath(curLoc).startswith(
os.path.normpath(self.location)):
# check if downloaded files still exist, update our data if this has changed
if not sickbeard.SKIP_REMOVED_FILES:
with curEp.lock:
# if it used to have a file associated with it and it doesn't anymore then set it to IGNORED
if curEp.location and curEp.status in Quality.DOWNLOADED:
logger.log(str(self.indexerid) + u": Location for " + str(season) + "x" + str(
episode) + " doesn't exist, removing it and changing our status to IGNORED",
logger.DEBUG)
curEp.status = IGNORED
curEp.subtitles = list()
curEp.subtitles_searchcount = 0
curEp.subtitles_lastsearch = str(datetime.datetime.min)
curEp.location = ''
curEp.hasnfo = False
curEp.hastbn = False
curEp.release_name = ''
sql_l.append(curEp.get_sql())
else:
# the file exists, set its modify file stamp
if sickbeard.AIRDATE_EPISODES:
with curEp.lock:
curEp.airdateModifyStamp()
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def downloadSubtitles(self, force=False):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + ": Show dir doesn't exist, can't download subtitles", logger.DEBUG)
return
logger.log(str(self.indexerid) + ": Downloading subtitles", logger.DEBUG)
try:
episodes = self.getAllEpisodes(has_location=True)
if not len(episodes) > 0:
logger.log(str(self.indexerid) + ": No episodes to download subtitles for " + self.name, logger.DEBUG)
return
for episode in episodes:
episode.downloadSubtitles(force=force)
except Exception:
logger.log("Error occurred when downloading subtitles: " + traceback.format_exc(), logger.DEBUG)
def saveToDB(self, forceSave=False):
if not self.dirty and not forceSave:
logger.log(str(self.indexerid) + ": Not saving show to db - record is not dirty", logger.DEBUG)
return
logger.log(str(self.indexerid) + u": Saving show info to database", logger.DEBUG)
controlValueDict = {"indexer_id": self.indexerid}
newValueDict = {"indexer": self.indexer,
"show_name": self.name,
"location": self._location,
"network": self.network,
"genre": self.genre,
"classification": self.classification,
"runtime": self.runtime,
"quality": self.quality,
"airs": self.airs,
"status": self.status,
"flatten_folders": self.flatten_folders,
"paused": self.paused,
"air_by_date": self.air_by_date,
"anime": self.anime,
"scene": self.scene,
"sports": self.sports,
"subtitles": self.subtitles,
"dvdorder": self.dvdorder,
"archive_firstmatch": self.archive_firstmatch,
"startyear": self.startyear,
"lang": self.lang,
"imdb_id": self.imdbid,
"last_update_indexer": self.last_update_indexer,
"rls_ignore_words": self.rls_ignore_words,
"rls_require_words": self.rls_require_words,
"default_ep_status": self.default_ep_status
}
myDB = db.DBConnection()
myDB.upsert("tv_shows", newValueDict, controlValueDict)
helpers.update_anime_support()
if self.imdbid:
controlValueDict = {"indexer_id": self.indexerid}
newValueDict = self.imdb_info
myDB = db.DBConnection()
myDB.upsert("imdb_info", newValueDict, controlValueDict)
def __str__(self):
toReturn = ""
toReturn += "indexerid: " + str(self.indexerid) + "\n"
toReturn += "indexer: " + str(self.indexer) + "\n"
toReturn += "name: " + self.name + "\n"
toReturn += "location: " + self._location + "\n"
if self.network:
toReturn += "network: " + self.network + "\n"
if self.airs:
toReturn += "airs: " + self.airs + "\n"
toReturn += "status: " + self.status + "\n"
toReturn += "startyear: " + str(self.startyear) + "\n"
if self.genre:
toReturn += "genre: " + self.genre + "\n"
toReturn += "classification: " + self.classification + "\n"
toReturn += "runtime: " + str(self.runtime) + "\n"
toReturn += "quality: " + str(self.quality) + "\n"
toReturn += "scene: " + str(self.is_scene) + "\n"
toReturn += "sports: " + str(self.is_sports) + "\n"
toReturn += "anime: " + str(self.is_anime) + "\n"
return toReturn
def wantEpisode(self, season, episode, quality, manualSearch=False):
logger.log(u"Checking if found episode " + str(season) + "x" + str(episode) + " is wanted at quality " +
Quality.qualityStrings[quality], logger.DEBUG)
# if the quality isn't one we want under any circumstances then just say no
anyQualities, bestQualities = Quality.splitQuality(self.quality)
logger.log(u"any,best = " + str(anyQualities) + " " + str(bestQualities) + " and found " + str(quality),
logger.DEBUG)
if quality not in anyQualities + bestQualities:
logger.log(u"Don't want this quality, ignoring found episode", logger.DEBUG)
return False
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
[self.indexerid, season, episode])
if not sqlResults or not len(sqlResults):
logger.log(u"Unable to find a matching episode in database, ignoring found episode", logger.DEBUG)
return False
epStatus = int(sqlResults[0]["status"])
epStatus_text = statusStrings[epStatus]
logger.log(u"Existing episode status: " + str(epStatus) + " (" + epStatus_text + ")", logger.DEBUG)
# if we know we don't want it then just say no
if epStatus in (SKIPPED, IGNORED, ARCHIVED) and not manualSearch:
logger.log(u"Existing episode status is skipped/ignored/archived, ignoring found episode", logger.DEBUG)
return False
# if it's one of these then we want it as long as it's in our allowed initial qualities
if quality in anyQualities + bestQualities:
if epStatus in (WANTED, UNAIRED, SKIPPED):
logger.log(u"Existing episode status is wanted/unaired/skipped, getting found episode", logger.DEBUG)
return True
elif manualSearch:
logger.log(
u"Usually ignoring found episode, but forced search allows the quality, getting found episode",
logger.DEBUG)
return True
else:
logger.log(u"Quality is on wanted list, need to check if it's better than existing quality",
logger.DEBUG)
curStatus, curQuality = Quality.splitCompositeStatus(epStatus)
# if we are re-downloading then we only want it if it's in our bestQualities list and better than what we have
if curStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST and quality in bestQualities and quality > curQuality:
logger.log(u"Episode already exists but the found episode has better quality, getting found episode",
logger.DEBUG)
return True
else:
logger.log(u"Episode already exists and the found episode has same/lower quality, ignoring found episode",
logger.DEBUG)
logger.log(u"None of the conditions were met, ignoring found episode", logger.DEBUG)
return False
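    # Rough illustration of the decision flow above (hypothetical values, assuming a show
    # whose anyQualities contain Quality.SDTV and whose bestQualities contain Quality.HDTV):
    #   wantEpisode(1, 5, Quality.SDTV)  -> True   (episode status is WANTED)
    #   wantEpisode(1, 5, Quality.SDTV)  -> False  (already DOWNLOADED at SDTV, SDTV is not a "best" quality)
    #   wantEpisode(1, 5, Quality.HDTV)  -> True   (HDTV is in bestQualities and better than the current SDTV)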
def getOverview(self, epStatus):
if epStatus == WANTED:
return Overview.WANTED
elif epStatus in (UNAIRED, UNKNOWN):
return Overview.UNAIRED
elif epStatus in (SKIPPED, IGNORED):
return Overview.SKIPPED
elif epStatus == ARCHIVED:
return Overview.GOOD
elif epStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.FAILED + Quality.SNATCHED_BEST:
anyQualities, bestQualities = Quality.splitQuality(self.quality) # @UnusedVariable
if bestQualities:
maxBestQuality = max(bestQualities)
else:
maxBestQuality = None
epStatus, curQuality = Quality.splitCompositeStatus(epStatus)
if epStatus == FAILED:
return Overview.WANTED
elif epStatus in (SNATCHED, SNATCHED_PROPER, SNATCHED_BEST):
return Overview.SNATCHED
# if they don't want re-downloads then we call it good if they have anything
elif maxBestQuality == None:
return Overview.GOOD
# if they have one but it's not the best they want then mark it as qual
elif curQuality < maxBestQuality:
return Overview.QUAL
# if it's >= maxBestQuality then it's good
else:
return Overview.GOOD
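    # Sketch of the mapping above for the same hypothetical show (SDTV initial, HDTV "best"):
    # a WANTED episode maps to Overview.WANTED, DOWNLOADED at SDTV maps to Overview.QUAL
    # (we have a file, but not yet the best wanted quality), and DOWNLOADED at HDTV maps
    # to Overview.GOOD.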
def __getstate__(self):
d = dict(self.__dict__)
del d['lock']
return d
def __setstate__(self, d):
d['lock'] = threading.Lock()
self.__dict__.update(d)
class TVEpisode(object):
def __init__(self, show, season, episode, file=""):
self._name = ""
self._season = season
self._episode = episode
self._absolute_number = 0
self._description = ""
self._subtitles = list()
self._subtitles_searchcount = 0
self._subtitles_lastsearch = str(datetime.datetime.min)
self._airdate = datetime.date.fromordinal(1)
self._hasnfo = False
self._hastbn = False
self._status = UNKNOWN
self._indexerid = 0
self._file_size = 0
self._release_name = ''
self._is_proper = False
self._version = 0
self._release_group = ''
# setting any of the above sets the dirty flag
self.dirty = True
self.show = show
self.scene_season = 0
self.scene_episode = 0
self.scene_absolute_number = 0
self._location = file
self._indexer = int(self.show.indexer)
self.lock = threading.Lock()
self.specifyEpisode(self.season, self.episode)
self.relatedEps = []
self.checkForMetaFiles()
self.wantedQuality = []
name = property(lambda self: self._name, dirty_setter("_name"))
season = property(lambda self: self._season, dirty_setter("_season"))
episode = property(lambda self: self._episode, dirty_setter("_episode"))
absolute_number = property(lambda self: self._absolute_number, dirty_setter("_absolute_number"))
description = property(lambda self: self._description, dirty_setter("_description"))
subtitles = property(lambda self: self._subtitles, dirty_setter("_subtitles"))
subtitles_searchcount = property(lambda self: self._subtitles_searchcount, dirty_setter("_subtitles_searchcount"))
subtitles_lastsearch = property(lambda self: self._subtitles_lastsearch, dirty_setter("_subtitles_lastsearch"))
airdate = property(lambda self: self._airdate, dirty_setter("_airdate"))
hasnfo = property(lambda self: self._hasnfo, dirty_setter("_hasnfo"))
hastbn = property(lambda self: self._hastbn, dirty_setter("_hastbn"))
status = property(lambda self: self._status, dirty_setter("_status"))
indexer = property(lambda self: self._indexer, dirty_setter("_indexer"))
indexerid = property(lambda self: self._indexerid, dirty_setter("_indexerid"))
# location = property(lambda self: self._location, dirty_setter("_location"))
file_size = property(lambda self: self._file_size, dirty_setter("_file_size"))
release_name = property(lambda self: self._release_name, dirty_setter("_release_name"))
is_proper = property(lambda self: self._is_proper, dirty_setter("_is_proper"))
version = property(lambda self: self._version, dirty_setter("_version"))
release_group = property(lambda self: self._release_group, dirty_setter("_release_group"))
def _set_location(self, new_location):
logger.log(u"Setter sets location to " + new_location, logger.DEBUG)
# self._location = newLocation
dirty_setter("_location")(self, new_location)
if new_location and ek.ek(os.path.isfile, new_location):
self.file_size = ek.ek(os.path.getsize, new_location)
else:
self.file_size = 0
location = property(lambda self: self._location, _set_location)
def refreshSubtitles(self):
"""Look for subtitles files and refresh the subtitles property"""
self.subtitles = subtitles.subtitlesLanguages(self.location)
def downloadSubtitles(self, force=False):
# TODO: Add support for force option
if not ek.ek(os.path.isfile, self.location):
logger.log(
str(self.show.indexerid) + ": Episode file doesn't exist, can't download subtitles for episode " + str(
self.season) + "x" + str(self.episode), logger.DEBUG)
return
logger.log(str(self.show.indexerid) + ": Downloading subtitles for episode " + str(self.season) + "x" + str(
self.episode), logger.DEBUG)
previous_subtitles = self.subtitles
try:
need_languages = set(sickbeard.SUBTITLES_LANGUAGES) - set(self.subtitles)
subtitles = subliminal.download_subtitles([self.location], languages=need_languages,
services=sickbeard.subtitles.getEnabledServiceList(), force=force,
multi=sickbeard.SUBTITLES_MULTI, cache_dir=sickbeard.CACHE_DIR)
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder " + subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
helpers.chmodAsParent(subtitle.path)
except Exception as e:
logger.log("Error occurred when downloading subtitles: " + traceback.format_exc(), logger.ERROR)
return
self.refreshSubtitles()
        self.subtitles_searchcount = self.subtitles_searchcount + 1 if self.subtitles_searchcount else 1  # the conditional guards against a missing count, which previously raised an error
self.subtitles_lastsearch = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.saveToDB()
newsubtitles = set(self.subtitles).difference(set(previous_subtitles))
if newsubtitles:
subtitleList = ", ".join(subliminal.language.Language(x).name for x in newsubtitles)
logger.log(str(self.show.indexerid) + u": Downloaded " + subtitleList + " subtitles for episode " + str(
self.season) + "x" + str(self.episode), logger.DEBUG)
notifiers.notify_subtitle_download(self.prettyName(), subtitleList)
else:
logger.log(
str(self.show.indexerid) + u": No subtitles downloaded for episode " + str(self.season) + "x" + str(
self.episode), logger.DEBUG)
if sickbeard.SUBTITLES_HISTORY:
for video in subtitles:
for subtitle in subtitles.get(video):
history.logSubtitle(self.show.indexerid, self.season, self.episode, self.status, subtitle)
return subtitles
def checkForMetaFiles(self):
oldhasnfo = self.hasnfo
oldhastbn = self.hastbn
cur_nfo = False
cur_tbn = False
# check for nfo and tbn
if ek.ek(os.path.isfile, self.location):
for cur_provider in sickbeard.metadata_provider_dict.values():
if cur_provider.episode_metadata:
new_result = cur_provider._has_episode_metadata(self)
else:
new_result = False
cur_nfo = new_result or cur_nfo
if cur_provider.episode_thumbnails:
new_result = cur_provider._has_episode_thumb(self)
else:
new_result = False
cur_tbn = new_result or cur_tbn
self.hasnfo = cur_nfo
self.hastbn = cur_tbn
# if either setting has changed return true, if not return false
return oldhasnfo != self.hasnfo or oldhastbn != self.hastbn
def specifyEpisode(self, season, episode):
sqlResult = self.loadFromDB(season, episode)
if not sqlResult:
# only load from NFO if we didn't load from DB
if ek.ek(os.path.isfile, self.location):
try:
self.loadFromNFO(self.location)
except exceptions.NoNFOException:
logger.log(str(self.show.indexerid) + u": There was an error loading the NFO for episode " + str(
season) + "x" + str(episode), logger.ERROR)
pass
# if we tried loading it from NFO and didn't find the NFO, try the Indexers
if not self.hasnfo:
try:
result = self.loadFromIndexer(season, episode)
except exceptions.EpisodeDeletedException:
result = False
# if we failed SQL *and* NFO, Indexers then fail
if not result:
raise exceptions.EpisodeNotFoundException(
"Couldn't find episode " + str(season) + "x" + str(episode))
def loadFromDB(self, season, episode):
logger.log(
str(self.show.indexerid) + u": Loading episode details from DB for episode " + str(season) + "x" + str(
episode), logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
[self.show.indexerid, season, episode])
if len(sqlResults) > 1:
raise exceptions.MultipleDBEpisodesException("Your DB has two records for the same show somehow.")
elif len(sqlResults) == 0:
logger.log(str(self.show.indexerid) + u": Episode " + str(self.season) + "x" + str(
self.episode) + " not found in the database", logger.DEBUG)
return False
else:
# NAMEIT logger.log(u"AAAAA from" + str(self.season)+"x"+str(self.episode) + " -" + self.name + " to " + str(sqlResults[0]["name"]))
if sqlResults[0]["name"]:
self.name = sqlResults[0]["name"]
self.season = season
self.episode = episode
self.absolute_number = sqlResults[0]["absolute_number"]
self.description = sqlResults[0]["description"]
if not self.description:
self.description = ""
if sqlResults[0]["subtitles"] and sqlResults[0]["subtitles"]:
self.subtitles = sqlResults[0]["subtitles"].split(",")
self.subtitles_searchcount = sqlResults[0]["subtitles_searchcount"]
self.subtitles_lastsearch = sqlResults[0]["subtitles_lastsearch"]
self.airdate = datetime.date.fromordinal(int(sqlResults[0]["airdate"]))
# logger.log(u"1 Status changes from " + str(self.status) + " to " + str(sqlResults[0]["status"]), logger.DEBUG)
self.status = int(sqlResults[0]["status"] or -1)
# don't overwrite my location
if sqlResults[0]["location"] and sqlResults[0]["location"]:
self.location = os.path.normpath(sqlResults[0]["location"])
if sqlResults[0]["file_size"]:
self.file_size = int(sqlResults[0]["file_size"])
else:
self.file_size = 0
self.indexerid = int(sqlResults[0]["indexerid"])
self.indexer = int(sqlResults[0]["indexer"])
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
try:
self.scene_season = int(sqlResults[0]["scene_season"])
except:
self.scene_season = 0
try:
self.scene_episode = int(sqlResults[0]["scene_episode"])
except:
self.scene_episode = 0
try:
self.scene_absolute_number = int(sqlResults[0]["scene_absolute_number"])
except:
self.scene_absolute_number = 0
if self.scene_absolute_number == 0:
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
if self.scene_season == 0 or self.scene_episode == 0:
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
if sqlResults[0]["release_name"] is not None:
self.release_name = sqlResults[0]["release_name"]
if sqlResults[0]["is_proper"]:
self.is_proper = int(sqlResults[0]["is_proper"])
if sqlResults[0]["version"]:
self.version = int(sqlResults[0]["version"])
if sqlResults[0]["release_group"] is not None:
self.release_group = sqlResults[0]["release_group"]
self.dirty = False
return True
def loadFromIndexer(self, season=None, episode=None, cache=True, tvapi=None, cachedSeason=None):
if season is None:
season = self.season
if episode is None:
episode = self.episode
logger.log(str(self.show.indexerid) + u": Loading episode details from " + sickbeard.indexerApi(
self.show.indexer).name + " for episode " + str(season) + "x" + str(episode), logger.DEBUG)
indexer_lang = self.show.lang
try:
if cachedSeason is None:
if tvapi is None:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if indexer_lang:
lINDEXER_API_PARMS['language'] = indexer_lang
if self.show.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
else:
t = tvapi
myEp = t[self.show.indexerid][season][episode]
else:
myEp = cachedSeason[episode]
except (sickbeard.indexer_error, IOError), e:
logger.log(u"" + sickbeard.indexerApi(self.indexer).name + " threw up an error: " + ex(e), logger.DEBUG)
# if the episode is already valid just log it, if not throw it up
if self.name:
logger.log(u"" + sickbeard.indexerApi(
self.indexer).name + " timed out but we have enough info from other sources, allowing the error",
logger.DEBUG)
return
else:
logger.log(u"" + sickbeard.indexerApi(self.indexer).name + " timed out, unable to create the episode",
logger.ERROR)
return False
except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
logger.log(u"Unable to find the episode on " + sickbeard.indexerApi(
self.indexer).name + "... has it been removed? Should I delete from db?", logger.DEBUG)
# if I'm no longer on the Indexers but I once was then delete myself from the DB
if self.indexerid != -1:
self.deleteEpisode()
return
if getattr(myEp, 'episodename', None) is None:
logger.log(u"This episode (" + self.show.name + " - " + str(season) + "x" + str(
episode) + ") has no name on " + sickbeard.indexerApi(self.indexer).name + "")
# if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now
if self.indexerid != -1:
self.deleteEpisode()
return False
if getattr(myEp, 'absolute_number', None) is None:
logger.log(u"This episode (" + self.show.name + " - " + str(season) + "x" + str(
episode) + ") has no absolute number on " + sickbeard.indexerApi(
self.indexer).name, logger.DEBUG)
else:
logger.log(
str(self.show.indexerid) + ": The absolute_number for " + str(season) + "x" + str(episode) + " is : " +
str(myEp["absolute_number"]), logger.DEBUG)
self.absolute_number = int(myEp["absolute_number"])
self.name = getattr(myEp, 'episodename', "")
self.season = season
self.episode = episode
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
self.description = getattr(myEp, 'overview', "")
firstaired = getattr(myEp, 'firstaired', None)
if not firstaired or firstaired == "0000-00-00":
firstaired = str(datetime.date.fromordinal(1))
rawAirdate = [int(x) for x in firstaired.split("-")]
try:
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
except (ValueError, IndexError):
logger.log(u"Malformed air date of " + str(firstaired) + " retrieved from " + sickbeard.indexerApi(
self.indexer).name + " for (" + self.show.name + " - " + str(season) + "x" + str(episode) + ")",
logger.WARNING)
# if I'm incomplete on the indexer but I once was complete then just delete myself from the DB for now
if self.indexerid != -1:
self.deleteEpisode()
return False
# early conversion to int so that episode doesn't get marked dirty
self.indexerid = getattr(myEp, 'id', None)
if self.indexerid is None:
logger.log(u"Failed to retrieve ID from " + sickbeard.indexerApi(self.indexer).name, logger.ERROR)
if self.indexerid != -1:
self.deleteEpisode()
return False
# don't update show status if show dir is missing, unless it's missing on purpose
if not ek.ek(os.path.isdir,
self.show._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS and not sickbeard.ADD_SHOWS_WO_DIR:
logger.log(
u"The show dir is missing, not bothering to change the episode statuses since it'd probably be invalid")
return
if self.location:
logger.log(str(self.show.indexerid) + u": Setting status for " + str(season) + "x" + str(
episode) + " based on status " + str(self.status) + " and existence of " + self.location, logger.DEBUG)
# if we don't have the file
if not ek.ek(os.path.isfile, self.location):
# if it hasn't aired yet set the status to UNAIRED
if self.airdate >= datetime.date.today() and self.status in [SKIPPED, UNAIRED, UNKNOWN, WANTED]:
logger.log(u"Episode airs in the future, marking it " + str(UNAIRED), logger.DEBUG)
self.status = UNAIRED
# if there's no airdate then set it to skipped (and respect ignored)
elif self.airdate == datetime.date.fromordinal(1):
if self.status == IGNORED:
logger.log(u"Episode has no air date, but it's already marked as ignored", logger.DEBUG)
else:
logger.log(u"Episode has no air date, automatically marking it skipped", logger.DEBUG)
self.status = SKIPPED
# if we don't have the file and the airdate is in the past
else:
if self.status == UNAIRED:
if self.season > 0:
self.status = WANTED
else:
self.status = SKIPPED
# if we somehow are still UNKNOWN then just use the shows defined default status or SKIPPED
elif self.status == UNKNOWN:
self.status = self.show.default_ep_status
else:
logger.log(
u"Not touching status because we have no ep file, the airdate is in the past, and the status is " + str(
self.status), logger.DEBUG)
# if we have a media file then it's downloaded
elif sickbeard.helpers.isMediaFile(self.location):
# leave propers alone, you have to either post-process them or manually change them back
if self.status not in Quality.SNATCHED_PROPER + Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED]:
logger.log(
u"5 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)),
logger.DEBUG)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
# shouldn't get here probably
else:
logger.log(u"6 Status changes from " + str(self.status) + " to " + str(UNKNOWN), logger.DEBUG)
self.status = UNKNOWN
def loadFromNFO(self, location):
if not ek.ek(os.path.isdir, self.show._location):
logger.log(
str(self.show.indexerid) + u": The show dir is missing, not bothering to try loading the episode NFO")
return
logger.log(
str(self.show.indexerid) + u": Loading episode details from the NFO file associated with " + location,
logger.DEBUG)
self.location = location
if self.location != "":
if self.status == UNKNOWN:
if sickbeard.helpers.isMediaFile(self.location):
logger.log(u"7 Status changes from " + str(self.status) + " to " + str(
Quality.statusFromName(self.location, anime=self.show.is_anime)), logger.DEBUG)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
nfoFile = sickbeard.helpers.replaceExtension(self.location, "nfo")
logger.log(str(self.show.indexerid) + u": Using NFO name " + nfoFile, logger.DEBUG)
if ek.ek(os.path.isfile, nfoFile):
try:
showXML = etree.ElementTree(file=nfoFile)
except (SyntaxError, ValueError), e:
logger.log(u"Error loading the NFO, backing up the NFO and skipping for now: " + ex(e),
logger.ERROR) # TODO: figure out what's wrong and fix it
try:
ek.ek(os.rename, nfoFile, nfoFile + ".old")
except Exception, e:
logger.log(
u"Failed to rename your episode's NFO file - you need to delete it or fix it: " + ex(e),
logger.ERROR)
raise exceptions.NoNFOException("Error in NFO format")
for epDetails in showXML.getiterator('episodedetails'):
if epDetails.findtext('season') is None or int(epDetails.findtext('season')) != self.season or \
epDetails.findtext('episode') is None or int(
epDetails.findtext('episode')) != self.episode:
logger.log(str(
self.show.indexerid) + u": NFO has an <episodedetails> block for a different episode - wanted " + str(
self.season) + "x" + str(self.episode) + " but got " + str(
epDetails.findtext('season')) + "x" + str(epDetails.findtext('episode')), logger.DEBUG)
continue
if epDetails.findtext('title') is None or epDetails.findtext('aired') is None:
raise exceptions.NoNFOException("Error in NFO format (missing episode title or airdate)")
self.name = epDetails.findtext('title')
self.episode = int(epDetails.findtext('episode'))
self.season = int(epDetails.findtext('season'))
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
self.description = epDetails.findtext('plot')
if self.description is None:
self.description = ""
if epDetails.findtext('aired'):
rawAirdate = [int(x) for x in epDetails.findtext('aired').split("-")]
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
else:
self.airdate = datetime.date.fromordinal(1)
self.hasnfo = True
else:
self.hasnfo = False
if ek.ek(os.path.isfile, sickbeard.helpers.replaceExtension(nfoFile, "tbn")):
self.hastbn = True
else:
self.hastbn = False
def __str__(self):
toReturn = ""
toReturn += str(self.show.name) + " - " + str(self.season) + "x" + str(self.episode) + " - " + str(
self.name) + "\n"
toReturn += "location: " + str(self.location) + "\n"
toReturn += "description: " + str(self.description) + "\n"
toReturn += "subtitles: " + str(",".join(self.subtitles)) + "\n"
toReturn += "subtitles_searchcount: " + str(self.subtitles_searchcount) + "\n"
toReturn += "subtitles_lastsearch: " + str(self.subtitles_lastsearch) + "\n"
toReturn += "airdate: " + str(self.airdate.toordinal()) + " (" + str(self.airdate) + ")\n"
toReturn += "hasnfo: " + str(self.hasnfo) + "\n"
toReturn += "hastbn: " + str(self.hastbn) + "\n"
toReturn += "status: " + str(self.status) + "\n"
return toReturn
def createMetaFiles(self):
if not ek.ek(os.path.isdir, self.show._location):
logger.log(str(self.show.indexerid) + u": The show dir is missing, not bothering to try to create metadata")
return
self.createNFO()
self.createThumbnail()
if self.checkForMetaFiles():
self.saveToDB()
def createNFO(self):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_metadata(self) or result
return result
def createThumbnail(self):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_thumb(self) or result
return result
def deleteEpisode(self):
logger.log(u"Deleting " + self.show.name + " " + str(self.season) + "x" + str(self.episode) + " from the DB",
logger.DEBUG)
# remove myself from the show dictionary
if self.show.getEpisode(self.season, self.episode, noCreate=True) == self:
logger.log(u"Removing myself from my show's list", logger.DEBUG)
del self.show.episodes[self.season][self.episode]
# delete myself from the DB
logger.log(u"Deleting myself from the database", logger.DEBUG)
myDB = db.DBConnection()
sql = "DELETE FROM tv_episodes WHERE showid=" + str(self.show.indexerid) + " AND season=" + str(
self.season) + " AND episode=" + str(self.episode)
myDB.action(sql)
raise exceptions.EpisodeDeletedException()
def get_sql(self, forceSave=False):
"""
Creates SQL queue for this episode if any of its data has been changed since the last save.
forceSave: If True it will create SQL queue even if no data has been changed since the
last save (aka if the record is not dirty).
"""
if not self.dirty and not forceSave:
logger.log(str(self.show.indexerid) + u": Not creating SQL queue - record is not dirty", logger.DEBUG)
return
myDB = db.DBConnection()
rows = myDB.select(
'SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?',
[self.show.indexerid, self.season, self.episode])
epID = None
if rows:
epID = int(rows[0]['episode_id'])
if epID:
# use a custom update method to get the data into the DB for existing records.
return [
"UPDATE tv_episodes SET indexerid = ?, indexer = ?, name = ?, description = ?, subtitles = ?, "
"subtitles_searchcount = ?, subtitles_lastsearch = ?, airdate = ?, hasnfo = ?, hastbn = ?, status = ?, "
"location = ?, file_size = ?, release_name = ?, is_proper = ?, showid = ?, season = ?, episode = ?, "
"absolute_number = ?, version = ?, release_group = ? WHERE episode_id = ?",
[self.indexerid, self.indexer, self.name, self.description, ",".join([sub for sub in self.subtitles]),
self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo,
self.hastbn,
self.status, self.location, self.file_size, self.release_name, self.is_proper, self.show.indexerid,
self.season, self.episode, self.absolute_number, self.version, self.release_group, epID]]
else:
# use a custom insert method to get the data into the DB.
return [
"INSERT OR IGNORE INTO tv_episodes (episode_id, indexerid, indexer, name, description, subtitles, "
"subtitles_searchcount, subtitles_lastsearch, airdate, hasnfo, hastbn, status, location, file_size, "
"release_name, is_proper, showid, season, episode, absolute_number, version, release_group) VALUES "
"((SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?)"
",?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);",
[self.show.indexerid, self.season, self.episode, self.indexerid, self.indexer, self.name,
self.description,
",".join([sub for sub in self.subtitles]), self.subtitles_searchcount, self.subtitles_lastsearch,
self.airdate.toordinal(), self.hasnfo, self.hastbn, self.status, self.location, self.file_size,
self.release_name, self.is_proper, self.show.indexerid, self.season, self.episode,
self.absolute_number, self.version, self.release_group]]
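    # Minimal usage sketch (assumed caller pattern, mirroring TVShow.refreshDir above):
    # callers collect the [query, args] pairs returned here and run them in one transaction:
    #   sql_l = [ep.get_sql() for ep in changed_eps if ep.dirty]   # changed_eps is hypothetical
    #   db.DBConnection().mass_action(sql_l)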
def saveToDB(self, forceSave=False):
"""
Saves this episode to the database if any of its data has been changed since the last save.
forceSave: If True it will save to the database even if no data has been changed since the
last save (aka if the record is not dirty).
"""
if not self.dirty and not forceSave:
logger.log(str(self.show.indexerid) + u": Not saving episode to db - record is not dirty", logger.DEBUG)
return
logger.log(str(self.show.indexerid) + u": Saving episode details to database", logger.DEBUG)
logger.log(u"STATUS IS " + str(self.status), logger.DEBUG)
newValueDict = {"indexerid": self.indexerid,
"indexer": self.indexer,
"name": self.name,
"description": self.description,
"subtitles": ",".join([sub for sub in self.subtitles]),
"subtitles_searchcount": self.subtitles_searchcount,
"subtitles_lastsearch": self.subtitles_lastsearch,
"airdate": self.airdate.toordinal(),
"hasnfo": self.hasnfo,
"hastbn": self.hastbn,
"status": self.status,
"location": self.location,
"file_size": self.file_size,
"release_name": self.release_name,
"is_proper": self.is_proper,
"absolute_number": self.absolute_number,
"version": self.version,
"release_group": self.release_group
}
controlValueDict = {"showid": self.show.indexerid,
"season": self.season,
"episode": self.episode}
# use a custom update/insert method to get the data into the DB
myDB = db.DBConnection()
myDB.upsert("tv_episodes", newValueDict, controlValueDict)
def fullPath(self):
if self.location == None or self.location == "":
return None
else:
return ek.ek(os.path.join, self.show.location, self.location)
def createStrings(self, pattern=None):
patterns = [
'%S.N.S%SE%0E',
'%S.N.S%0SE%E',
'%S.N.S%SE%E',
'%S.N.S%0SE%0E',
'%SN S%SE%0E',
'%SN S%0SE%E',
'%SN S%SE%E',
'%SN S%0SE%0E'
]
strings = []
if not pattern:
for p in patterns:
strings += [self._format_pattern(p)]
return strings
return self._format_pattern(pattern)
def prettyName(self):
"""
Returns the name of this episode in a "pretty" human-readable format. Used for logging
and notifications and such.
Returns: A string representing the episode's name and season/ep numbers
"""
if self.show.anime and not self.show.scene:
return self._format_pattern('%SN - %AB - %EN')
elif self.show.air_by_date:
return self._format_pattern('%SN - %AD - %EN')
return self._format_pattern('%SN - %Sx%0E - %EN')
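    # Example outputs (hypothetical episode data): a regular show renders as
    # "Show Name - 3x04 - Episode Title", an air-by-date show as
    # "Show Name - 2014 05 01 - Episode Title" (%AD swaps dashes for spaces),
    # and a non-scene anime show as "Show Name - 042 - Episode Title".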
def _ep_name(self):
"""
Returns the name of the episode to use during renaming. Combines the names of related episodes.
Eg. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name"
"Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name"
"""
multiNameRegex = "(.*) \(\d{1,2}\)"
self.relatedEps = sorted(self.relatedEps, key=lambda x: x.episode)
if len(self.relatedEps) == 0:
goodName = self.name
else:
goodName = ''
singleName = True
curGoodName = None
for curName in [self.name] + [x.name for x in self.relatedEps]:
match = re.match(multiNameRegex, curName)
if not match:
singleName = False
break
if curGoodName == None:
curGoodName = match.group(1)
elif curGoodName != match.group(1):
singleName = False
break
if singleName:
goodName = curGoodName
else:
goodName = self.name
for relEp in self.relatedEps:
goodName += " & " + relEp.name
return goodName
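    # Illustration of the combining rules from the docstring (hypothetical names):
    #   "Ep Name (1)" + related "Ep Name (2)"   -> "Ep Name"
    #   "Ep Name"     + related "Other Ep Name" -> "Ep Name & Other Ep Name"
    #   no related episodes                     -> the episode's own name, unchanged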
def _replace_map(self):
"""
Generates a replacement map for this episode which maps all possible custom naming patterns to the correct
value for this episode.
Returns: A dict with patterns as the keys and their replacement values as the values.
"""
ep_name = self._ep_name()
def dot(name):
return helpers.sanitizeSceneName(name)
def us(name):
return re.sub('[ -]', '_', name)
def release_name(name):
if name:
name = helpers.remove_non_release_groups(helpers.remove_extension(name))
return name
def release_group(show, name):
if name:
name = helpers.remove_non_release_groups(helpers.remove_extension(name))
else:
return ""
try:
np = NameParser(name, showObj=show, naming_pattern=True)
parse_result = np.parse(name)
except (InvalidNameException, InvalidShowException), e:
logger.log(u"Unable to get parse release_group: " + ex(e), logger.DEBUG)
return ''
if not parse_result.release_group:
return ''
return parse_result.release_group
epStatus, epQual = Quality.splitCompositeStatus(self.status) # @UnusedVariable
if sickbeard.NAMING_STRIP_YEAR:
show_name = re.sub("\(\d+\)$", "", self.show.name).rstrip()
else:
show_name = self.show.name
return {
'%SN': show_name,
'%S.N': dot(show_name),
'%S_N': us(show_name),
'%EN': ep_name,
'%E.N': dot(ep_name),
'%E_N': us(ep_name),
'%QN': Quality.qualityStrings[epQual],
'%Q.N': dot(Quality.qualityStrings[epQual]),
'%Q_N': us(Quality.qualityStrings[epQual]),
'%S': str(self.season),
'%0S': '%02d' % self.season,
'%E': str(self.episode),
'%0E': '%02d' % self.episode,
'%XS': str(self.scene_season),
'%0XS': '%02d' % self.scene_season,
'%XE': str(self.scene_episode),
'%0XE': '%02d' % self.scene_episode,
'%AB': '%(#)03d' % {'#': self.absolute_number},
'%XAB': '%(#)03d' % {'#': self.scene_absolute_number},
'%RN': release_name(self.release_name),
'%RG': release_group(self.show, self.release_name),
'%AD': str(self.airdate).replace('-', ' '),
'%A.D': str(self.airdate).replace('-', '.'),
'%A_D': us(str(self.airdate)),
'%A-D': str(self.airdate),
'%Y': str(self.airdate.year),
'%M': str(self.airdate.month),
'%D': str(self.airdate.day),
'%0M': '%02d' % self.airdate.month,
'%0D': '%02d' % self.airdate.day,
'%RT': "PROPER" if self.is_proper else "",
}
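    # Worked example of the map above (hypothetical episode: "Ep Name", 3x04 of "Show Name"):
    # feeding the pattern '%S.N.S%0SE%0E.%E.N' through _format_string below yields
    # 'Show.Name.S03E04.Ep.Name'.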
def _format_string(self, pattern, replace_map):
"""
Replaces all template strings with the correct value
"""
result_name = pattern
# do the replacements
for cur_replacement in sorted(replace_map.keys(), reverse=True):
result_name = result_name.replace(cur_replacement, helpers.sanitizeFileName(replace_map[cur_replacement]))
result_name = result_name.replace(cur_replacement.lower(),
helpers.sanitizeFileName(replace_map[cur_replacement].lower()))
return result_name
def _format_pattern(self, pattern=None, multi=None, anime_type=None):
"""
Manipulates an episode naming pattern and then fills the template in
"""
if pattern == None:
pattern = sickbeard.NAMING_PATTERN
if multi == None:
multi = sickbeard.NAMING_MULTI_EP
if anime_type == None:
anime_type = sickbeard.NAMING_ANIME
replace_map = self._replace_map()
result_name = pattern
# if there's no release group then replace it with a reasonable facsimile
if not replace_map['%RN']:
if self.show.air_by_date or self.show.sports:
result_name = result_name.replace('%RN', '%S.N.%A.D.%E.N-SiCKRAGE')
result_name = result_name.replace('%rn', '%s.n.%A.D.%e.n-sickrage')
elif anime_type != 3:
result_name = result_name.replace('%RN', '%S.N.%AB.%E.N-SiCKRAGE')
result_name = result_name.replace('%rn', '%s.n.%ab.%e.n-sickrage')
else:
result_name = result_name.replace('%RN', '%S.N.S%0SE%0E.%E.N-SiCKRAGE')
result_name = result_name.replace('%rn', '%s.n.s%0se%0e.%e.n-sickrage')
result_name = result_name.replace('%RG', 'SICKRAGE')
result_name = result_name.replace('%rg', 'sickrage')
logger.log(u"Episode has no release name, replacing it with a generic one: " + result_name, logger.DEBUG)
if not replace_map['%RT']:
result_name = re.sub('([ _.-]*)%RT([ _.-]*)', r'\2', result_name)
# split off ep name part only
name_groups = re.split(r'[\\/]', result_name)
# figure out the double-ep numbering style for each group, if applicable
for cur_name_group in name_groups:
season_format = sep = ep_sep = ep_format = None
season_ep_regex = '''
(?P<pre_sep>[ _.-]*)
((?:s(?:eason|eries)?\s*)?%0?S(?![._]?N))
(.*?)
(%0?E(?![._]?N))
(?P<post_sep>[ _.-]*)
'''
ep_only_regex = '(E?%0?E(?![._]?N))'
# try the normal way
season_ep_match = re.search(season_ep_regex, cur_name_group, re.I | re.X)
ep_only_match = re.search(ep_only_regex, cur_name_group, re.I | re.X)
# if we have a season and episode then collect the necessary data
if season_ep_match:
season_format = season_ep_match.group(2)
ep_sep = season_ep_match.group(3)
ep_format = season_ep_match.group(4)
sep = season_ep_match.group('pre_sep')
if not sep:
sep = season_ep_match.group('post_sep')
if not sep:
sep = ' '
# force 2-3-4 format if they chose to extend
if multi in (NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED):
ep_sep = '-'
regex_used = season_ep_regex
# if there's no season then there's not much choice so we'll just force them to use 03-04-05 style
elif ep_only_match:
season_format = ''
ep_sep = '-'
ep_format = ep_only_match.group(1)
sep = ''
regex_used = ep_only_regex
else:
continue
# we need at least this much info to continue
if not ep_sep or not ep_format:
continue
# start with the ep string, eg. E03
ep_string = self._format_string(ep_format.upper(), replace_map)
for other_ep in self.relatedEps:
# for limited extend we only append the last ep
if multi in (NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED) and other_ep != self.relatedEps[
-1]:
continue
elif multi == NAMING_DUPLICATE:
# add " - S01"
ep_string += sep + season_format
elif multi == NAMING_SEPARATED_REPEAT:
ep_string += sep
# add "E04"
ep_string += ep_sep
if multi == NAMING_LIMITED_EXTEND_E_PREFIXED:
ep_string += 'E'
ep_string += other_ep._format_string(ep_format.upper(), other_ep._replace_map())
if anime_type != 3:
if self.absolute_number == 0:
curAbsolute_number = self.episode
else:
curAbsolute_number = self.absolute_number
if self.season != 0: # dont set absolute numbers if we are on specials !
if anime_type == 1: # this crazy person wants both ! (note: +=)
ep_string += sep + "%(#)03d" % {
"#": curAbsolute_number}
elif anime_type == 2: # total anime freak only need the absolute number ! (note: =)
ep_string = "%(#)03d" % {"#": curAbsolute_number}
for relEp in self.relatedEps:
if relEp.absolute_number != 0:
ep_string += '-' + "%(#)03d" % {"#": relEp.absolute_number}
else:
ep_string += '-' + "%(#)03d" % {"#": relEp.episode}
regex_replacement = None
if anime_type == 2:
regex_replacement = r'\g<pre_sep>' + ep_string + r'\g<post_sep>'
elif season_ep_match:
regex_replacement = r'\g<pre_sep>\g<2>\g<3>' + ep_string + r'\g<post_sep>'
elif ep_only_match:
regex_replacement = ep_string
if regex_replacement:
# fill out the template for this piece and then insert this piece into the actual pattern
cur_name_group_result = re.sub('(?i)(?x)' + regex_used, regex_replacement, cur_name_group)
# cur_name_group_result = cur_name_group.replace(ep_format, ep_string)
# logger.log(u"found "+ep_format+" as the ep pattern using "+regex_used+" and replaced it with "+regex_replacement+" to result in "+cur_name_group_result+" from "+cur_name_group, logger.DEBUG)
result_name = result_name.replace(cur_name_group, cur_name_group_result)
result_name = self._format_string(result_name, replace_map)
logger.log(u"formatting pattern: " + pattern + " -> " + result_name, logger.DEBUG)
return result_name
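    # Hypothetical multi-episode example: with multi=NAMING_EXTEND, the pattern
    # '%SN - S%0SE%0E' for episode 3x04 with a related episode 3x05 is rewritten to
    # '%SN - S%0SE04-05' by the block above and then filled in to
    # 'Show Name - S03E04-05'.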
def proper_path(self):
"""
Figures out the path where this episode SHOULD live according to the renaming rules, relative from the show dir
"""
anime_type = sickbeard.NAMING_ANIME
if not self.show.is_anime:
anime_type = 3
result = self.formatted_filename(anime_type=anime_type)
# if they want us to flatten it and we're allowed to flatten it then we will
if self.show.flatten_folders and not sickbeard.NAMING_FORCE_FOLDERS:
return result
# if not we append the folder on and use that
else:
result = ek.ek(os.path.join, self.formatted_dir(), result)
return result
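    # Sketch of the result (hypothetical naming pattern 'Season %0S/%SN - %Sx%0E - %EN'):
    # with folders kept this returns 'Season 03/Show Name - 3x04 - Ep Name' relative to the
    # show dir, while a flattened show keeps only the filename part.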
def formatted_dir(self, pattern=None, multi=None):
"""
Just the folder name of the episode
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
elif self.show.sports and sickbeard.NAMING_CUSTOM_SPORTS and not self.relatedEps:
pattern = sickbeard.NAMING_SPORTS_PATTERN
elif self.show.anime and sickbeard.NAMING_CUSTOM_ANIME:
pattern = sickbeard.NAMING_ANIME_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
if len(name_groups) == 1:
return ''
else:
return self._format_pattern(os.sep.join(name_groups[:-1]), multi)
def formatted_filename(self, pattern=None, multi=None, anime_type=None):
"""
Just the filename of the episode, formatted based on the naming settings
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
elif self.show.sports and sickbeard.NAMING_CUSTOM_SPORTS and not self.relatedEps:
pattern = sickbeard.NAMING_SPORTS_PATTERN
elif self.show.anime and sickbeard.NAMING_CUSTOM_ANIME:
pattern = sickbeard.NAMING_ANIME_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
return self._format_pattern(name_groups[-1], multi, anime_type)
def rename(self):
"""
Renames an episode file and all related files to the location and filename as specified
in the naming settings.
"""
if not ek.ek(os.path.isfile, self.location):
logger.log(u"Can't perform rename on " + self.location + " when it doesn't exist, skipping", logger.WARNING)
return
proper_path = self.proper_path()
absolute_proper_path = ek.ek(os.path.join, self.show.location, proper_path)
absolute_current_path_no_ext, file_ext = ek.ek(os.path.splitext, self.location)
absolute_current_path_no_ext_length = len(absolute_current_path_no_ext)
related_subs = []
current_path = absolute_current_path_no_ext
if absolute_current_path_no_ext.startswith(self.show.location):
current_path = absolute_current_path_no_ext[len(self.show.location):]
logger.log(u"Renaming/moving episode from the base path " + self.location + " to " + absolute_proper_path,
logger.DEBUG)
# if it's already named correctly then don't do anything
if proper_path == current_path:
logger.log(str(self.indexerid) + u": File " + self.location + " is already named correctly, skipping",
logger.DEBUG)
return
related_files = postProcessor.PostProcessor(self.location).list_associated_files(
self.location)
if self.show.subtitles and sickbeard.SUBTITLES_DIR != '':
related_subs = postProcessor.PostProcessor(self.location).list_associated_files(sickbeard.SUBTITLES_DIR,
subtitles_only=True)
absolute_proper_subs_path = ek.ek(os.path.join, sickbeard.SUBTITLES_DIR, self.formatted_filename())
logger.log(u"Files associated to " + self.location + ": " + str(related_files), logger.DEBUG)
# move the ep file
result = helpers.rename_ep_file(self.location, absolute_proper_path, absolute_current_path_no_ext_length)
# move related files
for cur_related_file in related_files:
cur_result = helpers.rename_ep_file(cur_related_file, absolute_proper_path,
absolute_current_path_no_ext_length)
if not cur_result:
logger.log(str(self.indexerid) + u": Unable to rename file " + cur_related_file, logger.ERROR)
for cur_related_sub in related_subs:
absolute_proper_subs_path = ek.ek(os.path.join, sickbeard.SUBTITLES_DIR, self.formatted_filename())
cur_result = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path,
absolute_current_path_no_ext_length)
if not cur_result:
logger.log(str(self.indexerid) + u": Unable to rename file " + cur_related_sub, logger.ERROR)
# save the ep
with self.lock:
if result:
self.location = absolute_proper_path + file_ext
for relEp in self.relatedEps:
relEp.location = absolute_proper_path + file_ext
# in case something changed with the metadata just do a quick check
for curEp in [self] + self.relatedEps:
curEp.checkForMetaFiles()
        # save any changes to the database
sql_l = []
with self.lock:
for relEp in [self] + self.relatedEps:
sql_l.append(relEp.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def airdateModifyStamp(self):
"""
Make the modify date and time of a file reflect the show air date and time.
Note: Also called from postProcessor
"""
hr = min = 0
airs = re.search('.*?(\d{1,2})(?::\s*?(\d{2}))?\s*(pm)?', self.show.airs, re.I)
if airs:
hr = int(airs.group(1))
hr = (12 + hr, hr)[None is airs.group(3)]
hr = (hr, hr - 12)[0 == hr % 12 and 0 != hr]
min = int((airs.group(2), min)[None is airs.group(2)])
airtime = datetime.time(hr, min)
airdatetime = datetime.datetime.combine(self.airdate, airtime)
filemtime = datetime.datetime.fromtimestamp(os.path.getmtime(self.location))
if filemtime != airdatetime:
import time
airdatetime = airdatetime.timetuple()
logger.log(str(self.show.indexerid) + u": About to modify date of '" + self.location
+ "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.DEBUG)
try:
if helpers.touchFile(self.location, time.mktime(airdatetime)):
logger.log(str(self.show.indexerid) + u": Changed modify date of " + os.path.basename(self.location)
+ " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime))
else:
logger.log(str(self.show.indexerid) + u": Unable to modify date of " + os.path.basename(self.location)
+ " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.ERROR)
except:
logger.log(str(self.show.indexerid) + u": Failed to modify date of '" + os.path.basename(self.location)
+ "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.ERROR)
def __getstate__(self):
d = dict(self.__dict__)
del d['lock']
return d
def __setstate__(self, d):
d['lock'] = threading.Lock()
self.__dict__.update(d)
|
gpl-3.0
| 470,947,764,053,170,940
| 42.345894
| 221
| 0.563395
| false
| 4.107928
| false
| false
| false
|
baloo/shinken
|
shinken/modules/ip_tag_arbiter/__init__.py
|
1
|
1426
|
#!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
properties = {
'daemons' : ['arbiter'],
'type' : 'ip_tag',
}
#called by the plugin manager to get an instance of this arbiter module
def get_instance(plugin):
#print "Get a Service Perfdata broker for plugin %s" % plugin.get_name()
# First try to import
try:
from ip_tag_arbiter import Ip_Tag_Arbiter
except ImportError , exp:
print "Warning : the plugin type %s is unavalable : %s" % ('ip_tag', exp)
return None
# Catch errors
ip_range = plugin.ip_range
prop = plugin.property
value = plugin.value
method = getattr(plugin, 'method', 'replace')
instance = Ip_Tag_Arbiter(plugin, ip_range, prop, value, method)
return instance
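# Hypothetical module definition for a shinken .cfg file (field names assumed from the
# attributes read above: ip_range, property, value and the optional method):
#
#   define module {
#       module_name     IpTag
#       module_type     ip_tag
#       ip_range        192.168.0.0/24
#       property        poller_tag
#       value           DMZ
#       method          replace
#   }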
|
agpl-3.0
| -5,142,636,619,774,961,000
| 30.688889
| 81
| 0.699158
| false
| 3.66581
| false
| false
| false
|
jcushman/pywb
|
pywb/warc/archiveiterator.py
|
1
|
14850
|
from pywb.utils.timeutils import iso_date_to_timestamp
from pywb.utils.bufferedreaders import DecompressingBufferedReader
from pywb.utils.canonicalize import canonicalize
from pywb.utils.loaders import extract_post_query, append_post_query
from recordloader import ArcWarcRecordLoader
import hashlib
import base64
import re
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
#=================================================================
class ArchiveIterator(object):
""" Iterate over records in WARC and ARC files, both gzip chunk
compressed and uncompressed
The indexer will automatically detect format, and decompress
if necessary.
"""
GZIP_ERR_MSG = """
ERROR: Non-chunked gzip file detected, gzip block continues
beyond single record.
This file is probably not a multi-chunk gzip but a single gzip file.
To allow seek, a gzipped {1} must have each record compressed into
a single gzip chunk and concatenated together.
This file is likely still valid and you can use it by decompressing it:
gunzip myfile.{0}.gz
You can then also use the 'warc2warc' tool from the 'warc-tools'
package which will create a properly chunked gzip file:
warc2warc -Z myfile.{0} > myfile.{0}.gz
"""
def __init__(self, fileobj, no_record_parse=False,
verify_http=False):
self.fh = fileobj
self.loader = ArcWarcRecordLoader(verify_http=verify_http)
self.reader = None
self.offset = 0
self.known_format = None
self.member_info = None
self.no_record_parse = no_record_parse
def iter_records(self, block_size=16384):
""" iterate over each record
"""
decomp_type = 'gzip'
self.reader = DecompressingBufferedReader(self.fh,
block_size=block_size)
self.offset = self.fh.tell()
self.next_line = None
is_valid = True
while True:
try:
record = self._next_record(self.next_line)
if not is_valid:
self._raise_err()
yield record
except EOFError:
break
self.read_to_end(record)
if self.reader.decompressor:
is_valid = self.reader.read_next_member()
def _raise_err(self):
frmt = 'warc/arc'
if self.known_format:
frmt = self.known_format
frmt_up = frmt.upper()
msg = self.GZIP_ERR_MSG.format(frmt, frmt_up)
raise Exception(msg)
def _consume_blanklines(self):
""" Consume blank lines that are between records
- For warcs, there are usually 2
- For arcs, may be 1 or 0
- For block gzipped files, these are at end of each gzip envelope
and are included in record length which is the full gzip envelope
- For uncompressed, they are between records and so are NOT part of
the record length
        count empty_size so that it can be subtracted from
the record length for uncompressed
"""
empty_size = 0
while True:
line = self.reader.readline()
if len(line) == 0:
return None, empty_size
if line.rstrip() == '':
empty_size += len(line)
continue
return line, empty_size
def read_to_end(self, record, compute_digest=False):
""" Read remainder of the stream
If a digester is included, update it
with the data read
"""
# already at end of this record, don't read until it is consumed
if self.member_info:
return None
if compute_digest:
digester = hashlib.sha1()
else:
digester = None
num = 0
curr_offset = self.offset
while True:
b = record.stream.read(8192)
if not b:
break
num += len(b)
if digester:
digester.update(b)
"""
- For compressed files, blank lines are consumed
since they are part of record length
- For uncompressed files, blank lines are read later,
and not included in the record length
"""
#if self.reader.decompressor:
self.next_line, empty_size = self._consume_blanklines()
self.offset = self.fh.tell() - self.reader.rem_length()
#if self.offset < 0:
# raise Exception('Not Gzipped Properly')
if self.next_line:
self.offset -= len(self.next_line)
length = self.offset - curr_offset
if not self.reader.decompressor:
length -= empty_size
if compute_digest:
digest = base64.b32encode(digester.digest())
else:
digest = None
self.member_info = (curr_offset, length, digest)
#return self.member_info
#return next_line
def _next_record(self, next_line):
""" Use loader to parse the record from the reader stream
Supporting warc and arc records
"""
record = self.loader.parse_record_stream(self.reader,
next_line,
self.known_format,
self.no_record_parse)
self.member_info = None
# Track known format for faster parsing of other records
self.known_format = record.format
return record
#=================================================================
class ArchiveIndexEntryMixin(object):
MIME_RE = re.compile('[; ]')
def reset_entry(self):
self['urlkey'] = ''
def extract_mime(self, mime, def_mime='unk'):
""" Utility function to extract mimetype only
from a full content type, removing charset settings
"""
self['mime'] = def_mime
if mime:
self['mime'] = self.MIME_RE.split(mime, 1)[0]
def extract_status(self, status_headers):
""" Extract status code only from status line
"""
self['status'] = status_headers.get_statuscode()
if not self['status']:
self['status'] = '-'
elif self['status'] == '204' and 'Error' in status_headers.statusline:
self['status'] = '-'
def set_rec_info(self, offset, length, digest):
if digest:
self['digest'] = digest
self['length'] = str(length)
self['offset'] = str(offset)
def merge_request_data(self, other, options):
surt_ordered = options.get('surt_ordered', True)
if other.record.rec_type != 'request':
return False
# two requests, not correct
if self.record.rec_type == 'request':
return False
# merge POST/PUT body query
post_query = other.get('_post_query')
if post_query:
url = append_post_query(self['url'], post_query)
self['urlkey'] = canonicalize(url, surt_ordered)
other['urlkey'] = self['urlkey']
referer = other.record.status_headers.get_header('referer')
if referer:
self['_referer'] = referer
return True
#=================================================================
class DefaultRecordIter(object):
def __init__(self, **options):
self.options = options
self.entry_cache = {}
def _create_index_entry(self, rec_type):
try:
entry = self.entry_cache[rec_type]
entry.reset_entry()
        except KeyError:
if self.options.get('cdxj'):
entry = OrderedArchiveIndexEntry()
else:
entry = ArchiveIndexEntry()
self.entry_cache[rec_type] = entry
return entry
def create_record_iter(self, arcv_iter):
append_post = self.options.get('append_post')
include_all = self.options.get('include_all')
block_size = self.options.get('block_size', 16384)
surt_ordered = self.options.get('surt_ordered', True)
minimal = self.options.get('minimal')
append_post = self.options.get('append_post')
if append_post and minimal:
raise Exception('Sorry, minimal index option and ' +
'append POST options can not be used together')
for record in arcv_iter.iter_records(block_size):
entry = None
if not include_all and not minimal and (record.status_headers.get_statuscode() == '-'):
continue
if record.format == 'warc':
if (record.rec_type in ('request', 'warcinfo') and
not include_all and
not append_post):
continue
elif (not include_all and
record.content_type == 'application/warc-fields'):
continue
entry = self.parse_warc_record(record)
elif record.format == 'arc':
entry = self.parse_arc_record(record)
if not entry:
continue
if entry.get('url') and not entry.get('urlkey'):
entry['urlkey'] = canonicalize(entry['url'], surt_ordered)
compute_digest = False
if (entry.get('digest', '-') == '-' and
record.rec_type not in ('revisit', 'request', 'warcinfo')):
compute_digest = True
elif not minimal and record.rec_type == 'request' and append_post:
method = record.status_headers.protocol
len_ = record.status_headers.get_header('Content-Length')
post_query = extract_post_query(method,
entry.get('mime'),
len_,
record.stream)
entry['_post_query'] = post_query
arcv_iter.read_to_end(record, compute_digest)
entry.set_rec_info(*arcv_iter.member_info)
entry.record = record
yield entry
def join_request_records(self, entry_iter):
prev_entry = None
for entry in entry_iter:
if not prev_entry:
prev_entry = entry
continue
# check for url match
if (entry['url'] != prev_entry['url']):
pass
# check for concurrency also
elif (entry.record.rec_headers.get_header('WARC-Concurrent-To') !=
prev_entry.record.rec_headers.get_header('WARC-Record-ID')):
pass
elif (entry.merge_request_data(prev_entry, self.options) or
prev_entry.merge_request_data(entry, self.options)):
yield prev_entry
yield entry
prev_entry = None
continue
yield prev_entry
prev_entry = entry
if prev_entry:
yield prev_entry
#=================================================================
def parse_warc_record(self, record):
""" Parse warc record
"""
entry = self._create_index_entry(record.rec_type)
if record.rec_type == 'warcinfo':
entry['url'] = record.rec_headers.get_header('WARC-Filename')
entry['urlkey'] = entry['url']
entry['_warcinfo'] = record.stream.read(record.length)
return entry
entry['url'] = record.rec_headers.get_header('WARC-Target-Uri')
# timestamp
entry['timestamp'] = iso_date_to_timestamp(record.rec_headers.
get_header('WARC-Date'))
# mime
if record.rec_type == 'revisit':
entry['mime'] = 'warc/revisit'
elif self.options.get('minimal'):
entry['mime'] = '-'
else:
def_mime = '-' if record.rec_type == 'request' else 'unk'
entry.extract_mime(record.status_headers.
get_header('Content-Type'),
def_mime)
# status -- only for response records (by convention):
if record.rec_type == 'response' and not self.options.get('minimal'):
entry.extract_status(record.status_headers)
else:
entry['status'] = '-'
# digest
digest = record.rec_headers.get_header('WARC-Payload-Digest')
entry['digest'] = digest
if digest and digest.startswith('sha1:'):
entry['digest'] = digest[len('sha1:'):]
elif not entry.get('digest'):
entry['digest'] = '-'
# optional json metadata, if present
metadata = record.rec_headers.get_header('WARC-Json-Metadata')
if metadata:
entry['metadata'] = metadata
return entry
#=================================================================
def parse_arc_record(self, record):
""" Parse arc record
"""
if record.rec_type == 'arc_header':
return None
url = record.rec_headers.get_header('uri')
url = url.replace('\r', '%0D')
url = url.replace('\n', '%0A')
# replace formfeed
url = url.replace('\x0c', '%0C')
# replace nulls
url = url.replace('\x00', '%00')
entry = self._create_index_entry(record.rec_type)
entry['url'] = url
# timestamp
entry['timestamp'] = record.rec_headers.get_header('archive-date')
if len(entry['timestamp']) > 14:
entry['timestamp'] = entry['timestamp'][:14]
if not self.options.get('minimal'):
# mime
entry.extract_mime(record.rec_headers.get_header('content-type'))
# status
entry.extract_status(record.status_headers)
# digest
entry['digest'] = '-'
return entry
def __call__(self, fh):
aiter = ArchiveIterator(fh, self.options.get('minimal', False),
self.options.get('verify_http', False))
entry_iter = self.create_record_iter(aiter)
if self.options.get('append_post'):
entry_iter = self.join_request_records(entry_iter)
for entry in entry_iter:
if (entry.record.rec_type in ('request', 'warcinfo') and
not self.options.get('include_all')):
continue
yield entry
class ArchiveIndexEntry(ArchiveIndexEntryMixin, dict):
pass
class OrderedArchiveIndexEntry(ArchiveIndexEntryMixin, OrderedDict):
pass
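#=================================================================
# Illustrative usage sketch: print a few index fields for every record in a
# WARC/ARC file. The command-line handling here is only an example and not
# part of the module's public interface.
if __name__ == '__main__':  # pragma: no cover
    import sys
    with open(sys.argv[1], 'rb') as fh:
        for entry in DefaultRecordIter()(fh):
            print(' '.join((entry['urlkey'], entry['timestamp'],
                            entry.get('status', '-'), entry['offset'])))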
|
gpl-3.0
| 1,864,010,479,243,279,400
| 30.395349
| 99
| 0.535825
| false
| 4.393491
| false
| false
| false
|
matpalm/cartpoleplusplus
|
dqn_cartpole.py
|
1
|
2303
|
#!/usr/bin/env python
# copy pasta from https://github.com/matthiasplappert/keras-rl/blob/master/examples/dqn_cartpole.py
# with some extra arg parsing
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import bullet_cartpole
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--num-train', type=int, default=100)
parser.add_argument('--num-eval', type=int, default=0)
parser.add_argument('--load-file', type=str, default=None)
parser.add_argument('--save-file', type=str, default=None)
bullet_cartpole.add_opts(parser)
opts = parser.parse_args()
print "OPTS", opts
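# Example invocation (illustrative only; any extra flags registered by
# bullet_cartpole.add_opts are accepted as well):
#   ./dqn_cartpole.py --num-train 10000 --num-eval 5 --save-file dqn.weights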
ENV_NAME = 'BulletCartpole'
# Get the environment and extract the number of actions.
env = bullet_cartpole.BulletCartpole(opts=opts, discrete_actions=True)
nb_actions = env.action_space.n
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(32))
model.add(Activation('tanh'))
#model.add(Dense(16))
#model.add(Activation('relu'))
#model.add(Dense(16))
#model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
memory = SequentialMemory(limit=50000)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
if opts.load_file is not None:
print "loading weights from from [%s]" % opts.load_file
dqn.load_weights(opts.load_file)
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=opts.num_train, visualize=True, verbose=2)
# After training is done, we save the final weights.
if opts.save_file is not None:
print "saving weights to [%s]" % opts.save_file
dqn.save_weights(opts.save_file, overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=opts.num_eval, visualize=True)
|
mit
| 8,919,449,101,382,390,000
| 31.9
| 99
| 0.752497
| false
| 3.112162
| false
| false
| false
|
CTSNE/NodeDefender
|
NodeDefender/config/database.py
|
1
|
3997
|
import NodeDefender
import flask_migrate
import sqlalchemy
import os
from flask_sqlalchemy import SQLAlchemy
import alembic
import shutil
import pip
default_config = {'engine' : '',
'username' : '',
'password' : '',
'host' : '',
'port' : '',
'database' : '',
'filepath' : 'nodedefender.sql'}
config = default_config.copy()
def load_config(parser):
config.update(parser['DATABASE'])
NodeDefender.app.config.update(
DATABASE_ENGINE=config['engine'],
DATABASE_USERNAME=config['username'],
DATABASE_PASSWORD=config['password'],
DATABASE_HOST=config['host'],
DATABASE_PORT=config['port'],
DATABASE_DATABASE=config['database'],
DATABASE_FILEPATH=config['filepath'])
if NodeDefender.app.testing:
NodeDefender.app.config.update(
SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:")
else:
NodeDefender.app.config.update(
SQLALCHEMY_DATABASE_URI = get_uri())
return config
def test_database():
app = NodeDefender.app
app.config.update(
SQLALCHEMY_DATABASE_URI = get_uri())
db = NodeDefender.db.sql.load(app)
folder = NodeDefender.config.migrations_folder
migrate = flask_migrate.Migrate(app, db, folder)
try:
init_migrations(app)
except alembic.util.exc.CommandError:
drop_alembic_table(db)
remove_migrations_folder(folder)
init_migrations(app)
try:
migrate_database(app)
upgrade_database(app)
except Exception:
pass
return True
def drop_alembic_table(db):
query = sqlalchemy.text("drop table alembic_version")
try:
db.engine.execute(query)
except Exception:
pass
return True
def remove_migrations_folder(folder):
try:
shutil.rmtree(folder)
except FileNotFoundError:
pass
return True
def init_migrations(app):
with app.app_context():
flask_migrate.init()
def migrate_database(app):
with app.app_context():
flask_migrate.migrate()
def upgrade_database(app):
with app.app_context():
flask_migrate.upgrade()
def install_mysql():
    try:
        import pymysql  # noqa: F401 -- availability check only
    except ImportError:
        # pip.main returns 0 on success, non-zero on failure
        if pip.main(['install', 'pymysql']) != 0:
            return False
    return True
def install_postgresql():
    # pip.main returns 0 on success, non-zero on failure
    if pip.main(['install', 'psycopg2']) == 0:
return True
return False
def get_uri():
if config['engine'] == 'sqlite':
return 'sqlite:///' + config['filepath']
username = config['username']
password = config['password']
host = config['host']
port = config['port']
database = config['database']
if config['engine'] == 'mysql':
return 'mysql+pymysql://'+username+':'+password+'@'+host+':'+port+\
'/'+database
elif config['engine'] == 'postgresql':
return 'postgresql://'+username+':'+password+'@'+host+':'+port+\
               '/'+database
return "sqlite:///:memory:"
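# For reference, get_uri() builds connection strings such as the following
# (user, password, host and port values are placeholders, not defaults):
#   sqlite:///nodedefender.sql
#   mysql+pymysql://user:secret@localhost:3306/nodedefender
#   postgresql://user:secret@localhost:5432/nodedefender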
def set_default():
for key, value in default_config.items():
NodeDefender.config.parser['DATABASE'][key] = str(value)
return True
def set(**kwargs):
for key, value in kwargs.items():
if key not in config:
continue
if key == "filepath" and value is not None:
value = os.path.join(NodeDefender.config.datafolder, value)
if key == 'engine' and value == 'postgresql':
if not install_postgresql():
raise ImportError("Not able to install PostgreSQL\
Please verify that libpq-dev is installed")
if key == 'engine' and value == 'mysql':
if not install_mysql():
raise ImportError("Not able to install MySQL")
config[key] = str(value)
test_database()
return True
def write():
NodeDefender.config.parser['DATABASE'] = config
NodeDefender.config.write()
|
mit
| -6,365,526,685,889,680,000
| 27.347518
| 75
| 0.597698
| false
| 4.078571
| true
| false
| false
|
mufaddalq/cloudstack-datera-driver
|
tools/marvin/marvin/deployAndRun.py
|
1
|
5113
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tcExecuteEngine import TestCaseExecuteEngine
import sys
import os
import traceback
import time
from argparse import ArgumentParser
from marvinInit import MarvinInit
from marvin.codes import (SUCCESS,
FAILED,
EXCEPTION,
UNKNOWN_ERROR
)
parser = None
def printAndExit():
'''
Prints pretty message for parser and exit
'''
global parser
if parser is not None:
parser.print_usage()
exit(1)
def parseAndCheck():
'''
Parses,reads the options and verifies for the config file
'''
global parser
parser = ArgumentParser()
parser.add_argument("-d", "--tcpath", dest="tcpath",
help="the test case directory or file path")
parser.add_argument("-c", "--config", action="store",
default="./datacenterCfg", dest="config",
help="the path where the json config file generated,\
by default is ./datacenterCfg")
parser.add_argument("-l", "--load", dest="load", action="store_true",
help="only load config, do not deploy,\
it will only run testcase")
parser.add_argument("-n", "--num", dest="number",
help="how many times you want to run the tests")
options = parser.parse_args()
cfg_file = options.config
tc_path = options.tcpath
load_flag = options.load
num_iter = 1 if options.number is None else int(options.number)
'''
Check if the config file is None or not and exit accordingly
'''
if cfg_file is None:
printAndExit()
return {"cfg_file": cfg_file,
"load_flag": load_flag,
"tc_path": tc_path,
"num_iter": num_iter}
def startMarvin(cfg_file, load_flag):
'''
Initialize the Marvin
'''
try:
obj_marvininit = MarvinInit(cfg_file, load_flag)
if obj_marvininit.init() == SUCCESS:
testClient = obj_marvininit.getTestClient()
tcRunLogger = obj_marvininit.getLogger()
parsedConfig = obj_marvininit.getParsedConfig()
debugStream = obj_marvininit.getDebugFile()
return {"tc_client": testClient,
"tc_runlogger": tcRunLogger,
"tc_parsedcfg": parsedConfig,
"tc_debugstream": debugStream}
else:
print "\nMarvin Initialization Failed"
exit(1)
except Exception, e:
print "\n Exception occurred while starting Marvin %s" % str(e)
exit(1)
def runTCs(num_iter, inp1, inp2):
'''
Run Test Cases based upon number of iterations
'''
n = 0
while(n < num_iter):
engine = TestCaseExecuteEngine(inp2["tc_client"],
inp2["tc_parsedcfg"],
inp2["tc_runlogger"],
inp2["tc_debugstream"])
if inp1["tc_file"] is not None:
engine.loadTestsFromFile(inp1["tc_file"])
else:
engine.loadTestsFromDir(inp1["tc_dir"])
engine.run()
n = n + 1
def checkTCPath(tc_path):
'''
Verifies if the tc_path is a folder or file and its existence
'''
ret = {"tc_file": None, "tc_dir": None}
check = True
if tc_path is None:
printAndExit()
else:
if os.path.isfile(tc_path):
ret["tc_file"] = tc_path
elif os.path.isdir(tc_path):
ret["tc_dir"] = tc_path
else:
check = False
if check is False:
        print "\nTC Path is invalid. So exiting"
exit(1)
return ret
if __name__ == "__main__":
'''
1. Parse and Check
'''
out1 = parseAndCheck()
print "\nStep1 :Parsing Options And Check Went Fine"
'''
2. Start Marvin
'''
out2 = startMarvin(out1["cfg_file"], out1["load_flag"])
print "\nStep2: Marvin Initialization Went Fine"
'''
3. Check TC folder or Module and Path existence
'''
out3 = checkTCPath(out1["tc_path"])
print "\nStep3: TC Path Check Went Fine"
'''
4. Run TCs
'''
runTCs(out1["num_iter"], out3, out2)
print "\nStep4: TC Running Finished"
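# Example invocation (illustrative paths only):
#   python deployAndRun.py -c ./datacenterCfg -d test/integration/smoke -n 2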
|
apache-2.0
| 4,409,944,793,403,968,000
| 29.616766
| 77
| 0.580677
| false
| 4.07086
| true
| false
| false
|
franciscouzo/crosswordly
|
app/app/models.py
|
1
|
1735
|
import string
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
class UserProfile(models.Model):
user = models.OneToOneField(User)
score = models.IntegerField(default=0)
stars = models.IntegerField(default=0)
letters = models.CharField(max_length=8, blank=True)
x = models.IntegerField(default=0)
y = models.IntegerField(default=0)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.get_or_create(user=instance)
class Word(models.Model):
word = models.CharField(max_length=15, unique=True)
used = models.IntegerField(default=0)
def __str__(self):
return self.word
class WordHistory(models.Model):
user = models.ForeignKey(User)
word = models.ForeignKey(Word)
score = models.IntegerField()
x = models.IntegerField()
y = models.IntegerField()
def __str__(self):
return '{} - {} ({}, {})'.format(self.user, self.word, self.x, self.y)
class Cell(models.Model):
user = models.ForeignKey(User)
x = models.IntegerField()
y = models.IntegerField()
letter = models.CharField(
choices=[(c, c) for c in string.ascii_lowercase], max_length=1)
datetime = models.DateTimeField(auto_now=True)
def __str__(self):
return '{} - {} ({}, {})'.format(
self.user, self.letter, self.x, self.y)
class Meta:
unique_together = ('x', 'y')
for cls in [UserProfile, Word, WordHistory, Cell]:
admin.site.register(cls)
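# Illustrative shell usage (a sketch; the object values are made up):
#   user = User.objects.create_user('alice')
#   word, _ = Word.objects.get_or_create(word='hello')
#   WordHistory.objects.create(user=user, word=word, score=8, x=3, y=4)
#   Cell.objects.create(user=user, x=3, y=4, letter='h')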
|
gpl-3.0
| -3,721,119,490,238,843,400
| 26.539683
| 78
| 0.665706
| false
| 3.668076
| false
| false
| false
|
AdrianGaudebert/elmo
|
apps/privacy/migrations/0001_initial.py
|
1
|
5309
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Policy'
db.create_table('privacy_policy', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('privacy', ['Policy'])
# Adding model 'Comment'
db.create_table('privacy_comment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('policy', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['privacy.Policy'])),
('who', self.gf('django.db.models.fields.related.ForeignKey')(related_name='privacy_comments', to=orm['auth.User'])),
))
db.send_create_signal('privacy', ['Comment'])
def backwards(self, orm):
# Deleting model 'Policy'
db.delete_table('privacy_policy')
# Deleting model 'Comment'
db.delete_table('privacy_comment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'privacy.comment': {
'Meta': {'object_name': 'Comment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'policy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['privacy.Policy']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'privacy_comments'", 'to': "orm['auth.User']"})
},
'privacy.policy': {
'Meta': {'object_name': 'Policy'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['privacy']
|
mpl-2.0
| 566,294,100,273,939,260
| 58.662921
| 182
| 0.557355
| false
| 3.792143
| false
| false
| false
|
Erotemic/hotspotter
|
hotspotter/feature_compute2.py
|
1
|
7452
|
''' Computes feature representations '''
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off,
rrr, profile) = __common__.init(__name__, '[fc2]')
# scientific
import numpy as np
# python
from os.path import join
# hotspotter
from hscom import helpers as util
from hscom import params
from hscom import fileio as io
from hscom.Parallelize import parallel_compute
import extern_feat
def whiten_features(desc_list):
import algos
print('[fc2] * Whitening features')
ax2_desc = np.vstack(desc_list)
ax2_desc_white = algos.scale_to_byte(algos.whiten(ax2_desc))
index = 0
offset = 0
for cx in xrange(len(desc_list)):
old_desc = desc_list[cx]
print ('[fc2] * ' + util.info(old_desc, 'old_desc'))
offset = len(old_desc)
new_desc = ax2_desc_white[index:(index + offset)]
desc_list[cx] = new_desc
index += offset
# =======================================
# Main Script
# =======================================
@profile
def bigcache_feat_save(cache_dir, uid, ext, kpts_list, desc_list):
print('[fc2] Caching desc_list and kpts_list')
io.smart_save(kpts_list, cache_dir, 'kpts_list', uid, ext)
io.smart_save(desc_list, cache_dir, 'desc_list', uid, ext)
@profile
def bigcache_feat_load(cache_dir, uid, ext):
#io.debug_smart_load(cache_dir, fname='*', uid=uid, ext='.*')
kpts_list = io.smart_load(cache_dir, 'kpts_list', uid, ext, can_fail=True)
desc_list = io.smart_load(cache_dir, 'desc_list', uid, ext, can_fail=True)
if desc_list is None or kpts_list is None:
return None
desc_list = desc_list.tolist()
kpts_list = kpts_list.tolist()
print('[fc2] Loaded kpts_list and desc_list from big cache')
return kpts_list, desc_list
@profile
def sequential_feat_load(feat_cfg, feat_fpath_list):
kpts_list = []
desc_list = []
# Debug loading (seems to use lots of memory)
print('\n')
try:
nFeats = len(feat_fpath_list)
prog_label = '[fc2] Loading feature: '
mark_progress, end_progress = util.progress_func(nFeats, prog_label)
for count, feat_path in enumerate(feat_fpath_list):
try:
npz = np.load(feat_path, mmap_mode=None)
except IOError:
print('\n')
util.checkpath(feat_path, verbose=True)
print('IOError on feat_path=%r' % feat_path)
raise
kpts = npz['arr_0']
desc = npz['arr_1']
npz.close()
kpts_list.append(kpts)
desc_list.append(desc)
mark_progress(count)
end_progress()
print('[fc2] Finished load of individual kpts and desc')
except MemoryError:
print('\n------------')
print('[fc2] Out of memory')
print('[fc2] Trying to read: %r' % feat_path)
print('[fc2] len(kpts_list) = %d' % len(kpts_list))
print('[fc2] len(desc_list) = %d' % len(desc_list))
raise
if feat_cfg.whiten:
desc_list = whiten_features(desc_list)
return kpts_list, desc_list
# Maps a preference string into a function
feat_type2_precompute = {
'hesaff+sift': extern_feat.precompute_hesaff,
}
@profile
def _load_features_individually(hs, cx_list):
use_cache = not params.args.nocache_feats
feat_cfg = hs.prefs.feat_cfg
feat_dir = hs.dirs.feat_dir
feat_uid = feat_cfg.get_uid()
print('[fc2] Loading ' + feat_uid + ' individually')
# Build feature paths
rchip_fpath_list = [hs.cpaths.cx2_rchip_path[cx] for cx in iter(cx_list)]
cid_list = hs.tables.cx2_cid[cx_list]
feat_fname_fmt = ''.join(('cid%d', feat_uid, '.npz'))
feat_fpath_fmt = join(feat_dir, feat_fname_fmt)
feat_fpath_list = [feat_fpath_fmt % cid for cid in cid_list]
#feat_fname_list = [feat_fname_fmt % cid for cid in cid_list]
# Compute features in parallel, saving them to disk
kwargs_list = [feat_cfg.get_dict_args()] * len(rchip_fpath_list)
pfc_kwargs = {
'func': feat_type2_precompute[feat_cfg.feat_type],
'arg_list': [rchip_fpath_list, feat_fpath_list, kwargs_list],
'num_procs': params.args.num_procs,
'lazy': use_cache,
}
parallel_compute(**pfc_kwargs)
# Load precomputed features sequentially
kpts_list, desc_list = sequential_feat_load(feat_cfg, feat_fpath_list)
return kpts_list, desc_list
@profile
def _load_features_bigcache(hs, cx_list):
# args for smart load/save
feat_cfg = hs.prefs.feat_cfg
feat_uid = feat_cfg.get_uid()
cache_dir = hs.dirs.cache_dir
sample_uid = util.hashstr_arr(cx_list, 'cids')
bigcache_uid = '_'.join((feat_uid, sample_uid))
ext = '.npy'
loaded = bigcache_feat_load(cache_dir, bigcache_uid, ext)
if loaded is not None: # Cache Hit
kpts_list, desc_list = loaded
else: # Cache Miss
        kpts_list, desc_list = _load_features_individually(hs, cx_list)
# Cache all the features
bigcache_feat_save(cache_dir, bigcache_uid, ext, kpts_list, desc_list)
return kpts_list, desc_list
@profile
@util.indent_decor('[fc2]')
def load_features(hs, cx_list=None, **kwargs):
# TODO: There needs to be a fast way to ensure that everything is
# already loaded. Same for cc2.
print('=============================')
print('[fc2] Precomputing and loading features: %r' % hs.get_db_name())
#----------------
# COMPUTE SETUP
#----------------
use_cache = not params.args.nocache_feats
use_big_cache = use_cache and cx_list is None
feat_cfg = hs.prefs.feat_cfg
feat_uid = feat_cfg.get_uid()
if hs.feats.feat_uid != '' and hs.feats.feat_uid != feat_uid:
print('[fc2] Disagreement: OLD_feat_uid = %r' % hs.feats.feat_uid)
print('[fc2] Disagreement: NEW_feat_uid = %r' % feat_uid)
print('[fc2] Unloading all chip information')
hs.unload_all()
hs.load_chips(cx_list=cx_list)
print('[fc2] feat_uid = %r' % feat_uid)
# Get the list of chip features to load
cx_list = hs.get_valid_cxs() if cx_list is None else cx_list
if not np.iterable(cx_list):
cx_list = [cx_list]
    print('[fc2] len(cx_list) = %r' % len(cx_list))
if len(cx_list) == 0:
return # HACK
cx_list = np.array(cx_list) # HACK
if use_big_cache: # use only if all descriptors requested
kpts_list, desc_list = _load_features_bigcache(hs, cx_list)
else:
        kpts_list, desc_list = _load_features_individually(hs, cx_list)
# Extend the datastructure if needed
list_size = max(cx_list) + 1
util.ensure_list_size(hs.feats.cx2_kpts, list_size)
util.ensure_list_size(hs.feats.cx2_desc, list_size)
# Copy the values into the ChipPaths object
for lx, cx in enumerate(cx_list):
hs.feats.cx2_kpts[cx] = kpts_list[lx]
for lx, cx in enumerate(cx_list):
hs.feats.cx2_desc[cx] = desc_list[lx]
hs.feats.feat_uid = feat_uid
print('[fc2]=============================')
def clear_feature_cache(hs):
feat_cfg = hs.prefs.feat_cfg
feat_dir = hs.dirs.feat_dir
cache_dir = hs.dirs.cache_dir
feat_uid = feat_cfg.get_uid()
print('[fc2] clearing feature cache: %r' % feat_dir)
util.remove_files_in_dir(feat_dir, '*' + feat_uid + '*', verbose=True, dryrun=False)
util.remove_files_in_dir(cache_dir, '*' + feat_uid + '*', verbose=True, dryrun=False)
pass
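# Illustrative call pattern (a sketch; `hs` is assumed to be an already-opened
# HotSpotter instance supplied by the surrounding application):
#   load_features(hs)                  # precompute + load features for all chips
#   load_features(hs, cx_list=[0, 1])  # or only for selected chip indexes
#   clear_feature_cache(hs)            # wipe cached features for this config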
|
apache-2.0
| 4,390,036,601,219,088,000
| 35.891089
| 89
| 0.608427
| false
| 3.10759
| false
| false
| false
|
Maselkov/GW2Bot
|
guildwars2/worldsync.py
|
1
|
7774
|
import asyncio
import discord
from discord.ext import commands, tasks
from .exceptions import APIError, APIKeyError
class WorldsyncMixin:
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
@commands.group(case_insensitive=True)
async def worldsync(self, ctx):
"""Role management based on in game account world"""
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
@worldsync.command(name="toggle")
async def worldsync_toggle(self, ctx):
"""Enable automatic world roles"""
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
guild = ctx.guild
doc = await self.bot.database.get(guild, self)
worldsync = doc.get("worldsync", {})
enabled = not worldsync.get("enabled", False)
world_role = guild.get_role(worldsync.get("world_role"))
ally_role = guild.get_role(worldsync.get("ally_role"))
world_id = worldsync.get("world_id")
        if enabled and (not world_role or not ally_role or not world_id):
return await ctx.send(
"You must set the home world, as well as world role and "
"ally role before you can enable worldsync\n```\n"
f"{ctx.prefix}worldsync world\n"
f"{ctx.prefix}worldsync worldrole\n"
f"{ctx.prefix}worldsync allyrole```")
await self.bot.database.set(guild, {"worldsync.enabled": enabled},
self)
if enabled:
await ctx.send("Worldsync is now enabled. Use the same "
"command to disable.")
doc = await self.bot.database.get(guild, self)
return await self.sync_worlds(worldsync, guild)
await ctx.send("Worldsync disabled")
@worldsync.command(name="world")
async def worldsync_world(self, ctx, *, world):
"""Set your home world"""
if not world:
return await ctx.send_help(ctx.command)
wid = await self.get_world_id(world)
if not wid:
return await ctx.send("Invalid world name")
await self.bot.database.set(ctx.guild, {"worldsync.world_id": wid},
self)
await ctx.send(f"World set! Use `{ctx.prefix}worldsync toggle` to "
"enable if you haven't already")
@worldsync.command(name="worldrole")
async def worldsync_worldrole(self, ctx, role: discord.Role):
"""Set the role to be given to those in the home world.
You can use role mention or ID"""
await self.bot.database.set(ctx.guild,
{"worldsync.world_role": role.id}, self)
await ctx.send("Role set. Make sure the bot has enough permissions "
"to grant the role.")
@worldsync.command(name="allyrole")
async def worldsync_allyrole(self, ctx, role: discord.Role):
"""Set the role to be given to those in the linked worlds.
You can use role mention or ID"""
await self.bot.database.set(ctx.guild,
{"worldsync.ally_role": role.id}, self)
await ctx.send("Role set. Make sure the bot has enough permissions "
"to grant the role.")
@worldsync.command(name="now")
async def worldsync_now(self, ctx):
"""Run the worldsync now"""
msg = await ctx.send("Starting worldsync." +
self.get_emoji(ctx, "loading"))
doc = await self.bot.database.get(ctx.guild, self)
worldsync = doc.get("worldsync", {})
enabled = worldsync.get("enabled", False)
if not enabled:
return await ctx.send("Worldsync is not enabled")
async with ctx.typing():
await self.sync_worlds(worldsync, ctx.guild)
await ctx.send("Worldsync complete")
try:
await msg.delete()
except discord.HTTPException:
pass
async def get_linked_worlds(self, world):
endpoint = f"wvw/matches/overview?world={world}"
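        # The overview response is expected to expose an "all_worlds" mapping
        # of team colour -> list of world ids; the loop below strips the home
        # world from its own team list so only the linked worlds are returned.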
results = await self.call_api(endpoint)
for worlds in results["all_worlds"].values():
if world in worlds:
worlds.remove(world)
return worlds
return []
async def worldsync_member(self, member, world_role, ally_role, world_id,
linked_worlds):
try:
on_world = False
on_linked = False
try:
results = await self.call_api("account", member)
user_world = results["world"]
if user_world == world_id:
on_world = True
if user_world in linked_worlds:
on_linked = True
except APIKeyError:
pass
except APIError:
return
single_role = world_role == ally_role
if on_world:
if world_role not in member.roles:
await member.add_roles(world_role)
if not single_role and ally_role in member.roles:
await member.remove_roles(ally_role)
return
if on_linked:
if ally_role not in member.roles:
await member.add_roles(ally_role)
if not single_role and world_role in member.roles:
await member.remove_roles(world_role)
return
if world_role in member.roles:
await member.remove_roles(world_role)
if ally_role in member.roles:
await member.remove_roles(ally_role)
        except Exception:
pass
async def sync_worlds(self, doc, guild):
world_id = doc.get("world_id")
try:
linked_worlds = await self.get_linked_worlds(world_id)
except APIError as e:
return
world_role = guild.get_role(doc.get("world_role"))
ally_role = guild.get_role(doc.get("ally_role"))
if not world_role or not ally_role:
return
for member in guild.members:
if member.bot:
continue
await self.worldsync_member(member, world_role, ally_role,
world_id, linked_worlds)
await asyncio.sleep(0.25)
@commands.Cog.listener("on_member_join")
async def worldsync_on_member_join(self, member):
if member.bot:
return
guild = member.guild
doc = await self.bot.database.get(guild, self)
worldsync = doc.get("worldsync", {})
enabled = worldsync.get("enabled", False)
if not enabled:
return
world_role = guild.get_role(worldsync.get("world_role"))
ally_role = guild.get_role(worldsync.get("ally_role"))
if not world_role or not ally_role:
return
world_id = worldsync.get("world_id")
try:
linked_worlds = await self.get_linked_worlds(world_id)
except APIError as e:
return
await self.worldsync_member(member, world_role, ally_role, world_id,
linked_worlds)
@tasks.loop(minutes=5)
async def worldsync_task(self):
cursor = self.bot.database.iter("guilds", {"worldsync.enabled": True},
self,
subdocs=["worldsync"])
async for doc in cursor:
try:
await self.sync_worlds(doc, doc["_obj"])
except asyncio.CancelledError:
return
except Exception as e:
pass
|
mit
| 8,110,200,218,613,507,000
| 39.489583
| 78
| 0.551839
| false
| 3.912431
| false
| false
| false
|
stevarino/cmsc495
|
mac_app/forms.py
|
1
|
1837
|
from django import forms
from django.contrib.auth.models import User
from .models import Department
class NewUserTicket(forms.Form):
username = forms.CharField(label='Username', max_length=32)
password = forms.CharField(label='Password', widget=forms.PasswordInput)
firstname = forms.CharField(label='First Name', max_length=32, required=False)
lastname = forms.CharField(label='Last Name', max_length=32, required=False)
address = forms.CharField(max_length=256, required=False)
city = forms.CharField(max_length=128, required=False)
state = forms.CharField(max_length=128, required=False)
postal_code = forms.CharField(max_length=16, required=False)
phone = forms.CharField(max_length=16, required=False)
department = forms.ModelChoiceField(Department.objects.all())
# form validator to ensure unique username
def clean_username(self):
username = self.cleaned_data['username']
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(u'Username "{}" is already in use.'.format(username))
class UserSearchForm(forms.Form):
username = forms.CharField(label='Username', max_length=32, required=False)
first_name = forms.CharField(label='First Name', max_length=32, required=False)
last_name = forms.CharField(label='Last Name', max_length=32, required=False)
def get_users(self):
users = User.objects
is_filtered = False
for f in ['first_name', 'last_name', 'username']:
if self.cleaned_data[f]:
is_filtered = True
users = users.filter(**{
f+'__icontains': self.cleaned_data[f]
})
if is_filtered:
return users
return []
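# Illustrative view-side usage (a sketch; the surrounding request handling is
# assumed, not defined in this module):
#   form = UserSearchForm(request.GET)
#   if form.is_valid():
#       users = form.get_users()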
|
mit
| -1,642,018,643,992,884,500
| 40.772727
| 89
| 0.663582
| false
| 4.028509
| false
| false
| false
|
CanalTP/navitia
|
source/navitiacommon/navitiacommon/parser_args_type.py
|
1
|
9951
|
# encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from collections import namedtuple
import ujson
import geojson
import flask
from dateutil import parser
from flask_restful.inputs import boolean
import six
import sys
class TypeSchema(object):
def __init__(self, type=None, metadata=None):
self.type = type
self.metadata = metadata
class CustomSchemaType(object):
def schema(self):
# by default we look for a _schema variable, but it can be overriden
return self._schema
class DepthArgument(CustomSchemaType):
def __call__(self, value, name):
conv_value = int(value)
if conv_value > 3:
raise ValueError("The {} argument has to be <= 3, you gave : {}".format(name, value))
return conv_value
def schema(self):
return TypeSchema(type=int, metadata={'minimum': 0, 'maximum': 3})
class PositiveFloat(CustomSchemaType):
def __call__(self, value, name):
conv_value = float(value)
if conv_value <= 0:
raise ValueError("The {} argument has to be > 0, you gave : {}".format(name, value))
return conv_value
def schema(self):
return TypeSchema(type=float, metadata={'minimum': 0})
class IntRange(CustomSchemaType):
def __init__(self, min, max):
self.min = min
self.max = max
def __call__(self, value, name):
conv_value = int(value)
if not self.min <= conv_value <= self.max:
raise ValueError(
"The {} argument has to be in range [{}, {}], you gave : {}".format(
name, self.min, self.max, value
)
)
return conv_value
def schema(self):
        return TypeSchema(type=int, metadata={'minimum': self.min, 'maximum': self.max})
class FloatRange(CustomSchemaType):
def __init__(self, min, max):
self.min = min
self.max = max
def __call__(self, value, name):
conv_value = float(value)
if not self.min <= conv_value <= self.max:
raise ValueError(
"The {} argument has to be in range [{}, {}], you gave : {}".format(
name, self.min, self.max, value
)
)
return conv_value
def schema(self):
        return TypeSchema(type=float, metadata={'minimum': self.min, 'maximum': self.max})
class SpeedRange(CustomSchemaType):
map_range = {
'bike_speed': (0.01, 15),
'bss_speed': (0.01, 15),
'walking_speed': (0.01, 4),
'car_speed': (0.01, 50),
'taxi_speed': (0.01, 50),
'car_no_park_speed': (0.01, 50),
'ridesharing_speed': (0.01, 50),
'default': (sys.float_info.min, sys.float_info.max),
}
def __call__(self, value, name):
conv_value = float(value)
(range_min, range_max) = (
SpeedRange.map_range[name] if name in SpeedRange.map_range else SpeedRange.map_range['default']
)
if not range_min <= conv_value <= range_max:
raise ValueError(
"The {} argument has to be in range [{}, {}], you gave : {}".format(
name, range_min, range_max, value
)
)
return conv_value
def schema(self):
return TypeSchema(type=float, metadata={'minimum': 0})
class BooleanType(CustomSchemaType):
def __call__(self, value):
if isinstance(value, bool):
return value
return boolean(value)
def schema(self):
return TypeSchema(type=bool)
class OptionValue(CustomSchemaType):
def __init__(self, optional_values):
self.optional_values = optional_values
def __call__(self, value, name):
# if input value is iterable
if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
if not all((v in self.optional_values for v in value)):
error = "The {} argument must be in list {}, you gave {}".format(
name, str(self.optional_values), value
)
raise ValueError(error)
elif not (value in self.optional_values):
error = "The {} argument must be in list {}, you gave {}".format(
name, str(self.optional_values), value
)
raise ValueError(error)
return value
def schema(self):
return TypeSchema(type=str, metadata={'enum': self.optional_values})
class DescribedOptionValue(OptionValue):
def __init__(self, optional_values):
self.description = "Possible values:\n"
self.description += '\n'.join([" * '{}' - {}".format(k, v) for k, v in optional_values.items()])
super(DescribedOptionValue, self).__init__(optional_values.keys())
def schema(self):
ts = super(DescribedOptionValue, self).schema()
ts.metadata['description'] = self.description
return ts
class IntervalValue(CustomSchemaType):
def __init__(self, type=int, min_value=None, max_value=None):
self.type = type
self.min_value = min_value
self.max_value = max_value
def __call__(self, value, name):
v = self.type(value)
if self.min_value:
v = max(v, self.min_value)
if self.max_value:
v = min(v, self.max_value)
return v
def schema(self):
metadata = {}
if self.min_value:
metadata['minimum'] = self.min_value
if self.max_value:
metadata['maximum'] = self.max_value
return TypeSchema(type=self.type, metadata=metadata)
def geojson_argument(value):
def is_geometry_valid(geometry):
geometry_str = ujson.dumps(geometry)
valid = geojson.is_valid(geojson.loads(geometry_str))
return 'valid' in valid and (valid['valid'] == 'yes' or valid['valid'] == '')
if value:
if not isinstance(value, dict):
raise ValueError('invalid json')
if not is_geometry_valid(value):
raise ValueError('invalid geojson')
geometry = value.get('geometry', {}).get('type')
if not geometry or geometry.lower() != 'polygon':
raise ValueError('invalid geometry type')
return value
class CoordFormat(CustomSchemaType):
def __init__(self, nullable=False):
super(CoordFormat, self).__init__()
self.nullable = nullable
def __call__(self, coord):
"""
Validate coordinates format (lon;lat)
"""
if coord == '' and self.nullable:
return coord
lon_lat_splitted = coord.split(";")
if len(lon_lat_splitted) != 2:
raise ValueError('Invalid coordinate parameter. It must be lon;lat where lon and lat are floats.')
lon, lat = lon_lat_splitted
lat = float(lat)
if not (-90.0 <= lat <= 90.0):
raise ValueError("lat should be between -90 and 90")
lon = float(lon)
if not (180.0 >= lon >= -180.0):
raise ValueError("lon should be between -180 and 180")
return coord
def schema(self):
return TypeSchema(type=str, metadata={'pattern': '.*;.*'})
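# Worked example for CoordFormat (illustrative): CoordFormat()('2.3488;48.8534')
# returns the string unchanged, while CoordFormat()('200;48.85') raises
# ValueError because the longitude falls outside [-180, 180].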
class UnsignedInteger(CustomSchemaType):
def __call__(self, value):
try:
d = int(value)
if d < 0:
raise ValueError('invalid unsigned int')
return d
except ValueError as e:
raise ValueError("Unable to evaluate, {}".format(e))
def schema(self):
return TypeSchema(type=int, metadata={'minimum': 0})
class PositiveInteger(CustomSchemaType):
def __call__(self, value):
try:
d = int(value)
if d <= 0:
raise ValueError('invalid positive int')
return d
except ValueError as e:
raise ValueError("Unable to evaluate, {}".format(e))
def schema(self):
return TypeSchema(type=int, metadata={'minimum': 1})
def _parse_input_date(date):
"""
datetime parse date seems broken, '155' with format '%H%M%S' is not
rejected but parsed as 1h, 5mn, 5s...
so use use for the input date parse dateutil even if the 'guess'
mechanism seems a bit dangerous
"""
return parser.parse(date, dayfirst=False, yearfirst=True)
class DateTimeFormat(CustomSchemaType):
def __call__(self, value):
"""
we want to valid the date format
"""
try:
d = _parse_input_date(value)
if d.year <= 1970:
raise ValueError('date is too early!')
return d
except ValueError as e:
raise ValueError("Unable to parse datetime, {}".format(e))
def schema(self):
return TypeSchema(type=str, metadata={'format': 'date-time'})
|
agpl-3.0
| 2,071,259,826,487,371,000
| 30.490506
| 110
| 0.592604
| false
| 3.962963
| false
| false
| false
|
lwerdna/alib
|
py/bytes.py
|
1
|
8254
|
#!/usr/bin/python
#------------------------------------------------------------------------------
#
# Copyright 2011-2016 Andrew Lamoureux
#
# This file is a part of autils.
#
# autils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------
import os
import sys
import re
from struct import pack, unpack
import string
sys.path.append(os.environ['PATH_AUTILS'])
from parsing import *
regex_hex_int = r'^(?:0x)?[a-fA-F0-9]{1,16}$'
# this is useful for parsing output from objdump, which can come
# as a list of bytes, list of words, etc.
#
# bytes example (x86_64):
# 40051c: 55 push %rbp
# 40051d: 48 89 e5 mov %rsp,%rbp
# 400520: bf d4 05 40 00 mov $0x4005d4,%edi
#
# words example (arm thumb):
# 1928: 2a00 cmp r2, #0
# 192a: d031 beq.n 1990 <.text+0x150>
# 192c: f8d8 300c ldr.w r3, [r8, #12]
#
#------------------------------------------------------------------------------
# binary data to various string representations
#------------------------------------------------------------------------------
def getHexDump(data, addr=0, grouping=1, endian='little'):
result = ''
while(data):
ascii = ''
buff16 = data[0:16]
data = data[16:]
result += "%08X: " % addr
i = 0
while i < 16:
if(i < len(buff16)):
f0 = { \
'big': {1:'>B', 2:'>H', 4:'>I', 8:'>Q'}, \
'little': {1:'<B', 2:'<H', 4:'<I', 8:'<Q'} \
}
f1 = { \
1:'%02X ', 2:'%04X ', 4:'%08X ', 8:'%016X ' \
}
temp = unpack(f0[endian][grouping], buff16[i:i+grouping])[0]
result += f1[grouping] % temp
for j in range(grouping):
if(buff16[i+j] >= ' ' and buff16[i+j] <= '~'):
ascii += buff16[i+j]
else:
ascii += '.'
else:
if grouping == 1:
result += ' '*len('DE ')
elif grouping == 2:
result += ' '*len('DEAD ')
elif grouping == 4:
result += ' '*len('DEADBEEF ')
elif grouping == 8:
result += ' '*len('DEADBEEFCAFEBABE ')
i += grouping
result += ' %s\n' % ascii
addr += 16;
return result
def getGdbWrites(addr, data):
result = ''
while(data):
if(len(data) >= 4):
result += 'set *(unsigned int *)0x%X = 0x%X\n' % \
(addr, unpack('I',data[0:4])[0])
data = data[4:]
addr += 4
elif(len(data) >= 2):
result += 'set *(unsigned short *)0x%X = 0x%X\n' % \
(addr, unpack('H',data[0:2])[0])
data = data[2:]
addr += 2
elif(len(data) == 1):
result += 'set *(unsigned char *)0x%X = 0x%X\n' % \
(addr, unpack('B',data[0:1])[0])
data = data[1:]
addr += 1
else:
print 'IMPOSSIBLE!'
return result;
def getIdaPatchIdc(addr, data):
result = ''
while(data):
if(len(data) >= 4):
result += 'PatchDword(0x%X, 0x%X);\n' % \
(addr, unpack('I',data[0:4])[0])
data = data[4:]
addr += 4
elif(len(data) >= 2):
result += 'PatchWord(0x%X, 0x%X);\n' % \
(addr, unpack('H',data[0:2])[0])
data = data[2:]
addr += 2
elif(len(data) == 1):
result += 'PatchByte(0x%X, 0x%X);\n' % \
(addr, unpack('B',data[0:1])[0])
data = data[1:]
addr += 1
else:
result += 'IMPOSSIBLE!'
return result
def getCString(data):
result = ''
count = 0
group16 = ''
while(data):
group16 += "\\x%02X" % unpack('B', data[0])[0]
data = data[1:]
count += 1
if count == 16:
result += '"%s"\n' % group16
group16 = ''
count = 0
if group16:
result += '"%s"' % group16
return result
def getPythonString(data):
temp = getCString(data)
temp = re.sub("\n", " + \\\n", temp)
return temp
def getStrAsHex(s, spaced=False):
raise Exception("use binascii.hexlify() or foo.encode('hex') instead")
#------------------------------------------------------------------------------
# bit access
#------------------------------------------------------------------------------
def getBits(val, hi, lo):
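    # e.g. getBits(0b11010110, 5, 2) == 0b0101: keep bits hi..lo inclusive and
    # shift them down so that bit `lo` lands at bit 0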
mask = (2**(hi+1) - 1) - (2**lo-1)
return (val & mask) >> lo
#------------------------------------------------------------------------------
# endian conversions
#------------------------------------------------------------------------------
def bswap32(val):
return unpack('>I', pack('<I', val))[0]
def bswap16(val):
return unpack('>H', pack('<H', val))[0]
#------------------------------------------------------------------------------
# bit byte calculations
#------------------------------------------------------------------------------
def dataXor(a, b):
assert(len(a)==len(b))
length = len(a)
result = ''
for i in range(length):
result += pack('B', ord(a[i]) ^ ord(b[i]))
return result
#------------------------------------------------------------------------------
# tests
#------------------------------------------------------------------------------
if __name__ == '__main__':
# test getFirstHexInt()
text = "" + \
"blah blah blah\n" + \
"blah blah 0xDEADBEEF blah\n" + \
"blah blah\n" + \
"0xCAFEBABEEE\n" + \
"derp werp\n" + \
"ree dee\n"
if(parseHexValue(text) == 0xDEADBEEF):
print "PASS!"
else:
print "FAIL!"
text = "" + \
"[R]ead [M]emory via [S]DIO\n" + \
"parsed address: 0x00000500\n" + \
"parsed len: 0x00000100\n" + \
"len = 0x100\n" + \
"addr = 0x500\n" + \
"partition = 0x0\n" + \
"00000500: A0 60 00 68 08 B1 47 F4 00 27 B8 F1 05 0F 18 BF .`.h..G..'......\n" + \
"00000510: 47 F0 80 77 00 2F 4F F0 01 07 0B D1 28 68 28 B1 G..w./O.....(h(.\n" + \
"00000520: 28 68 38 B1 30 46 1C F0 21 FC 18 B1 17 B1 0A F0 (h8.0F..!.......\n" + \
"00000530: B1 FC 1E E0 01 20 29 F0 59 FB 21 F0 FD FF 16 E0 ..... ).Y.!.....\n"
if(parseBytes(text) == "" + \
"\xA0\x60\x00\x68\x08\xB1\x47\xF4\x00\x27\xB8\xF1\x05\x0F\x18\xBF" + \
"\x47\xF0\x80\x77\x00\x2F\x4F\xF0\x01\x07\x0B\xD1\x28\x68\x28\xB1" + \
"\x28\x68\x38\xB1\x30\x46\x1C\xF0\x21\xFC\x18\xB1\x17\xB1\x0A\xF0" + \
"\xB1\xFC\x1E\xE0\x01\x20\x29\xF0\x59\xFB\x21\xF0\xFD\xFF\x16\xE0"):
print "PASS!"
else:
print "FAIL!"
print parseBytes(text)
data = \
"\x23\x21\x2f\x75\x73\x72\x2f\x62\x69\x6e\x2f\x70\x79\x74\x68\x6f" + \
"\x6e\x0a\x23\x20\x32\x30\x31\x32\x20\x61\x6e\x64\x72\x65\x77\x6c" + \
"\x0a\x0a\x23\x20\x72\x6f\x75\x74\x69\x6e\x65\x73\x20\x66\x6f\x72" + \
"\x20\x70\x61\x72\x73\x69\x6e\x67\x2f\x70\x72\x6f\x63\x65\x73\x73" + \
"\x69\x6e\x67\x20\x62\x69\x74\x73\x2f\x62\x79\x74\x65\x73\x0a\x0a" + \
"\x69\x6d\x70\x6f\x72\x74\x20\x72\x65\x0a\x66\x72\x6f\x6d\x20\x73" + \
"\x74\x72\x75\x63\x74\x20\x69\x6d\x70\x6f\x72\x74\x20\x70\x61\x63" + \
"\x6b\x2c\x20\x75\x6e\x70\x61\x63\x6b\x0a\x69\x6d\x70\x6f\x72\x74" + \
"\x20\x73\x74\x72\x69\x6e\x67\x0a\x0a\x72\x65\x67\x65\x78\x5f\x68" + \
"\x65\x78\x5f\x69\x6e\x74\x20\x3d\x20\x72\x27\x5e\x28\x3f\x3a\x30" + \
"\x78\x29\x3f\x5b\x61\x2d\x66\x41\x2d\x46\x30\x2d\x39\x5d\x7b\x31" + \
"\x2c\x31\x36\x7d\x24\x27\x0a\x0a\x23\x20\x67\x72\x61\x62\x73\x20" + \
"\x66\x69\x72\x73\x74\x20\x70\x61\x72\x73\x65\x61\x62\x6c\x65\x20" + \
"\x68\x65\x78\x61\x64\x65\x63\x69\x6d\x61\x6c\x20\x69\x6e\x74\x65" + \
"\x67\x65\x72\x20\x66\x72\x6f\x6d\x20\x61\x20\x6c\x69\x6e\x65\x0a" + \
"\x23\x0a\x64\x65\x66\x20\x67\x65\x74\x46\x69\x72\x73\x74\x4c\x69"
print getHexDump(data, 0, grouping=1, endian='big')
print getHexDump(data, 0, grouping=2, endian='big')
print getHexDump(data, 0, grouping=4, endian='big')
print getHexDump(data, 0, grouping=8, endian='big')
print getHexDump(data, 0, grouping=1, endian='little')
print getHexDump(data, 0, grouping=2, endian='little')
print getHexDump(data, 0, grouping=4, endian='little')
print getHexDump(data, 0, grouping=8, endian='little')
print getGdbWrites(0, data)
print getIdaPatchIdc(0, data)
print getCString(data)
print getPythonString(data)
|
gpl-3.0
| 1,060,310,704,577,615,400
| 28.478571
| 84
| 0.531015
| false
| 2.363012
| false
| false
| false
|
mancoast/CPythonPyc_test
|
fail/331_test_inspect.py
|
1
|
88168
|
import re
import sys
import types
import unittest
import inspect
import linecache
import datetime
import collections
import os
import shutil
from os.path import normcase
from test.support import run_unittest, TESTFN, DirsOnSysPath
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
# C module for test_findsource_binary
import unicodedata
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, isgenerator, isgeneratorfunction, getmembers,
# getdoc, getfile, getmodule, getsourcefile, getcomments, getsource,
# getclasstree, getargspec, getargvalues, formatargspec, formatargvalues,
# currentframe, stack, trace, isdatadescriptor
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
# Normalize file names: on Windows, the case of file names of compiled
# modules depends on the path used to start the python executable.
modfile = normcase(modfile)
def revise(filename, *args):
return (normcase(filename),) + args
import builtins
git = mod.StupidGit()
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback,
inspect.isgenerator, inspect.isgeneratorfunction])
def istest(self, predicate, exp):
obj = eval(exp)
self.assertTrue(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
if predicate == inspect.isgeneratorfunction and\
other == inspect.isfunction:
continue
self.assertFalse(other(obj), 'not %s(%s)' % (other.__name__, exp))
def generator_function_example(self):
for i in range(2):
yield i
class TestPredicates(IsTestBase):
def test_sixteen(self):
count = len([x for x in dir(inspect) if x.startswith('is')])
        # This test is here to remind you to update Doc/library/inspect.rst,
        # which claims there are 16 such functions.
expected = 16
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
global tb
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.iscode, 'mod.spam.__code__')
try:
1/0
except:
tb = sys.exc_info()[2]
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.istraceback, 'tb')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
else:
self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
finally:
# Clear traceback and all the frames and local variables hanging to it.
tb = None
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.isfunction, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.isdatadescriptor, 'collections.defaultdict.default_factory')
self.istest(inspect.isgenerator, '(x for x in range(2))')
self.istest(inspect.isgeneratorfunction, 'generator_function_example')
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assertTrue(inspect.isroutine(mod.spam))
self.assertTrue(inspect.isroutine([].count))
def test_isclass(self):
self.istest(inspect.isclass, 'mod.StupidGit')
self.assertTrue(inspect.isclass(list))
class CustomGetattr(object):
def __getattr__(self, attr):
return None
self.assertFalse(inspect.isclass(CustomGetattr()))
def test_get_slot_members(self):
class C(object):
__slots__ = ("a", "b")
x = C()
x.a = 42
members = dict(inspect.getmembers(x))
self.assertIn('a', members)
self.assertNotIn('b', members)
def test_isabstract(self):
from abc import ABCMeta, abstractmethod
class AbstractClassExample(metaclass=ABCMeta):
@abstractmethod
def foo(self):
pass
class ClassExample(AbstractClassExample):
def foo(self):
pass
a = ClassExample()
# Test general behaviour.
self.assertTrue(inspect.isabstract(AbstractClassExample))
self.assertFalse(inspect.isabstract(ClassExample))
self.assertFalse(inspect.isabstract(a))
self.assertFalse(inspect.isabstract(int))
self.assertFalse(inspect.isabstract(5))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assertTrue(len(mod.st) >= 5)
self.assertEqual(revise(*mod.st[0][1:]),
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(revise(*mod.st[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*mod.st[2][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*mod.st[3][1:]),
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(revise(*git.tr[0][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*git.tr[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*git.tr[2][1:]),
(modfile, 18, 'eggs', [' q = y / 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', 'e', 'f'])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, e=4, f=5, *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderModule = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
with open(inspect.getsourcefile(self.fodderModule)) as fp:
self.source = fp.read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderModule = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)])
tree = inspect.getclasstree([cls[1] for cls in classes], 1)
self.assertEqual(tree,
[(object, ()),
[(mod.ParrotDroppings, (object,)),
(mod.StupidGit, (object,)),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_cleandoc(self):
self.assertEqual(inspect.cleandoc('An\n indented\n docstring.'),
'An\nindented\ndocstring.')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["builtins"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(normcase(inspect.getsourcefile(mod.spam)), modfile)
self.assertEqual(normcase(inspect.getsourcefile(git.abuse)), modfile)
fn = "_non_existing_filename_used_for_sourcefile_test.py"
co = compile("None", fn, "exec")
self.assertEqual(inspect.getsourcefile(co), None)
linecache.cache[co.co_filename] = (1, None, "None", co.co_filename)
try:
self.assertEqual(normcase(inspect.getsourcefile(co)), fn)
finally:
del linecache.cache[co.co_filename]
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getmodule_recursion(self):
from types import ModuleType
name = '__inspect_dummy'
m = sys.modules[name] = ModuleType(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec("def x(): pass", m.__dict__)
self.assertEqual(inspect.getsourcefile(m.x.__code__), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
def test_proceed_with_fake_filename(self):
'''doctest monkeypatches linecache to enable inspection'''
fn, source = '<test>', 'def x(): pass\n'
getlines = linecache.getlines
def monkey(filename, module_globals=None):
if filename == fn:
return source.splitlines(keepends=True)
else:
return getlines(filename, module_globals)
linecache.getlines = monkey
try:
ns = {}
exec(compile(source, fn, 'single'), ns)
inspect.getsource(ns["x"])
finally:
linecache.getlines = getlines
class TestDecorators(GetSourceBase):
fodderModule = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderModule = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderModule = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
@unittest.skipIf(
not hasattr(unicodedata, '__file__') or
unicodedata.__file__[-4:] in (".pyc", ".pyo"),
"unicodedata is not an external binary module")
def test_findsource_binary(self):
self.assertRaises(IOError, inspect.getsource, unicodedata)
self.assertRaises(IOError, inspect.findsource, unicodedata)
def test_findsource_code_in_linecache(self):
lines = ["x=1"]
co = compile(lines[0], "_dynamically_created_file", "exec")
self.assertRaises(IOError, inspect.findsource, co)
self.assertRaises(IOError, inspect.getsource, co)
linecache.cache[co.co_filename] = (1, None, lines, co.co_filename)
try:
self.assertEqual(inspect.findsource(co), (lines,0))
self.assertEqual(inspect.getsource(co), lines[0])
finally:
del linecache.cache[co.co_filename]
class TestNoEOL(GetSourceBase):
def __init__(self, *args, **kwargs):
self.tempdir = TESTFN + '_dir'
os.mkdir(self.tempdir)
with open(os.path.join(self.tempdir,
'inspect_fodder3%spy' % os.extsep), 'w') as f:
f.write("class X:\n pass # No EOL")
with DirsOnSysPath(self.tempdir):
import inspect_fodder3 as mod3
self.fodderModule = mod3
GetSourceBase.__init__(self, *args, **kwargs)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_class(self):
self.assertSourceEqual(self.fodderModule.X, 1, 2)
class _BrokenDataDescriptor(object):
"""
A broken data descriptor. See bug #1785.
"""
def __get__(*args):
raise AssertionError("should not __get__ data descriptors")
def __set__(*args):
raise RuntimeError
def __getattr__(*args):
raise AssertionError("should not __getattr__ data descriptors")
class _BrokenMethodDescriptor(object):
"""
A broken method descriptor. See bug #1785.
"""
def __get__(*args):
raise AssertionError("should not __get__ method descriptors")
def __getattr__(*args):
raise AssertionError("should not __getattr__ method descriptors")
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
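# For reference (an illustrative note, not part of the original test suite):
# inspect.classify_class_attrs() yields (name, kind, defining_class, object)
# tuples; attrs_wo_objs() drops the trailing object, so the assertions below
# compare against entries of the form ('m', 'method', A).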
class TestClassesAndFunctions(unittest.TestCase):
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None, formatted=None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
def assertFullArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None,
kwonlyargs_e=[], kwonlydefaults_e=None,
ann_e={}, formatted=None):
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
inspect.getfullargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
self.assertEqual(kwonlyargs, kwonlyargs_e)
self.assertEqual(kwonlydefaults, kwonlydefaults_e)
self.assertEqual(ann, ann_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, ann),
formatted)
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted='(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', 'e', 'f'],
'g', 'h', (3, 4, 5),
'(a, b, c, d=3, e=4, f=5, *g, **h)')
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyworded, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.annotated, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyword_only_arg, [])
def test_getfullargspec(self):
self.assertFullArgSpecEquals(mod2.keyworded, [], varargs_e='arg1',
kwonlyargs_e=['arg2'],
kwonlydefaults_e={'arg2':1},
formatted='(*arg1, arg2=1)')
self.assertFullArgSpecEquals(mod2.annotated, ['arg1'],
ann_e={'arg1' : list},
formatted='(arg1: list)')
self.assertFullArgSpecEquals(mod2.keyword_only_arg, [],
kwonlyargs_e=['arg'],
formatted='(*, arg)')
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
def test_classify_newstyle(self):
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
dd = _BrokenDataDescriptor()
md = _BrokenMethodDescriptor()
attrs = attrs_wo_objs(A)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', A), attrs,
'missing plain method: %r' % attrs)
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', C), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', D), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
def test_classify_builtin_types(self):
# Simple sanity check that all built-in types can have their
# attributes classified.
for name in dir(__builtins__):
builtin = getattr(__builtins__, name)
if isinstance(builtin, type):
inspect.classify_class_attrs(builtin)
def test_getmembers_descriptors(self):
class A(object):
dd = _BrokenDataDescriptor()
md = _BrokenMethodDescriptor()
def pred_wrapper(pred):
# A quick'n'dirty way to discard standard attributes of new-style
# classes.
class Empty(object):
pass
def wrapped(x):
if '__name__' in dir(x) and hasattr(Empty, x.__name__):
return False
return pred(x)
return wrapped
ismethoddescriptor = pred_wrapper(inspect.ismethoddescriptor)
isdatadescriptor = pred_wrapper(inspect.isdatadescriptor)
self.assertEqual(inspect.getmembers(A, ismethoddescriptor),
[('md', A.__dict__['md'])])
self.assertEqual(inspect.getmembers(A, isdatadescriptor),
[('dd', A.__dict__['dd'])])
class B(A):
pass
self.assertEqual(inspect.getmembers(B, ismethoddescriptor),
[('md', A.__dict__['md'])])
self.assertEqual(inspect.getmembers(B, isdatadescriptor),
[('dd', A.__dict__['dd'])])
def test_getmembers_method(self):
class B:
def f(self):
pass
self.assertIn(('f', B.f), inspect.getmembers(B))
self.assertNotIn(('f', B.f), inspect.getmembers(B, inspect.ismethod))
b = B()
self.assertIn(('f', b.f), inspect.getmembers(b))
self.assertIn(('f', b.f), inspect.getmembers(b, inspect.ismethod))
_global_ref = object()
class TestGetClosureVars(unittest.TestCase):
def test_name_resolution(self):
# Basic test of the 4 different resolution mechanisms
def f(nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(f(_arg)), expected)
def test_generator_closure(self):
def f(nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
yield
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(f(_arg)), expected)
def test_method_closure(self):
class C:
def f(self, nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(C().f(_arg)), expected)
def test_nonlocal_vars(self):
# More complex tests of nonlocal resolution
def _nonlocal_vars(f):
return inspect.getclosurevars(f).nonlocals
def make_adder(x):
def add(y):
return x + y
return add
def curry(func, arg1):
return lambda arg2: func(arg1, arg2)
def less_than(a, b):
return a < b
# The infamous Y combinator.
def Y(le):
def g(f):
return le(lambda x: f(f)(x))
Y.g_ref = g
return g(g)
def check_y_combinator(func):
self.assertEqual(_nonlocal_vars(func), {'f': Y.g_ref})
inc = make_adder(1)
add_two = make_adder(2)
greater_than_five = curry(less_than, 5)
self.assertEqual(_nonlocal_vars(inc), {'x': 1})
self.assertEqual(_nonlocal_vars(add_two), {'x': 2})
self.assertEqual(_nonlocal_vars(greater_than_five),
{'arg1': 5, 'func': less_than})
self.assertEqual(_nonlocal_vars((lambda x: lambda y: x + y)(3)),
{'x': 3})
Y(check_y_combinator)
def test_getclosurevars_empty(self):
def foo(): pass
_empty = inspect.ClosureVars({}, {}, {}, set())
self.assertEqual(inspect.getclosurevars(lambda: True), _empty)
self.assertEqual(inspect.getclosurevars(foo), _empty)
def test_getclosurevars_error(self):
class T: pass
self.assertRaises(TypeError, inspect.getclosurevars, 1)
self.assertRaises(TypeError, inspect.getclosurevars, list)
self.assertRaises(TypeError, inspect.getclosurevars, {})
def _private_globals(self):
code = """def f(): print(path)"""
ns = {}
exec(code, ns)
return ns["f"], ns
def test_builtins_fallback(self):
f, ns = self._private_globals()
ns.pop("__builtins__", None)
expected = inspect.ClosureVars({}, {}, {"print":print}, {"path"})
self.assertEqual(inspect.getclosurevars(f), expected)
def test_builtins_as_dict(self):
f, ns = self._private_globals()
ns["__builtins__"] = {"path":1}
expected = inspect.ClosureVars({}, {}, {"path":1}, {"print"})
self.assertEqual(inspect.getclosurevars(f), expected)
def test_builtins_as_module(self):
f, ns = self._private_globals()
ns["__builtins__"] = os
expected = inspect.ClosureVars({}, {}, {"path":os.path}, {"print"})
self.assertEqual(inspect.getclosurevars(f), expected)
class TestGetcallargsFunctions(unittest.TestCase):
def assertEqualCallArgs(self, func, call_params_string, locs=None):
locs = dict(locs or {}, func=func)
r1 = eval('func(%s)' % call_params_string, None, locs)
r2 = eval('inspect.getcallargs(func, %s)' % call_params_string, None,
locs)
self.assertEqual(r1, r2)
def assertEqualException(self, func, call_param_string, locs=None):
locs = dict(locs or {}, func=func)
try:
eval('func(%s)' % call_param_string, None, locs)
except Exception as e:
ex1 = e
else:
self.fail('Exception not raised')
try:
eval('inspect.getcallargs(func, %s)' % call_param_string, None,
locs)
except Exception as e:
ex2 = e
else:
self.fail('Exception not raised')
self.assertIs(type(ex1), type(ex2))
self.assertEqual(str(ex1), str(ex2))
del ex1, ex2
def makeCallable(self, signature):
"""Create a function that returns its locals()"""
code = "lambda %s: locals()"
return eval(code % signature)
def test_plain(self):
f = self.makeCallable('a, b=1')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, 'b=3, a=2')
self.assertEqualCallArgs(f, '2, b=3')
# expand *iterable / **mapping
self.assertEqualCallArgs(f, '*(2,)')
self.assertEqualCallArgs(f, '*[2]')
self.assertEqualCallArgs(f, '*(2, 3)')
self.assertEqualCallArgs(f, '*[2, 3]')
self.assertEqualCallArgs(f, '**{"a":2}')
self.assertEqualCallArgs(f, 'b=3, **{"a":2}')
self.assertEqualCallArgs(f, '2, **{"b":3}')
self.assertEqualCallArgs(f, '**{"b":3, "a":2}')
# expand UserList / UserDict
self.assertEqualCallArgs(f, '*collections.UserList([2])')
self.assertEqualCallArgs(f, '*collections.UserList([2, 3])')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2)')
self.assertEqualCallArgs(f, '2, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3)')
def test_varargs(self):
f = self.makeCallable('a, b=1, *c')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, '2, 3, 4')
self.assertEqualCallArgs(f, '*(2,3,4)')
self.assertEqualCallArgs(f, '2, *[3,4]')
self.assertEqualCallArgs(f, '2, 3, *collections.UserList([4])')
def test_varkw(self):
f = self.makeCallable('a, b=1, **c')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, '2, b=3, c=4')
self.assertEqualCallArgs(f, 'b=3, a=2, c=4')
self.assertEqualCallArgs(f, 'c=4, **{"a":2, "b":3}')
self.assertEqualCallArgs(f, '2, c=4, **{"b":3}')
self.assertEqualCallArgs(f, 'b=2, **{"a":3, "c":4}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2, b=3, c=4)')
self.assertEqualCallArgs(f, '2, c=4, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3, c=4)')
def test_varkw_only(self):
# issue11256:
f = self.makeCallable('**c')
self.assertEqualCallArgs(f, '')
self.assertEqualCallArgs(f, 'a=1')
self.assertEqualCallArgs(f, 'a=1, b=2')
self.assertEqualCallArgs(f, 'c=3, **{"a": 1, "b": 2}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=1, b=2)')
self.assertEqualCallArgs(f, 'c=3, **collections.UserDict(a=1, b=2)')
def test_keyword_only(self):
f = self.makeCallable('a=3, *, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, a=3')
self.assertEqualCallArgs(f, 'a=2, c=4')
self.assertEqualCallArgs(f, '4, c=4')
self.assertEqualException(f, '')
self.assertEqualException(f, '3')
self.assertEqualException(f, 'a=3')
self.assertEqualException(f, 'd=4')
f = self.makeCallable('*, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, d=4')
self.assertEqualCallArgs(f, 'd=4, c=3')
def test_multiple_features(self):
f = self.makeCallable('a, b=2, *f, **g')
self.assertEqualCallArgs(f, '2, 3, 7')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), **collections.UserDict('
'y=9, z=10)')
f = self.makeCallable('a, b=2, *f, x, y=99, **g')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), q=0, **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), q=0, **collections.UserDict('
'y=9, z=10)')
def test_errors(self):
f0 = self.makeCallable('')
f1 = self.makeCallable('a, b')
f2 = self.makeCallable('a, b=1')
# f0 takes no arguments
self.assertEqualException(f0, '1')
self.assertEqualException(f0, 'x=1')
self.assertEqualException(f0, '1,x=1')
# f1 takes exactly 2 arguments
self.assertEqualException(f1, '')
self.assertEqualException(f1, '1')
self.assertEqualException(f1, 'a=2')
self.assertEqualException(f1, 'b=3')
# f2 takes at least 1 argument
self.assertEqualException(f2, '')
self.assertEqualException(f2, 'b=3')
for f in f1, f2:
# f1/f2 takes exactly/at most 2 arguments
self.assertEqualException(f, '2, 3, 4')
self.assertEqualException(f, '1, 2, 3, a=1')
self.assertEqualException(f, '2, 3, 4, c=5')
# XXX: success of this one depends on dict order
## self.assertEqualException(f, '2, 3, 4, a=1, c=5')
# f got an unexpected keyword argument
self.assertEqualException(f, 'c=2')
self.assertEqualException(f, '2, c=3')
self.assertEqualException(f, '2, 3, c=4')
self.assertEqualException(f, '2, c=4, b=3')
self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
# f got multiple values for keyword argument
self.assertEqualException(f, '1, a=2')
self.assertEqualException(f, '1, **{"a":2}')
self.assertEqualException(f, '1, 2, b=3')
# XXX: Python inconsistency
# - for functions and bound methods: unexpected keyword 'c'
# - for unbound methods: multiple values for keyword 'a'
#self.assertEqualException(f, '1, c=3, a=2')
# issue11256:
f3 = self.makeCallable('**c')
self.assertEqualException(f3, '1, 2')
self.assertEqualException(f3, '1, 2, a=1, b=2')
f4 = self.makeCallable('*, a, b=0')
        self.assertEqualException(f4, '1, 2')
        self.assertEqualException(f4, '1, 2, a=1, b=2')
class TestGetcallargsMethods(TestGetcallargsFunctions):
def setUp(self):
class Foo(object):
pass
self.cls = Foo
self.inst = Foo()
def makeCallable(self, signature):
assert 'self' not in signature
mk = super(TestGetcallargsMethods, self).makeCallable
self.cls.method = mk('self, ' + signature)
return self.inst.method
class TestGetcallargsUnboundMethods(TestGetcallargsMethods):
def makeCallable(self, signature):
super(TestGetcallargsUnboundMethods, self).makeCallable(signature)
return self.cls.method
def assertEqualCallArgs(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualCallArgs(
*self._getAssertEqualParams(func, call_params_string, locs))
def assertEqualException(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualException(
*self._getAssertEqualParams(func, call_params_string, locs))
def _getAssertEqualParams(self, func, call_params_string, locs=None):
assert 'inst' not in call_params_string
locs = dict(locs or {}, inst=self.inst)
return (func, 'inst,' + call_params_string, locs)
class TestGetattrStatic(unittest.TestCase):
def test_basic(self):
class Thing(object):
x = object()
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'x', None), Thing.x)
with self.assertRaises(AttributeError):
inspect.getattr_static(thing, 'y')
self.assertEqual(inspect.getattr_static(thing, 'y', 3), 3)
def test_inherited(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
something = OtherThing()
self.assertEqual(inspect.getattr_static(something, 'x'), Thing.x)
def test_instance_attr(self):
class Thing(object):
x = 2
def __init__(self, x):
self.x = x
thing = Thing(3)
self.assertEqual(inspect.getattr_static(thing, 'x'), 3)
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), 2)
def test_property(self):
class Thing(object):
@property
def x(self):
raise AttributeError("I'm pretending not to exist")
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_descriptor_raises_AttributeError(self):
class descriptor(object):
def __get__(*_):
raise AttributeError("I'm pretending not to exist")
desc = descriptor()
class Thing(object):
x = desc
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), desc)
def test_classAttribute(self):
class Thing(object):
x = object()
self.assertEqual(inspect.getattr_static(Thing, 'x'), Thing.x)
def test_inherited_classattribute(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
self.assertEqual(inspect.getattr_static(OtherThing, 'x'), Thing.x)
def test_slots(self):
class Thing(object):
y = 'bar'
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'y'), 'bar')
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_metaclass(self):
class meta(type):
attr = 'foo'
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'attr'), 'foo')
class sub(meta):
pass
class OtherThing(object, metaclass=sub):
x = 3
self.assertEqual(inspect.getattr_static(OtherThing, 'attr'), 'foo')
class OtherOtherThing(OtherThing):
pass
# this test is odd, but it was added as it exposed a bug
self.assertEqual(inspect.getattr_static(OtherOtherThing, 'x'), 3)
def test_no_dict_no_slots(self):
self.assertEqual(inspect.getattr_static(1, 'foo', None), None)
self.assertNotEqual(inspect.getattr_static('foo', 'lower'), None)
def test_no_dict_no_slots_instance_member(self):
# returns descriptor
with open(__file__) as handle:
self.assertEqual(inspect.getattr_static(handle, 'name'), type(handle).name)
def test_inherited_slots(self):
# returns descriptor
class Thing(object):
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
class OtherThing(Thing):
pass
# it would be nice if this worked...
# we get the descriptor instead of the instance attribute
self.assertEqual(inspect.getattr_static(OtherThing(), 'x'), Thing.x)
def test_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class Foo(object):
d = descriptor()
foo = Foo()
# for a non data descriptor we return the instance attribute
foo.__dict__['d'] = 1
self.assertEqual(inspect.getattr_static(foo, 'd'), 1)
        # if the descriptor is a data descriptor we should return the
        # descriptor
descriptor.__set__ = lambda s, i, v: None
self.assertEqual(inspect.getattr_static(foo, 'd'), Foo.__dict__['d'])
def test_metaclass_with_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class meta(type):
d = descriptor()
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'd'), meta.__dict__['d'])
def test_class_as_property(self):
class Base(object):
foo = 3
class Something(Base):
executed = False
@property
def __class__(self):
self.executed = True
return object
instance = Something()
self.assertEqual(inspect.getattr_static(instance, 'foo'), 3)
self.assertFalse(instance.executed)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_mro_as_property(self):
class Meta(type):
@property
def __mro__(self):
return (object,)
class Base(object):
foo = 3
class Something(Base, metaclass=Meta):
pass
self.assertEqual(inspect.getattr_static(Something(), 'foo'), 3)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_dict_as_property(self):
test = self
test.called = False
class Foo(dict):
a = 3
@property
def __dict__(self):
test.called = True
return {}
foo = Foo()
foo.a = 4
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_custom_object_dict(self):
test = self
test.called = False
class Custom(dict):
def get(self, key, default=None):
test.called = True
super().get(key, default)
class Foo(object):
a = 3
foo = Foo()
foo.__dict__ = Custom()
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_metaclass_dict_as_property(self):
class Meta(type):
@property
def __dict__(self):
self.executed = True
class Thing(metaclass=Meta):
executed = False
def __init__(self):
self.spam = 42
instance = Thing()
self.assertEqual(inspect.getattr_static(instance, "spam"), 42)
self.assertFalse(Thing.executed)
def test_module(self):
sentinel = object()
self.assertIsNot(inspect.getattr_static(sys, "version", sentinel),
sentinel)
def test_metaclass_with_metaclass_with_dict_as_property(self):
class MetaMeta(type):
@property
def __dict__(self):
self.executed = True
return dict(spam=42)
class Meta(type, metaclass=MetaMeta):
executed = False
class Thing(metaclass=Meta):
pass
with self.assertRaises(AttributeError):
inspect.getattr_static(Thing, "spam")
self.assertFalse(Thing.executed)
class TestGetGeneratorState(unittest.TestCase):
def setUp(self):
def number_generator():
for number in range(5):
yield number
self.generator = number_generator()
def _generatorstate(self):
return inspect.getgeneratorstate(self.generator)
def test_created(self):
self.assertEqual(self._generatorstate(), inspect.GEN_CREATED)
def test_suspended(self):
next(self.generator)
self.assertEqual(self._generatorstate(), inspect.GEN_SUSPENDED)
def test_closed_after_exhaustion(self):
for i in self.generator:
pass
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_closed_after_immediate_exception(self):
with self.assertRaises(RuntimeError):
self.generator.throw(RuntimeError)
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_running(self):
# As mentioned on issue #10220, checking for the RUNNING state only
# makes sense inside the generator itself.
# The following generator checks for this by using the closure's
# reference to self and the generator state checking helper method
def running_check_generator():
for number in range(5):
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
yield number
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
self.generator = running_check_generator()
# Running up to the first yield
next(self.generator)
# Running after the first yield
next(self.generator)
def test_easy_debugging(self):
# repr() and str() of a generator state should contain the state name
names = 'GEN_CREATED GEN_RUNNING GEN_SUSPENDED GEN_CLOSED'.split()
for name in names:
state = getattr(inspect, name)
self.assertIn(name, repr(state))
self.assertIn(name, str(state))
def test_getgeneratorlocals(self):
def each(lst, a=None):
b=(1, 2, 3)
for v in lst:
if v == 3:
c = 12
yield v
numbers = each([1, 2, 3])
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3]})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 1,
'b': (1, 2, 3)})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 2,
'b': (1, 2, 3)})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 3,
'b': (1, 2, 3), 'c': 12})
try:
next(numbers)
except StopIteration:
pass
self.assertEqual(inspect.getgeneratorlocals(numbers), {})
def test_getgeneratorlocals_empty(self):
def yield_one():
yield 1
one = yield_one()
self.assertEqual(inspect.getgeneratorlocals(one), {})
try:
next(one)
except StopIteration:
pass
self.assertEqual(inspect.getgeneratorlocals(one), {})
def test_getgeneratorlocals_error(self):
self.assertRaises(TypeError, inspect.getgeneratorlocals, 1)
self.assertRaises(TypeError, inspect.getgeneratorlocals, lambda x: True)
self.assertRaises(TypeError, inspect.getgeneratorlocals, set)
self.assertRaises(TypeError, inspect.getgeneratorlocals, (2,3))
class TestSignatureObject(unittest.TestCase):
@staticmethod
def signature(func):
sig = inspect.signature(func)
return (tuple((param.name,
(... if param.default is param.empty else param.default),
(... if param.annotation is param.empty
else param.annotation),
str(param.kind).lower())
for param in sig.parameters.values()),
(... if sig.return_annotation is sig.empty
else sig.return_annotation))
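    # For reference (an illustrative note, not part of the original test suite):
    # for "def f(a, b=1) -> int: pass" the helper above produces
    #   ((('a', ..., ..., 'positional_or_keyword'),
    #     ('b', 1, ..., 'positional_or_keyword')),
    #    int)
    # with ... (Ellipsis) standing in for an empty default or annotation.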
def test_signature_object(self):
S = inspect.Signature
P = inspect.Parameter
self.assertEqual(str(S()), '()')
def test(po, pk, *args, ko, **kwargs):
pass
sig = inspect.signature(test)
po = sig.parameters['po'].replace(kind=P.POSITIONAL_ONLY)
pk = sig.parameters['pk']
args = sig.parameters['args']
ko = sig.parameters['ko']
kwargs = sig.parameters['kwargs']
S((po, pk, args, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((pk, po, args, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((po, args, pk, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((args, po, pk, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((po, pk, args, kwargs, ko))
kwargs2 = kwargs.replace(name='args')
with self.assertRaisesRegex(ValueError, 'duplicate parameter name'):
S((po, pk, args, kwargs2, ko))
def test_signature_immutability(self):
def test(a):
pass
sig = inspect.signature(test)
with self.assertRaises(AttributeError):
sig.foo = 'bar'
with self.assertRaises(TypeError):
sig.parameters['a'] = None
def test_signature_on_noarg(self):
def test():
pass
self.assertEqual(self.signature(test), ((), ...))
def test_signature_on_wargs(self):
def test(a, b:'foo') -> 123:
pass
self.assertEqual(self.signature(test),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., 'foo', "positional_or_keyword")),
123))
def test_signature_on_wkwonly(self):
def test(*, a:float, b:str) -> int:
pass
self.assertEqual(self.signature(test),
((('a', ..., float, "keyword_only"),
('b', ..., str, "keyword_only")),
int))
def test_signature_on_complex_args(self):
def test(a, b:'foo'=10, *args:'bar', spam:'baz', ham=123, **kwargs:int):
pass
self.assertEqual(self.signature(test),
((('a', ..., ..., "positional_or_keyword"),
('b', 10, 'foo', "positional_or_keyword"),
('args', ..., 'bar', "var_positional"),
('spam', ..., 'baz', "keyword_only"),
('ham', 123, ..., "keyword_only"),
('kwargs', ..., int, "var_keyword")),
...))
def test_signature_on_builtin_function(self):
with self.assertRaisesRegex(ValueError, 'not supported by signature'):
inspect.signature(type)
with self.assertRaisesRegex(ValueError, 'not supported by signature'):
# support for 'wrapper_descriptor'
inspect.signature(type.__call__)
with self.assertRaisesRegex(ValueError, 'not supported by signature'):
# support for 'method-wrapper'
inspect.signature(min.__call__)
with self.assertRaisesRegex(ValueError,
'no signature found for builtin function'):
# support for 'method-wrapper'
inspect.signature(min)
def test_signature_on_non_function(self):
with self.assertRaisesRegex(TypeError, 'is not a callable object'):
inspect.signature(42)
with self.assertRaisesRegex(TypeError, 'is not a Python function'):
inspect.Signature.from_function(42)
def test_signature_on_method(self):
class Test:
def foo(self, arg1, arg2=1) -> int:
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "positional_or_keyword")),
int))
def test_signature_on_classmethod(self):
class Test:
@classmethod
def foo(cls, arg1, *, arg2=1):
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "keyword_only")),
...))
meth = Test.foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "keyword_only")),
...))
def test_signature_on_staticmethod(self):
class Test:
@staticmethod
def foo(cls, *, arg):
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('cls', ..., ..., "positional_or_keyword"),
('arg', ..., ..., "keyword_only")),
...))
meth = Test.foo
self.assertEqual(self.signature(meth),
((('cls', ..., ..., "positional_or_keyword"),
('arg', ..., ..., "keyword_only")),
...))
def test_signature_on_partial(self):
from functools import partial
def test():
pass
self.assertEqual(self.signature(partial(test)), ((), ...))
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(partial(test, 1))
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(partial(test, a=1))
def test(a, b, *, c, d):
pass
self.assertEqual(self.signature(partial(test)),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword"),
('c', ..., ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 1)),
((('b', ..., ..., "positional_or_keyword"),
('c', ..., ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 1, c=2)),
((('b', ..., ..., "positional_or_keyword"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, b=1, c=2)),
((('a', ..., ..., "positional_or_keyword"),
('b', 1, ..., "positional_or_keyword"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 0, b=1, c=2)),
((('b', 1, ..., "positional_or_keyword"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only"),),
...))
def test(a, *args, b, **kwargs):
pass
self.assertEqual(self.signature(partial(test, 1)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3, test=True)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3, test=1, b=0)),
((('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, b=0)),
((('a', ..., ..., "positional_or_keyword"),
('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, b=0, test=1)),
((('a', ..., ..., "positional_or_keyword"),
('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
def test(a, b, c:int) -> 42:
pass
sig = test.__signature__ = inspect.signature(test)
self.assertEqual(self.signature(partial(partial(test, 1))),
((('b', ..., ..., "positional_or_keyword"),
('c', ..., int, "positional_or_keyword")),
42))
self.assertEqual(self.signature(partial(partial(test, 1), 2)),
((('c', ..., int, "positional_or_keyword"),),
42))
psig = inspect.signature(partial(partial(test, 1), 2))
def foo(a):
return a
_foo = partial(partial(foo, a=10), a=20)
self.assertEqual(self.signature(_foo),
((('a', 20, ..., "positional_or_keyword"),),
...))
# check that we don't have any side-effects in signature(),
# and the partial object is still functioning
self.assertEqual(_foo(), 20)
def foo(a, b, c):
return a, b, c
_foo = partial(partial(foo, 1, b=20), b=30)
self.assertEqual(self.signature(_foo),
((('b', 30, ..., "positional_or_keyword"),
('c', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(_foo(c=10), (1, 30, 10))
_foo = partial(_foo, 2) # now 'b' has two values -
# positional and keyword
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(_foo)
def foo(a, b, c, *, d):
return a, b, c, d
_foo = partial(partial(foo, d=20, c=20), b=10, d=30)
self.assertEqual(self.signature(_foo),
((('a', ..., ..., "positional_or_keyword"),
('b', 10, ..., "positional_or_keyword"),
('c', 20, ..., "positional_or_keyword"),
('d', 30, ..., "keyword_only")),
...))
ba = inspect.signature(_foo).bind(a=200, b=11)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (200, 11, 20, 30))
def foo(a=1, b=2, c=3):
return a, b, c
_foo = partial(foo, a=10, c=13)
ba = inspect.signature(_foo).bind(11)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 2, 13))
ba = inspect.signature(_foo).bind(11, 12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 12, 13))
ba = inspect.signature(_foo).bind(11, b=12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 12, 13))
ba = inspect.signature(_foo).bind(b=12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (10, 12, 13))
_foo = partial(_foo, b=10)
ba = inspect.signature(_foo).bind(12, 14)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (12, 14, 13))
def test_signature_on_decorated(self):
import functools
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs) -> int:
return func(*args, **kwargs)
return wrapper
class Foo:
@decorator
def bar(self, a, b):
pass
self.assertEqual(self.signature(Foo.bar),
((('self', ..., ..., "positional_or_keyword"),
('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(Foo().bar),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
# Test that we handle method wrappers correctly
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs) -> int:
return func(42, *args, **kwargs)
sig = inspect.signature(func)
new_params = tuple(sig.parameters.values())[1:]
wrapper.__signature__ = sig.replace(parameters=new_params)
return wrapper
class Foo:
@decorator
def __call__(self, a, b):
pass
self.assertEqual(self.signature(Foo.__call__),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(Foo().__call__),
((('b', ..., ..., "positional_or_keyword"),),
...))
def test_signature_on_class(self):
class C:
def __init__(self, a):
pass
self.assertEqual(self.signature(C),
((('a', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __call__(cls, a):
pass
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(C),
((('a', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __new__(mcls, name, bases, dct, *, foo=1):
return super().__new__(mcls, name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(C),
((('b', ..., ..., "positional_or_keyword"),),
...))
self.assertEqual(self.signature(CM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('foo', 1, ..., "keyword_only")),
...))
class CMM(type):
def __new__(mcls, name, bases, dct, *, foo=1):
return super().__new__(mcls, name, bases, dct)
def __call__(cls, nm, bs, dt):
return type(nm, bs, dt)
class CM(type, metaclass=CMM):
def __new__(mcls, name, bases, dct, *, bar=2):
return super().__new__(mcls, name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(CMM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('foo', 1, ..., "keyword_only")),
...))
self.assertEqual(self.signature(CM),
((('nm', ..., ..., "positional_or_keyword"),
('bs', ..., ..., "positional_or_keyword"),
('dt', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(C),
((('b', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __init__(cls, name, bases, dct, *, bar=2):
return super().__init__(name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(CM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('bar', 2, ..., "keyword_only")),
...))
def test_signature_on_callable_objects(self):
class Foo:
def __call__(self, a):
pass
self.assertEqual(self.signature(Foo()),
((('a', ..., ..., "positional_or_keyword"),),
...))
class Spam:
pass
with self.assertRaisesRegex(TypeError, "is not a callable object"):
inspect.signature(Spam())
class Bar(Spam, Foo):
pass
self.assertEqual(self.signature(Bar()),
((('a', ..., ..., "positional_or_keyword"),),
...))
class ToFail:
__call__ = type
with self.assertRaisesRegex(ValueError, "not supported by signature"):
inspect.signature(ToFail())
class Wrapped:
pass
Wrapped.__wrapped__ = lambda a: None
self.assertEqual(self.signature(Wrapped),
((('a', ..., ..., "positional_or_keyword"),),
...))
def test_signature_on_lambdas(self):
self.assertEqual(self.signature((lambda a=10: a)),
((('a', 10, ..., "positional_or_keyword"),),
...))
def test_signature_equality(self):
def foo(a, *, b:int) -> float: pass
self.assertNotEqual(inspect.signature(foo), 42)
def bar(a, *, b:int) -> float: pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int) -> int: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int): pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int=42) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, c) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, b:int) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def spam(b:int, a) -> float: pass
self.assertNotEqual(inspect.signature(spam), inspect.signature(bar))
def foo(*, a, b, c): pass
def bar(*, c, b, a): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(*, a=1, b, c): pass
def bar(*, c, b, a=1): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *, a=1, b, c): pass
def bar(pos, *, c, b, a=1): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *, a, b, c): pass
def bar(pos, *, c, b, a=1): pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *args, a=42, b, c, **kwargs:int): pass
def bar(pos, *args, c, b, a=42, **kwargs:int): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def test_signature_unhashable(self):
def foo(a): pass
sig = inspect.signature(foo)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(sig)
def test_signature_str(self):
def foo(a:int=1, *, b, c=None, **kwargs) -> 42:
pass
self.assertEqual(str(inspect.signature(foo)),
'(a:int=1, *, b, c=None, **kwargs) -> 42')
def foo(a:int=1, *args, b, c=None, **kwargs) -> 42:
pass
self.assertEqual(str(inspect.signature(foo)),
'(a:int=1, *args, b, c=None, **kwargs) -> 42')
def foo():
pass
self.assertEqual(str(inspect.signature(foo)), '()')
def test_signature_str_positional_only(self):
P = inspect.Parameter
def test(a_po, *, b, **kwargs):
return a_po, kwargs
sig = inspect.signature(test)
new_params = list(sig.parameters.values())
new_params[0] = new_params[0].replace(kind=P.POSITIONAL_ONLY)
test.__signature__ = sig.replace(parameters=new_params)
self.assertEqual(str(inspect.signature(test)),
'(<a_po>, *, b, **kwargs)')
sig = inspect.signature(test)
new_params = list(sig.parameters.values())
new_params[0] = new_params[0].replace(name=None)
test.__signature__ = sig.replace(parameters=new_params)
self.assertEqual(str(inspect.signature(test)),
'(<0>, *, b, **kwargs)')
def test_signature_replace_anno(self):
def test() -> 42:
pass
sig = inspect.signature(test)
sig = sig.replace(return_annotation=None)
self.assertIs(sig.return_annotation, None)
sig = sig.replace(return_annotation=sig.empty)
self.assertIs(sig.return_annotation, sig.empty)
sig = sig.replace(return_annotation=42)
self.assertEqual(sig.return_annotation, 42)
self.assertEqual(sig, inspect.signature(test))
class TestParameterObject(unittest.TestCase):
def test_signature_parameter_kinds(self):
P = inspect.Parameter
self.assertTrue(P.POSITIONAL_ONLY < P.POSITIONAL_OR_KEYWORD < \
P.VAR_POSITIONAL < P.KEYWORD_ONLY < P.VAR_KEYWORD)
self.assertEqual(str(P.POSITIONAL_ONLY), 'POSITIONAL_ONLY')
self.assertTrue('POSITIONAL_ONLY' in repr(P.POSITIONAL_ONLY))
def test_signature_parameter_object(self):
p = inspect.Parameter('foo', default=10,
kind=inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(p.name, 'foo')
self.assertEqual(p.default, 10)
self.assertIs(p.annotation, p.empty)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
with self.assertRaisesRegex(ValueError, 'invalid value'):
inspect.Parameter('foo', default=10, kind='123')
with self.assertRaisesRegex(ValueError, 'not a valid parameter name'):
inspect.Parameter('1', kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError,
'non-positional-only parameter'):
inspect.Parameter(None, kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
inspect.Parameter('a', default=42,
kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
inspect.Parameter('a', default=42,
kind=inspect.Parameter.VAR_POSITIONAL)
p = inspect.Parameter('a', default=42,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
p.replace(kind=inspect.Parameter.VAR_POSITIONAL)
self.assertTrue(repr(p).startswith('<Parameter'))
def test_signature_parameter_equality(self):
P = inspect.Parameter
p = P('foo', default=42, kind=inspect.Parameter.KEYWORD_ONLY)
self.assertEqual(p, p)
self.assertNotEqual(p, 42)
self.assertEqual(p, P('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY))
def test_signature_parameter_unhashable(self):
p = inspect.Parameter('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(p)
def test_signature_parameter_replace(self):
p = inspect.Parameter('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY)
self.assertIsNot(p, p.replace())
self.assertEqual(p, p.replace())
p2 = p.replace(annotation=1)
self.assertEqual(p2.annotation, 1)
p2 = p2.replace(annotation=p2.empty)
self.assertEqual(p, p2)
p2 = p2.replace(name='bar')
self.assertEqual(p2.name, 'bar')
self.assertNotEqual(p2, p)
with self.assertRaisesRegex(ValueError, 'not a valid parameter name'):
p2 = p2.replace(name=p2.empty)
p2 = p2.replace(name='foo', default=None)
self.assertIs(p2.default, None)
self.assertNotEqual(p2, p)
p2 = p2.replace(name='foo', default=p2.empty)
self.assertIs(p2.default, p2.empty)
p2 = p2.replace(default=42, kind=p2.POSITIONAL_OR_KEYWORD)
self.assertEqual(p2.kind, p2.POSITIONAL_OR_KEYWORD)
self.assertNotEqual(p2, p)
with self.assertRaisesRegex(ValueError, 'invalid value for'):
p2 = p2.replace(kind=p2.empty)
p2 = p2.replace(kind=p2.KEYWORD_ONLY)
self.assertEqual(p2, p)
def test_signature_parameter_positional_only(self):
p = inspect.Parameter(None, kind=inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(str(p), '<>')
p = p.replace(name='1')
self.assertEqual(str(p), '<1>')
def test_signature_parameter_immutability(self):
p = inspect.Parameter(None, kind=inspect.Parameter.POSITIONAL_ONLY)
with self.assertRaises(AttributeError):
p.foo = 'bar'
with self.assertRaises(AttributeError):
p.kind = 123
class TestSignatureBind(unittest.TestCase):
@staticmethod
def call(func, *args, **kwargs):
sig = inspect.signature(func)
ba = sig.bind(*args, **kwargs)
return func(*ba.args, **ba.kwargs)
def test_signature_bind_empty(self):
def test():
return 42
self.assertEqual(self.call(test), 42)
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1)
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1, spam=10)
with self.assertRaisesRegex(TypeError, 'too many keyword arguments'):
self.call(test, spam=1)
def test_signature_bind_var(self):
def test(*args, **kwargs):
return args, kwargs
self.assertEqual(self.call(test), ((), {}))
self.assertEqual(self.call(test, 1), ((1,), {}))
self.assertEqual(self.call(test, 1, 2), ((1, 2), {}))
self.assertEqual(self.call(test, foo='bar'), ((), {'foo': 'bar'}))
self.assertEqual(self.call(test, 1, foo='bar'), ((1,), {'foo': 'bar'}))
self.assertEqual(self.call(test, args=10), ((), {'args': 10}))
self.assertEqual(self.call(test, 1, 2, foo='bar'),
((1, 2), {'foo': 'bar'}))
def test_signature_bind_just_args(self):
def test(a, b, c):
return a, b, c
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1, 2, 3, 4)
with self.assertRaisesRegex(TypeError, "'b' parameter lacking default"):
self.call(test, 1)
with self.assertRaisesRegex(TypeError, "'a' parameter lacking default"):
self.call(test)
def test(a, b, c=10):
return a, b, c
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
self.assertEqual(self.call(test, 1, 2), (1, 2, 10))
def test(a=1, b=2, c=3):
return a, b, c
self.assertEqual(self.call(test, a=10, c=13), (10, 2, 13))
self.assertEqual(self.call(test, a=10), (10, 2, 3))
self.assertEqual(self.call(test, b=10), (1, 10, 3))
def test_signature_bind_varargs_order(self):
def test(*args):
return args
self.assertEqual(self.call(test), ())
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
def test_signature_bind_args_and_varargs(self):
def test(a, b, c=3, *args):
return a, b, c, args
self.assertEqual(self.call(test, 1, 2, 3, 4, 5), (1, 2, 3, (4, 5)))
self.assertEqual(self.call(test, 1, 2), (1, 2, 3, ()))
self.assertEqual(self.call(test, b=1, a=2), (2, 1, 3, ()))
self.assertEqual(self.call(test, 1, b=2), (1, 2, 3, ()))
with self.assertRaisesRegex(TypeError,
"multiple values for argument 'c'"):
self.call(test, 1, 2, 3, c=4)
def test_signature_bind_just_kwargs(self):
def test(**kwargs):
return kwargs
self.assertEqual(self.call(test), {})
self.assertEqual(self.call(test, foo='bar', spam='ham'),
{'foo': 'bar', 'spam': 'ham'})
def test_signature_bind_args_and_kwargs(self):
def test(a, b, c=3, **kwargs):
return a, b, c, kwargs
self.assertEqual(self.call(test, 1, 2), (1, 2, 3, {}))
self.assertEqual(self.call(test, 1, 2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, b=2, a=1, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, a=1, b=2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, b=2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, b=2, c=4, foo='bar', spam='ham'),
(1, 2, 4, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, 2, 4, foo='bar'),
(1, 2, 4, {'foo': 'bar'}))
self.assertEqual(self.call(test, c=5, a=4, b=3),
(4, 3, 5, {}))
def test_signature_bind_kwonly(self):
def test(*, foo):
return foo
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1)
self.assertEqual(self.call(test, foo=1), 1)
def test(a, *, foo=1, bar):
return foo
with self.assertRaisesRegex(TypeError,
"'bar' parameter lacking default value"):
self.call(test, 1)
def test(foo, *, bar):
return foo, bar
self.assertEqual(self.call(test, 1, bar=2), (1, 2))
self.assertEqual(self.call(test, bar=2, foo=1), (1, 2))
with self.assertRaisesRegex(TypeError,
'too many keyword arguments'):
self.call(test, bar=2, foo=1, spam=10)
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1, 2)
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1, 2, bar=2)
with self.assertRaisesRegex(TypeError,
'too many keyword arguments'):
self.call(test, 1, bar=2, spam='ham')
with self.assertRaisesRegex(TypeError,
"'bar' parameter lacking default value"):
self.call(test, 1)
def test(foo, *, bar, **bin):
return foo, bar, bin
self.assertEqual(self.call(test, 1, bar=2), (1, 2, {}))
self.assertEqual(self.call(test, foo=1, bar=2), (1, 2, {}))
self.assertEqual(self.call(test, 1, bar=2, spam='ham'),
(1, 2, {'spam': 'ham'}))
self.assertEqual(self.call(test, spam='ham', foo=1, bar=2),
(1, 2, {'spam': 'ham'}))
with self.assertRaisesRegex(TypeError,
"'foo' parameter lacking default value"):
self.call(test, spam='ham', bar=2)
self.assertEqual(self.call(test, 1, bar=2, bin=1, spam=10),
(1, 2, {'bin': 1, 'spam': 10}))
def test_signature_bind_arguments(self):
def test(a, *args, b, z=100, **kwargs):
pass
sig = inspect.signature(test)
ba = sig.bind(10, 20, b=30, c=40, args=50, kwargs=60)
# we won't have 'z' argument in the bound arguments object, as we didn't
# pass it to the 'bind'
self.assertEqual(tuple(ba.arguments.items()),
(('a', 10), ('args', (20,)), ('b', 30),
('kwargs', {'c': 40, 'args': 50, 'kwargs': 60})))
self.assertEqual(ba.kwargs,
{'b': 30, 'c': 40, 'args': 50, 'kwargs': 60})
self.assertEqual(ba.args, (10, 20))
def test_signature_bind_positional_only(self):
P = inspect.Parameter
def test(a_po, b_po, c_po=3, foo=42, *, bar=50, **kwargs):
return a_po, b_po, c_po, foo, bar, kwargs
sig = inspect.signature(test)
new_params = collections.OrderedDict(tuple(sig.parameters.items()))
for name in ('a_po', 'b_po', 'c_po'):
new_params[name] = new_params[name].replace(kind=P.POSITIONAL_ONLY)
new_sig = sig.replace(parameters=new_params.values())
test.__signature__ = new_sig
self.assertEqual(self.call(test, 1, 2, 4, 5, bar=6),
(1, 2, 4, 5, 6, {}))
with self.assertRaisesRegex(TypeError, "parameter is positional only"):
self.call(test, 1, 2, c_po=4)
with self.assertRaisesRegex(TypeError, "parameter is positional only"):
self.call(test, a_po=1, b_po=2)
def test_signature_bind_with_self_arg(self):
        # Issue #17071: one of the parameters is named "self"
def test(a, self, b):
pass
sig = inspect.signature(test)
ba = sig.bind(1, 2, 3)
self.assertEqual(ba.args, (1, 2, 3))
ba = sig.bind(1, self=2, b=3)
self.assertEqual(ba.args, (1, 2, 3))
class TestBoundArguments(unittest.TestCase):
def test_signature_bound_arguments_unhashable(self):
def foo(a): pass
ba = inspect.signature(foo).bind(1)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(ba)
def test_signature_bound_arguments_equality(self):
def foo(a): pass
ba = inspect.signature(foo).bind(1)
self.assertEqual(ba, ba)
ba2 = inspect.signature(foo).bind(1)
self.assertEqual(ba, ba2)
ba3 = inspect.signature(foo).bind(2)
self.assertNotEqual(ba, ba3)
ba3.arguments['a'] = 1
self.assertEqual(ba, ba3)
def bar(b): pass
ba4 = inspect.signature(bar).bind(1)
self.assertNotEqual(ba, ba4)
def test_main():
run_unittest(
TestDecorators, TestRetrievingSourceCode, TestOneliners, TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates,
TestGetcallargsFunctions, TestGetcallargsMethods,
TestGetcallargsUnboundMethods, TestGetattrStatic, TestGetGeneratorState,
TestNoEOL, TestSignatureObject, TestSignatureBind, TestParameterObject,
TestBoundArguments, TestGetClosureVars
)
if __name__ == "__main__":
test_main()
|
gpl-3.0
| -9,168,784,811,708,351,000
| 37.467714
| 88
| 0.540309
| false
| 3.947703
| true
| false
| false
|
d7415/merlin
|
Hooks/user/edituser.py
|
1
|
4554
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Core.config import Config
from Core.db import session
from Core.maps import User
from Core.chanusertracker import CUT
from Core.loadable import loadable, route, require_user
class edituser(loadable):
"""Used to change a user's access or (de)activate them"""
usage = " <user> (<access>|true|false)"
@route(r"(.+)\s+(\S+)", access = "admin")
@require_user
def execute(self, message, user, params):
usernames = params.group(1)
access = params.group(2).lower()
if access.isdigit():
access = int(access)
elif access in self.true:
access = True
elif access in self.false:
access = False
else:
try:
access = Config.getint("Access",access)
except Exception:
message.reply("Invalid access level '%s'" % (access,))
return
addnicks = []
remnicks = []
changed = []
mbraxx = Config.getint("Access","member")
home = Config.get("Channels","home")
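        # Track which nicks must be added to or removed from the home channel
        # as their access crosses the member threshold (mbraxx).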
for username in usernames.split():
member = User.load(name=username, active=False)
if member is None:
message.alert("No such user '%s'" % (username,))
return
if type(access) is int and not member.active:
message.reply("You should first re-activate user %s" %(member.name,))
return
if access > user.access or member.access > user.access:
message.reply("You may not change access higher than your own")
return
changed.append(username)
if type(access) == int:
if member.active == True and member.access < mbraxx and access >= mbraxx:
addnicks.append(member.name)
if member.active == True and member.access >= mbraxx and access < mbraxx:
message.privmsg("remuser %s %s"%(home, member.name,), Config.get("Services", "nick"))
remnicks.append(member.name)
# message.privmsg("ban %s *!*@%s.%s GTFO, EAAD"%(home, member.name, Config.get("Services", "usermask"),), Config.get("Services", "nick"))
member.access = access
else:
if member.active != access and access == True and member.access >= mbraxx:
addnicks.append(member.name)
if member.active != access and access == False and member.access >= mbraxx:
message.privmsg("remuser %s %s"%(home, member.name,), Config.get("Services", "nick"))
remnicks.append(member.name)
# message.privmsg("ban %s *!*@%s.%s GTFO, EAAD"%(home, member.name, Config.get("Services", "usermask"),), Config.get("Services", "nick"))
member.active = access
if not member.active:
CUT.untrack_user(member.name)
session.commit()
if addnicks:
message.privmsg("adduser %s %s 24" %(home, ",".join(addnicks),), Config.get("Services", "nick"))
message.reply("%s ha%s been added to %s"%(", ".join(addnicks), "ve" if len(addnicks) > 1 else "s", home,))
if remnicks:
message.reply("%s ha%s been removed from %s"%(", ".join(remnicks), "ve" if len(remnicks) > 1 else "s", home,))
if changed:
message.reply("Editted user%s %s access to %s" % ("s" if len(changed) > 1 else "", ", ".join(changed), access,))
|
gpl-2.0
| -7,891,953,885,707,710,000
| 45.469388
| 156
| 0.593105
| false
| 4.022968
| true
| false
| false
|
code-for-india/sahana_shelter_worldbank
|
private/templates/Sandy/controllers.py
|
1
|
8864
|
# -*- coding: utf-8 -*-
from os import path
from gluon import *
from gluon.storage import Storage
from s3 import *
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
auth = current.auth
if auth.is_logged_in():
# Redirect to Map
redirect(URL(c="hms", f="hospital", args=["map"]))
request = current.request
response = current.response
response.title = current.deployment_settings.get_system_name()
T = current.T
db = current.db
s3db = current.s3db
s3 = response.s3
appname = request.application
settings = current.deployment_settings
# Check logged in and permissions
roles = current.session.s3.roles
system_roles = auth.get_system_roles()
AUTHENTICATED = system_roles.AUTHENTICATED
if AUTHENTICATED in roles and \
auth.s3_has_permission("read", s3db.hms_hospital):
hospital_items = self.hospital()
datatable_ajax_source = "/%s/default/hospital.aadata" % \
appname
s3.actions = None
hospital_box = DIV(H3(T("Hospitals")),
A(T("Create Hospital"),
_href = URL(c="hms", f="hospital",
args=["create"]),
_id = "add-btn",
_class = "action-btn",
_style = "margin-right:10px;"),
hospital_items,
_id = "org_box",
_class = "menu_box fleft"
)
else:
hospital_box = ""
datatable_ajax_source = ""
item = ""
if settings.has_module("cms"):
table = s3db.cms_post
item = db(table.module == "default").select(table.body,
limitby=(0, 1)).first()
if item:
item = DIV(XML(item.body))
else:
item = ""
# Login/Registration forms
self_registration = settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
register_form = auth.s3_registration_form()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
if request.env.request_method == "POST":
post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
if settings.frontpage.rss:
s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
counter = 0
feeds = ""
for feed in settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title:'%s',\n" % feed["title"],
"url:'%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(settings.frontpage.rss):
feeds += ",\n"
# feedCycleTime: milliseconds before feed is reloaded (5 minutes)
feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:5,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
s3.js_global.append(feed_control)
view = path.join(request.folder, "private", "templates",
"Sandy", "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
return dict(title = response.title,
item = item,
hospital_box = hospital_box,
r = None, # Required for dataTable to work
datatable_ajax_source = datatable_ajax_source,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# -------------------------------------------------------------------------
@staticmethod
def hospital():
"""
Function to handle pagination for the hospitals list
on the homepage
"""
request = current.request
get_vars = request.get_vars
resource = current.s3db.resource("hms_hospital")
totalrows = resource.count()
if "iDisplayLength" in get_vars:
display_length = int(request.get_vars["iDisplayLength"])
else:
display_length = 10
limit = 4 * display_length
list_fields = ["id", "name"]
filter, orderby, left = resource.datatable_filter(list_fields,
get_vars)
resource.add_filter(filter)
data = resource.select(list_fields,
start=0,
limit=limit,
orderby=orderby,
left=left,
count=True,
represent=True)
filteredrows = data["numrows"]
rfields = data["rfields"]
rows = data["rows"]
dt = S3DataTable(rfields, rows)
dt.defaultActionButtons(resource)
current.response.s3.no_formats = True
if request.extension == "html":
items = dt.html(totalrows,
totalrows,
"hospital_list_1",
dt_displayLength=display_length,
dt_ajax_url=URL(c="default",
f="hospital",
extension="aadata",
vars={"id": "hospital_list_1"},
),
dt_pagination="true",
)
elif request.extension == "aadata":
if "sEcho" in request.vars:
echo = int(request.vars.sEcho)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
"hospital_list_1",
echo)
else:
from gluon.http import HTTP
raise HTTP(501, current.ERROR.BAD_FORMAT)
return items
# END =========================================================================
|
mit
| 2,980,182,400,315,720,700
| 36.880342
| 118
| 0.463109
| false
| 4.587992
| false
| false
| false
|
M0n0xy2/mpsi_python_cours
|
python/charles/mini-projet 3/PPCLS.py
|
1
|
2272
|
# -*- coding: utf-8 -*-
# ***************************
# PCSIB DM3 2015
# Name: Dizier Charles
# ****************************
import random
equi_num_object = {
1: "pierre",
2: "papier",
3: "ciseaux",
4: "lezard",
5: "spock"
}
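# Maps "player_choice, ia_choice" strings to True when the player's choice
# beats the IA's choice, and to False otherwise.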
winner_dict = {
"papier, ciseaux": False,
"ciseaux, papier": True,
"papier, pierre": True,
"pierre, papier": False,
"pierre, lezard": True,
"lezard, pierre": False,
"lezard, spock": True,
"spock, lezard": False,
"spock, ciseaux": True,
"ciseaux, spock": False,
"lezard, ciseaux": False,
"ciseaux, lezard": True,
"papier, lezard": False,
"lezard, papier": True,
"papier, spock": True,
"spock, papier": False,
"spock, pierre": True,
"pierre, spock": False,
"pierre, ciseaux": True,
"ciseaux, pierre": True
}
def combat(player, ia):
global ia_score, player_score
combat_str = "{}, {}".format(equi_num_object[player], equi_num_object[ia])
player_winner = winner_dict[combat_str]
if player_winner:
player_score += 1
elif not player_winner:
ia_score += 1
def enregistrevainqueur():
out_file = open("resultatsPPCLS.txt", "a")
if ia_score > player_score:
winner = '"IA"'
winner_score, looser_score = ia_score, player_score
else:
winner = '"Joueur"'
winner_score, looser_score = player_score, ia_score
print("Vainqueur : {} sur un score de {} contre {} points".format(winner, winner_score, looser_score), file=out_file)
if __name__ == "__main__":
ia_score = 0
player_score = 0
while ia_score < 5 and player_score < 5:
print("Choix de jeu :")
for num, obj in equi_num_object.items():
print("{} pour {}".format(num, obj))
player_play = int(input('Entrez 1,2,3,4 ou 5 selon votre choix de jeu: '))
ia_play = random.randint(1, 5)
if player_play == ia_play:
print("Egalité ! On recommence")
continue
combat(player_play, ia_play)
print("===================")
print("Scores actuels : ")
print("Joueur: {} point(s)".format(player_score))
print("IA: {} point(s)".format(ia_score))
print("===================")
enregistrevainqueur()
|
mit
| 5,174,479,759,390,140,000
| 27.3875
| 121
| 0.547336
| false
| 2.904092
| false
| false
| false
|
franziz/artagger
|
artagger/Utility/Eval.py
|
1
|
2652
|
# -*- coding: utf-8 -*-
import os
import sys
os.chdir("../")
sys.setrecursionlimit(100000)
sys.path.append(os.path.abspath(""))
os.chdir("./Utility")
from Utility.Utils import getWordTag, readDictionary
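# Each corpus is read as whitespace-separated tokens; getWordTag extracts the
# word and the tag from every token.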
def computeAccuracy(goldStandardCorpus, taggedCorpus):
tagged = open(taggedCorpus, "r").read().split()
goldStandard = open(goldStandardCorpus, "r").read().split()
if len(tagged) != len(goldStandard):
print("The numbers of word tokens in %s and %s are not equal!" % (goldStandardCorpus, taggedCorpus))
return 0
numwords = 0
count = 0
for i in range(len(tagged)):
numwords += 1
word1, tag1 = getWordTag(tagged[i])
word2, tag2 = getWordTag(goldStandard[i])
if word1 != word2 and word1 != "''" and word2 != "''":
print("Words are not the same in gold standard and tagged corpora, at the index", i)
return 0
if tag1.lower() == tag2.lower():
count += 1
return count * 100.0 / numwords
def computeAccuracies(fullDictFile, goldStandardCorpus, taggedCorpus):
"""
Return known-word accuracy, unknown-word accuracy and the overall accuracy
"""
tagged = open(taggedCorpus, "r").read().split()
goldStandard = open(goldStandardCorpus, "r").read().split()
if len(tagged) != len(goldStandard):
print("The numbers of word tokens in %s and %s are not equal!" % (goldStandardCorpus, taggedCorpus))
return 0
fullDICT = readDictionary(fullDictFile)
numwords = count = 0
countKN = countUNKN = 0
countCorrectKN = countCorrectUNKN = 0
for i in range(len(tagged)):
numwords += 1
word1, tag1 = getWordTag(tagged[i])
word2, tag2 = getWordTag(goldStandard[i])
if word1 != word2 and word1 != "''" and word2 != "''":
print("Words are not the same in gold standard and tagged corpora, at the index", i)
return 0
if tag1.lower() == tag2.lower():
count += 1
if word1 in fullDICT:
countKN += 1
if tag1.lower() == tag2.lower():
countCorrectKN += 1
else:
countUNKN += 1
if tag1.lower() == tag2.lower():
countCorrectUNKN += 1
if countUNKN == 0:
return countCorrectKN * 100.0 / countKN, 0.0, count * 100.0 / numwords
else:
return countCorrectKN * 100.0 / countKN, countCorrectUNKN * 100.0 / countUNKN, count * 100.0 / numwords
if __name__ == "__main__":
print(computeAccuracy(sys.argv[1], sys.argv[2]), "%")
pass
|
apache-2.0
| -2,303,381,570,509,057,000
| 33.441558
| 111
| 0.584087
| false
| 3.678225
| false
| false
| false
|
tsbischof/photon_correlation
|
scripts/plot_intensity.py
|
1
|
2283
|
#!/usr/bin/env python3
import csv
import sys
import argparse
import matplotlib.pyplot as plt
import photon_correlation as pc
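# Each CSV row of an intensity stream is: bin start time, bin end time,
# followed by one count column per detection channel.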
def intensity_from_stream(stream):
for line in csv.reader(stream):
time_left = int(line[0])
time_right = int(line[1])
counts = map(int, line[2:])
yield(((time_left, time_right), counts))
def plot_intensity(intensity, mode="t2"):
plt.clf()
if mode == "t2":
times = list(map(lambda x: float(x[0][0])/1e12, intensity))
counts = list(map(
lambda x: list(map(
lambda y: float(y)/(x[0][1]-x[0][0])*10**12,
x[1])),
intensity))
for i in range(len(counts[0])):
plt.plot(times,
list(map(lambda x: x[i], counts)),
label=str(i))
plt.xlabel("Time/s")
plt.ylabel("PL intensity/(counts/second)")
elif mode == "t3":
times = list(map(lambda x: float(x[0][0]), intensity))
counts = list(map(
lambda x: list(map(
lambda y: float(y)/(x[0][1]-x[0][0]),
x[1])),
intensity))
for i in range(len(counts[0])):
plt.plot(times,
list(map(lambda x: x[i], counts)),
label=str(i))
plt.xlabel("Pulse number")
plt.ylabel("PL intensity/(counts/pulse)")
else:
raise(ValueError("Unknown mode: {0}".format(mode)))
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plot an intensity.")
parser.add_argument("--mode", default="t2", type=str,
help="Mode of the photons, either t2 or t3.")
parser.add_argument("files", type=str, nargs="*",
help="Filenames containing g2 data to plot.")
args = parser.parse_args()
for filename in args.files:
intensity = pc.Intensity(filename=filename)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
intensity.normalized().add_to_axes(ax)
plt.show(fig)
# with open(filename) as stream_in:
# intensity = list(intensity_from_stream(stream_in))
#
# plot_intensity(intensity, mode=args.mode)
|
bsd-3-clause
| 7,184,816,256,500,360,000
| 28.269231
| 70
| 0.526938
| false
| 3.646965
| false
| false
| false
|
rlindner81/pyload
|
module/plugins/hoster/ZbigzCom.py
|
1
|
4492
|
# -*- coding: utf-8 -*-
import random
import re
import time
import urlparse
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import json
class ZbigzCom(Hoster):
__name__ = "ZbigzCom"
__type__ = "hoster"
__version__ = "0.02"
__status__ = "testing"
__pattern__ = r'https?://.+\.torrent|magnet:\?.+'
__config__ = [("activated", "bool", "Activated", False)]
__description__ = """Zbigz.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT}yahoo[DOT]com")]
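    # Helper that mimics the site's jQuery JSONP calls: it builds a
    # "jQuery<call_id>_<millis>" callback name, performs the GET request and
    # extracts the JSON payload from the callback wrapper in the response.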
def jquery_call(self, url, file_id, call_id, **kwargs):
current_millis = int(time.time() * 1000)
json_callback = "jQuery" + call_id + "_" + str(current_millis)
urlp = urlparse.urlparse(url)
get_params = kwargs.copy()
get_params.update(urlparse.parse_qs(urlp.query))
get_params['hash'] = file_id
get_params['jsoncallback'] = json_callback
get_params['_'] = current_millis
jquery_data = self.load(
urlp.scheme +
"://" +
urlp.netloc +
urlp.path,
get=get_params)
m = re.search("%s\((.+?)\);" % json_callback, jquery_data)
return json.loads(m.group(1)) if m else None
def sleep(self, sec):
for _i in range(sec):
if self.pyfile.abort:
break
time.sleep(1)
def process(self, pyfile):
self.data = self.load("http://m.zbigz.com/myfiles",
post={'url': pyfile.url})
if "Error. Only premium members are able to download" in self.data:
self.fail(_("File can be downloaded by premium users only"))
m = re.search(r'&hash=(\w+)"', self.data)
if m is None:
self.fail("Hash not found")
file_id = m.group(1)
call_id = "".join([random.choice("0123456789") for _x in range(20)])
self.pyfile.setCustomStatus("torrent")
self.pyfile.setProgress(0)
json_data = self.jquery_call(
"http://m.zbigz.com/core/info.php", file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
pyfile.name = json_data['info']['name'] + \
(".zip" if len(json_data['files']) > 1 else "")
pyfile.size = json_data['info']['size']
while True:
json_data = self.jquery_call(
"http://m.zbigz.com/core/info.php", file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
progress = int(json_data['info']['progress'])
pyfile.setProgress(progress)
if json_data['info']['state'] != "downloading" or progress == 100:
break
self.sleep(5)
pyfile.setProgress(100)
if len(json_data['files']) == 1:
download_url = "http://m.zbigz.com/file/%s/0" % file_id
else:
self.data = self.load("http://m.zbigz.com/file/%s/-1" % file_id)
m = re.search(
r'\'(http://\w+.zbigz.com/core/zipstate.php\?hash=%s&did=(\w+)).+?\'' %
file_id, self.data)
if m is None:
self.fail("Zip state URL not found")
zip_status_url = m.group(1)
download_id = m.group(2)
m = re.search(
r'\'(http://\w+.zbigz.com/z/%s/.+?)\'' %
download_id, self.data)
if m is None:
self.fail("Zip download URL not found")
download_url = m.group(1)
self.pyfile.setCustomStatus("zip")
self.pyfile.setProgress(0)
while True:
json_data = self.jquery_call(zip_status_url, file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
progress = int(json_data['proc'])
self.pyfile.setProgress(progress)
if progress == 100:
break
self.sleep(5)
self.download(download_url)
self.load("http://m.zbigz.com/delete.php?hash=%s" % file_id)
|
gpl-3.0
| 4,467,117,906,312,538,000
| 29.557823
| 87
| 0.518923
| false
| 3.5936
| false
| false
| false
|
jptomo/rpython-lang-scheme
|
rpython/rtyper/rclass.py
|
1
|
47390
|
import sys
import types
from rpython.flowspace.model import Constant
from rpython.flowspace.operation import op
from rpython.annotator import description, model as annmodel
from rpython.rlib.objectmodel import UnboxedValue
from rpython.tool.pairtype import pairtype, pair
from rpython.tool.identity_dict import identity_dict
from rpython.tool.flattenrec import FlattenRecursion
from rpython.rtyper.extregistry import ExtRegistryEntry
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lltype import (
Ptr, Struct, GcStruct, malloc, cast_pointer, castable, nullptr,
RuntimeTypeInfo, getRuntimeTypeInfo, typeOf, Void, FuncType, Bool, Signed,
functionptr)
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.rmodel import (
Repr, getgcflavor, inputconst, warning, mangle)
class FieldListAccessor(object):
def initialize(self, TYPE, fields):
assert type(fields) is dict
self.TYPE = TYPE
self.fields = fields
for x in fields.itervalues():
assert isinstance(x, ImmutableRanking)
def all_immutable_fields(self):
result = set()
for key, value in self.fields.iteritems():
if value in (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY):
result.add(key)
return result
def __repr__(self):
return '<FieldListAccessor for %s>' % getattr(self, 'TYPE', '?')
class ImmutableRanking(object):
def __init__(self, name, is_immutable):
self.name = name
self.is_immutable = is_immutable
def __nonzero__(self):
return self.is_immutable
def __repr__(self):
return '<%s>' % self.name
IR_MUTABLE = ImmutableRanking('mutable', False)
IR_IMMUTABLE = ImmutableRanking('immutable', True)
IR_IMMUTABLE_ARRAY = ImmutableRanking('immutable_array', True)
IR_QUASIIMMUTABLE = ImmutableRanking('quasiimmutable', False)
IR_QUASIIMMUTABLE_ARRAY = ImmutableRanking('quasiimmutable_array', False)
class ImmutableConflictError(Exception):
"""Raised when the _immutable_ or _immutable_fields_ hints are
not consistent across a class hierarchy."""
def getclassrepr(rtyper, classdef):
if classdef is None:
return rtyper.rootclass_repr
result = classdef.repr
if result is None:
result = classdef.repr = ClassRepr(rtyper, classdef)
rtyper.add_pendingsetup(result)
return result
def getinstancerepr(rtyper, classdef, default_flavor='gc'):
if classdef is None:
flavor = default_flavor
else:
flavor = getgcflavor(classdef)
try:
result = rtyper.instance_reprs[classdef, flavor]
except KeyError:
result = buildinstancerepr(rtyper, classdef, gcflavor=flavor)
rtyper.instance_reprs[classdef, flavor] = result
rtyper.add_pendingsetup(result)
return result
def buildinstancerepr(rtyper, classdef, gcflavor='gc'):
from rpython.rtyper.rvirtualizable import VirtualizableInstanceRepr
if classdef is None:
unboxed = []
virtualizable = False
else:
unboxed = [subdef for subdef in classdef.getallsubdefs() if
subdef.classdesc.pyobj is not None and
issubclass(subdef.classdesc.pyobj, UnboxedValue)]
virtualizable = classdef.classdesc.read_attribute(
'_virtualizable_', Constant(False)).value
config = rtyper.annotator.translator.config
usetagging = len(unboxed) != 0 and config.translation.taggedpointers
if virtualizable:
assert len(unboxed) == 0
assert gcflavor == 'gc'
return VirtualizableInstanceRepr(rtyper, classdef)
elif usetagging:
# the UnboxedValue class and its parent classes need a
# special repr for their instances
if len(unboxed) != 1:
raise TyperError("%r has several UnboxedValue subclasses" % (
classdef,))
assert gcflavor == 'gc'
from rpython.rtyper.lltypesystem import rtagged
return rtagged.TaggedInstanceRepr(rtyper, classdef, unboxed[0])
else:
return InstanceRepr(rtyper, classdef, gcflavor)
class MissingRTypeAttribute(TyperError):
pass
# ____________________________________________________________
#
# There is one "vtable" per user class, with the following structure:
# A root class "object" has:
#
# struct object_vtable {
# // struct object_vtable* parenttypeptr; not used any more
# RuntimeTypeInfo * rtti;
# Signed subclassrange_min; //this is also the id of the class itself
# Signed subclassrange_max;
# RPyString * name;
# struct object * instantiate();
# }
#
# Every other class X, with parent Y, has the structure:
#
# struct vtable_X {
# struct vtable_Y super; // inlined
# ... // extra class attributes
# }
# The type of the instances is:
#
# struct object { // for the root class
# struct object_vtable* typeptr;
# }
#
# struct X {
# struct Y super; // inlined
# ... // extra instance attributes
# }
#
# there's also a nongcobject
OBJECT_VTABLE = lltype.ForwardReference()
CLASSTYPE = Ptr(OBJECT_VTABLE)
OBJECT = GcStruct('object', ('typeptr', CLASSTYPE),
hints={'immutable': True, 'shouldntbenull': True,
'typeptr': True},
rtti=True)
OBJECTPTR = Ptr(OBJECT)
OBJECT_VTABLE.become(Struct('object_vtable',
#('parenttypeptr', CLASSTYPE),
('subclassrange_min', Signed),
('subclassrange_max', Signed),
('rtti', Ptr(RuntimeTypeInfo)),
('name', Ptr(rstr.STR)),
('hash', Signed),
('instantiate', Ptr(FuncType([], OBJECTPTR))),
hints={'immutable': True}))
# non-gc case
NONGCOBJECT = Struct('nongcobject', ('typeptr', CLASSTYPE))
NONGCOBJECTPTR = Ptr(NONGCOBJECT)
OBJECT_BY_FLAVOR = {'gc': OBJECT, 'raw': NONGCOBJECT}
LLFLAVOR = {'gc': 'gc', 'raw': 'raw', 'stack': 'raw'}
def cast_vtable_to_typeptr(vtable):
while typeOf(vtable).TO != OBJECT_VTABLE:
vtable = vtable.super
return vtable
def alloc_array_name(name):
return rstr.string_repr.convert_const(name)
class ClassRepr(Repr):
def __init__(self, rtyper, classdef):
self.rtyper = rtyper
self.classdef = classdef
self.vtable_type = lltype.ForwardReference()
self.lowleveltype = Ptr(self.vtable_type)
def __repr__(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return '<ClassRepr for %s>' % (clsname,)
def compact_repr(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return 'ClassR %s' % (clsname,)
def convert_desc(self, desc):
subclassdef = desc.getuniqueclassdef()
if self.classdef is not None:
if self.classdef.commonbase(subclassdef) != self.classdef:
raise TyperError("not a subclass of %r: %r" % (
self.classdef.name, desc))
r_subclass = getclassrepr(self.rtyper, subclassdef)
return r_subclass.getruntime(self.lowleveltype)
def convert_const(self, value):
if not isinstance(value, (type, types.ClassType)):
raise TyperError("not a class: %r" % (value,))
bk = self.rtyper.annotator.bookkeeper
return self.convert_desc(bk.getdesc(value))
def prepare_method(self, s_value):
# special-casing for methods:
# if s_value is SomePBC([MethodDescs...])
# return a PBC representing the underlying functions
if (isinstance(s_value, annmodel.SomePBC) and
s_value.getKind() == description.MethodDesc):
s_value = self.classdef.lookup_filter(s_value)
funcdescs = [mdesc.funcdesc for mdesc in s_value.descriptions]
return annmodel.SomePBC(funcdescs)
return None # not a method
def get_ll_eq_function(self):
return None
def _setup_repr(self):
# NOTE: don't store mutable objects like the dicts below on 'self'
# before they are fully built, to avoid strange bugs in case
        # of recursion where other code would use these
# partially-initialized dicts.
clsfields = {}
pbcfields = {}
allmethods = {}
# class attributes
llfields = []
for name, attrdef in self.classdef.attrs.items():
if attrdef.readonly:
s_value = attrdef.s_value
s_unboundmethod = self.prepare_method(s_value)
if s_unboundmethod is not None:
allmethods[name] = True
s_value = s_unboundmethod
r = self.rtyper.getrepr(s_value)
mangled_name = 'cls_' + name
clsfields[name] = mangled_name, r
llfields.append((mangled_name, r.lowleveltype))
# attributes showing up in getattrs done on the class as a PBC
extra_access_sets = self.classdef.extra_access_sets
for access_set, (attr, counter) in extra_access_sets.items():
r = self.rtyper.getrepr(access_set.s_value)
mangled_name = mangle('pbc%d' % counter, attr)
pbcfields[access_set, attr] = mangled_name, r
llfields.append((mangled_name, r.lowleveltype))
llfields.sort()
llfields.sort(key=attr_reverse_size)
#
self.rbase = getclassrepr(self.rtyper, self.classdef.basedef)
self.rbase.setup()
kwds = {'hints': {'immutable': True}}
vtable_type = Struct('%s_vtable' % self.classdef.name,
('super', self.rbase.vtable_type),
*llfields, **kwds)
self.vtable_type.become(vtable_type)
allmethods.update(self.rbase.allmethods)
self.clsfields = clsfields
self.pbcfields = pbcfields
self.allmethods = allmethods
self.vtable = None
def getvtable(self):
"""Return a ptr to the vtable of this type."""
if self.vtable is None:
self.init_vtable()
return cast_vtable_to_typeptr(self.vtable)
def getruntime(self, expected_type):
assert expected_type == CLASSTYPE
return self.getvtable()
def init_vtable(self):
"""Create the actual vtable"""
self.vtable = malloc(self.vtable_type, immortal=True)
vtable_part = self.vtable
r_parentcls = self
while r_parentcls.classdef is not None:
self.setup_vtable(vtable_part, r_parentcls)
vtable_part = vtable_part.super
r_parentcls = r_parentcls.rbase
self.fill_vtable_root(vtable_part)
def setup_vtable(self, vtable, r_parentcls):
"""Initialize the vtable portion corresponding to 'r_parentcls'."""
# setup class attributes: for each attribute name at the level
# of 'r_parentcls', look up its value in the class
def assign(mangled_name, value):
if (isinstance(value, Constant) and
isinstance(value.value, staticmethod)):
value = Constant(value.value.__get__(42)) # staticmethod => bare function
llvalue = r.convert_desc_or_const(value)
setattr(vtable, mangled_name, llvalue)
for fldname in r_parentcls.clsfields:
mangled_name, r = r_parentcls.clsfields[fldname]
if r.lowleveltype is Void:
continue
value = self.classdef.classdesc.read_attribute(fldname, None)
if value is not None:
assign(mangled_name, value)
# extra PBC attributes
for (access_set, attr), (mangled_name, r) in r_parentcls.pbcfields.items():
if self.classdef.classdesc not in access_set.descs:
continue # only for the classes in the same pbc access set
if r.lowleveltype is Void:
continue
attrvalue = self.classdef.classdesc.read_attribute(attr, None)
if attrvalue is not None:
assign(mangled_name, attrvalue)
def fill_vtable_root(self, vtable):
"""Initialize the head of the vtable."""
vtable.hash = hash(self)
# initialize the 'subclassrange_*' and 'name' fields
if self.classdef is not None:
#vtable.parenttypeptr = self.rbase.getvtable()
vtable.subclassrange_min = self.classdef.minid
vtable.subclassrange_max = self.classdef.maxid
else: # for the root class
vtable.subclassrange_min = 0
vtable.subclassrange_max = sys.maxint
rinstance = getinstancerepr(self.rtyper, self.classdef)
rinstance.setup()
if rinstance.gcflavor == 'gc':
vtable.rtti = getRuntimeTypeInfo(rinstance.object_type)
if self.classdef is None:
name = 'object'
else:
name = self.classdef.shortname
vtable.name = alloc_array_name(name)
if hasattr(self.classdef, 'my_instantiate_graph'):
graph = self.classdef.my_instantiate_graph
vtable.instantiate = self.rtyper.getcallable(graph)
#else: the classdef was created recently, so no instantiate()
# could reach it
def fromtypeptr(self, vcls, llops):
"""Return the type pointer cast to self's vtable type."""
self.setup()
castable(self.lowleveltype, vcls.concretetype) # sanity check
return llops.genop('cast_pointer', [vcls],
resulttype=self.lowleveltype)
fromclasstype = fromtypeptr
def getclsfield(self, vcls, attr, llops):
"""Read the given attribute of 'vcls'."""
if attr in self.clsfields:
mangled_name, r = self.clsfields[attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
return llops.genop('getfield', [v_vtable, cname], resulttype=r)
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getclsfield(vcls, attr, llops)
def setclsfield(self, vcls, attr, vvalue, llops):
"""Write the given attribute of 'vcls'."""
if attr in self.clsfields:
mangled_name, r = self.clsfields[attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
llops.genop('setfield', [v_vtable, cname, vvalue])
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
self.rbase.setclsfield(vcls, attr, vvalue, llops)
def getpbcfield(self, vcls, access_set, attr, llops):
if (access_set, attr) not in self.pbcfields:
raise TyperError("internal error: missing PBC field")
mangled_name, r = self.pbcfields[access_set, attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
return llops.genop('getfield', [v_vtable, cname], resulttype=r)
def rtype_issubtype(self, hop):
class_repr = get_type_repr(self.rtyper)
v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
if isinstance(v_cls2, Constant):
cls2 = v_cls2.value
minid = hop.inputconst(Signed, cls2.subclassrange_min)
maxid = hop.inputconst(Signed, cls2.subclassrange_max)
return hop.gendirectcall(ll_issubclass_const, v_cls1, minid,
maxid)
else:
v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
return hop.gendirectcall(ll_issubclass, v_cls1, v_cls2)
class RootClassRepr(ClassRepr):
"""ClassRepr for the root of the class hierarchy"""
classdef = None
def __init__(self, rtyper):
self.rtyper = rtyper
self.vtable_type = OBJECT_VTABLE
self.lowleveltype = Ptr(self.vtable_type)
def _setup_repr(self):
self.clsfields = {}
self.pbcfields = {}
self.allmethods = {}
self.vtable = None
def init_vtable(self):
self.vtable = malloc(self.vtable_type, immortal=True)
self.fill_vtable_root(self.vtable)
def get_type_repr(rtyper):
return rtyper.rootclass_repr
# ____________________________________________________________
class __extend__(annmodel.SomeInstance):
def rtyper_makerepr(self, rtyper):
return getinstancerepr(rtyper, self.classdef)
def rtyper_makekey(self):
return self.__class__, self.classdef
class __extend__(annmodel.SomeType):
def rtyper_makerepr(self, rtyper):
return get_type_repr(rtyper)
def rtyper_makekey(self):
return self.__class__,
class InstanceRepr(Repr):
def __init__(self, rtyper, classdef, gcflavor='gc'):
self.rtyper = rtyper
self.classdef = classdef
if classdef is None:
self.object_type = OBJECT_BY_FLAVOR[LLFLAVOR[gcflavor]]
else:
ForwardRef = lltype.FORWARDREF_BY_FLAVOR[LLFLAVOR[gcflavor]]
self.object_type = ForwardRef()
self.iprebuiltinstances = identity_dict()
self.lowleveltype = Ptr(self.object_type)
self.gcflavor = gcflavor
def _setup_repr(self, llfields=None, hints=None, adtmeths=None):
# NOTE: don't store mutable objects like the dicts below on 'self'
# before they are fully built, to avoid strange bugs in case
        # of recursion where other code would use these
# partially-initialized dicts.
if self.classdef is None:
self.immutable_field_set = set()
self.rclass = getclassrepr(self.rtyper, self.classdef)
fields = {}
allinstancefields = {}
if self.classdef is None:
fields['__class__'] = 'typeptr', get_type_repr(self.rtyper)
else:
# instance attributes
attrs = self.classdef.attrs.items()
attrs.sort()
myllfields = []
for name, attrdef in attrs:
if not attrdef.readonly:
r = self.rtyper.getrepr(attrdef.s_value)
mangled_name = 'inst_' + name
fields[name] = mangled_name, r
myllfields.append((mangled_name, r.lowleveltype))
myllfields.sort(key=attr_reverse_size)
if llfields is None:
llfields = myllfields
else:
llfields = llfields + myllfields
self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef,
self.gcflavor)
self.rbase.setup()
MkStruct = lltype.STRUCT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
if adtmeths is None:
adtmeths = {}
if hints is None:
hints = {}
hints = self._check_for_immutable_hints(hints)
kwds = {}
if self.gcflavor == 'gc':
kwds['rtti'] = True
for name, attrdef in attrs:
if not attrdef.readonly and self.is_quasi_immutable(name):
llfields.append(('mutate_' + name, OBJECTPTR))
object_type = MkStruct(self.classdef.name,
('super', self.rbase.object_type),
hints=hints,
adtmeths=adtmeths,
*llfields,
**kwds)
self.object_type.become(object_type)
allinstancefields.update(self.rbase.allinstancefields)
allinstancefields.update(fields)
self.fields = fields
self.allinstancefields = allinstancefields
def _check_for_immutable_hints(self, hints):
loc = self.classdef.classdesc.lookup('_immutable_')
if loc is not None:
if loc is not self.classdef.classdesc:
raise ImmutableConflictError(
"class %r inherits from its parent _immutable_=True, "
"so it should also declare _immutable_=True" % (
self.classdef,))
if loc.classdict.get('_immutable_').value is not True:
raise TyperError(
"class %r: _immutable_ = something else than True" % (
self.classdef,))
hints = hints.copy()
hints['immutable'] = True
self.immutable_field_set = set() # unless overwritten below
if self.classdef.classdesc.lookup('_immutable_fields_') is not None:
hints = hints.copy()
immutable_fields = self.classdef.classdesc.classdict.get(
'_immutable_fields_')
if immutable_fields is not None:
self.immutable_field_set = set(immutable_fields.value)
accessor = FieldListAccessor()
hints['immutable_fields'] = accessor
return hints
def __repr__(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return '<InstanceRepr for %s>' % (clsname,)
def compact_repr(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return 'InstanceR %s' % (clsname,)
def _setup_repr_final(self):
self._setup_immutable_field_list()
self._check_for_immutable_conflicts()
if self.gcflavor == 'gc':
if (self.classdef is not None and
self.classdef.classdesc.lookup('__del__') is not None):
s_func = self.classdef.classdesc.s_read_attribute('__del__')
source_desc = self.classdef.classdesc.lookup('__del__')
source_classdef = source_desc.getclassdef(None)
source_repr = getinstancerepr(self.rtyper, source_classdef)
assert len(s_func.descriptions) == 1
funcdesc, = s_func.descriptions
graph = funcdesc.getuniquegraph()
self.check_graph_of_del_does_not_call_too_much(graph)
FUNCTYPE = FuncType([Ptr(source_repr.object_type)], Void)
destrptr = functionptr(FUNCTYPE, graph.name,
graph=graph,
_callable=graph.func)
else:
destrptr = None
OBJECT = OBJECT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
self.rtyper.attachRuntimeTypeInfoFunc(self.object_type,
ll_runtime_type_info,
OBJECT, destrptr)
vtable = self.rclass.getvtable()
self.rtyper.set_type_for_typeptr(vtable, self.lowleveltype.TO)
def _setup_immutable_field_list(self):
hints = self.object_type._hints
if "immutable_fields" in hints:
accessor = hints["immutable_fields"]
if not hasattr(accessor, 'fields'):
immutable_fields = set()
rbase = self
while rbase.classdef is not None:
immutable_fields.update(rbase.immutable_field_set)
rbase = rbase.rbase
self._parse_field_list(immutable_fields, accessor, hints)
def _parse_field_list(self, fields, accessor, hints):
ranking = {}
for name in fields:
quasi = False
if name.endswith('?[*]'): # a quasi-immutable field pointing to
name = name[:-4] # an immutable array
rank = IR_QUASIIMMUTABLE_ARRAY
quasi = True
elif name.endswith('[*]'): # for virtualizables' lists
name = name[:-3]
rank = IR_IMMUTABLE_ARRAY
elif name.endswith('?'): # a quasi-immutable field
name = name[:-1]
rank = IR_QUASIIMMUTABLE
quasi = True
else: # a regular immutable/green field
rank = IR_IMMUTABLE
try:
mangled_name, r = self._get_field(name)
except KeyError:
continue
if quasi and hints.get("immutable"):
raise TyperError(
"can't have _immutable_ = True and a quasi-immutable field "
"%s in class %s" % (name, self.classdef))
ranking[mangled_name] = rank
accessor.initialize(self.object_type, ranking)
return ranking
def _check_for_immutable_conflicts(self):
# check for conflicts, i.e. a field that is defined normally as
# mutable in some parent class but that is now declared immutable
is_self_immutable = "immutable" in self.object_type._hints
base = self
while base.classdef is not None:
base = base.rbase
for fieldname in base.fields:
try:
mangled, r = base._get_field(fieldname)
except KeyError:
continue
if r.lowleveltype == Void:
continue
base._setup_immutable_field_list()
if base.object_type._immutable_field(mangled):
continue
# 'fieldname' is a mutable, non-Void field in the parent
if is_self_immutable:
raise ImmutableConflictError(
"class %r has _immutable_=True, but parent class %r "
"defines (at least) the mutable field %r" %
(self, base, fieldname))
if (fieldname in self.immutable_field_set or
(fieldname + '?') in self.immutable_field_set):
raise ImmutableConflictError(
"field %r is defined mutable in class %r, but "
"listed in _immutable_fields_ in subclass %r" %
(fieldname, base, self))
def hook_access_field(self, vinst, cname, llops, flags):
pass # for virtualizables; see rvirtualizable.py
def hook_setfield(self, vinst, fieldname, llops):
if self.is_quasi_immutable(fieldname):
c_fieldname = inputconst(Void, 'mutate_' + fieldname)
llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname])
def is_quasi_immutable(self, fieldname):
search1 = fieldname + '?'
search2 = fieldname + '?[*]'
rbase = self
while rbase.classdef is not None:
if (search1 in rbase.immutable_field_set or
search2 in rbase.immutable_field_set):
return True
rbase = rbase.rbase
return False
def new_instance(self, llops, classcallhop=None, nonmovable=False):
"""Build a new instance, without calling __init__."""
flavor = self.gcflavor
flags = {'flavor': flavor}
if nonmovable:
flags['nonmovable'] = True
ctype = inputconst(Void, self.object_type)
cflags = inputconst(Void, flags)
vlist = [ctype, cflags]
vptr = llops.genop('malloc', vlist,
resulttype=Ptr(self.object_type))
ctypeptr = inputconst(CLASSTYPE, self.rclass.getvtable())
self.setfield(vptr, '__class__', ctypeptr, llops)
# initialize instance attributes from their defaults from the class
if self.classdef is not None:
flds = self.allinstancefields.keys()
flds.sort()
for fldname in flds:
if fldname == '__class__':
continue
mangled_name, r = self.allinstancefields[fldname]
if r.lowleveltype is Void:
continue
value = self.classdef.classdesc.read_attribute(fldname, None)
if value is not None:
ll_value = r.convert_desc_or_const(value)
# don't write NULL GC pointers: we know that the malloc
# done above initialized at least the GC Ptr fields to
# NULL already, and that's true for all our GCs
if (isinstance(r.lowleveltype, Ptr) and
r.lowleveltype.TO._gckind == 'gc' and
not ll_value):
continue
cvalue = inputconst(r.lowleveltype, ll_value)
self.setfield(vptr, fldname, cvalue, llops,
flags={'access_directly': True})
return vptr
def convert_const(self, value):
if value is None:
return self.null_instance()
if isinstance(value, types.MethodType):
value = value.im_self # bound method -> instance
bk = self.rtyper.annotator.bookkeeper
try:
classdef = bk.getuniqueclassdef(value.__class__)
except KeyError:
raise TyperError("no classdef: %r" % (value.__class__,))
if classdef != self.classdef:
# if the class does not match exactly, check that 'value' is an
# instance of a subclass and delegate to that InstanceRepr
if classdef.commonbase(self.classdef) != self.classdef:
raise TyperError("not an instance of %r: %r" % (
self.classdef.name, value))
rinstance = getinstancerepr(self.rtyper, classdef)
result = rinstance.convert_const(value)
return self.upcast(result)
# common case
return self.convert_const_exact(value)
def convert_const_exact(self, value):
try:
return self.iprebuiltinstances[value]
except KeyError:
self.setup()
result = self.create_instance()
self.iprebuiltinstances[value] = result
self.initialize_prebuilt_instance(value, self.classdef, result)
return result
def get_reusable_prebuilt_instance(self):
"Get a dummy prebuilt instance. Multiple calls reuse the same one."
try:
return self._reusable_prebuilt_instance
except AttributeError:
self.setup()
result = self.create_instance()
self._reusable_prebuilt_instance = result
self.initialize_prebuilt_data(Ellipsis, self.classdef, result)
return result
_initialize_data_flattenrec = FlattenRecursion()
def initialize_prebuilt_instance(self, value, classdef, result):
# must fill in the hash cache before the other ones
# (see test_circular_hash_initialization)
self.initialize_prebuilt_hash(value, result)
self._initialize_data_flattenrec(self.initialize_prebuilt_data,
value, classdef, result)
def get_ll_hash_function(self):
return ll_inst_hash
get_ll_fasthash_function = get_ll_hash_function
def rtype_type(self, hop):
if hop.s_result.is_constant():
return hop.inputconst(hop.r_result, hop.s_result.const)
instance_repr = self.common_repr()
vinst, = hop.inputargs(instance_repr)
if hop.args_s[0].can_be_none():
return hop.gendirectcall(ll_inst_type, vinst)
else:
return instance_repr.getfield(vinst, '__class__', hop.llops)
def rtype_getattr(self, hop):
if hop.s_result.is_constant():
return hop.inputconst(hop.r_result, hop.s_result.const)
attr = hop.args_s[1].const
vinst, vattr = hop.inputargs(self, Void)
if attr == '__class__' and hop.r_result.lowleveltype is Void:
# special case for when the result of '.__class__' is a constant
[desc] = hop.s_result.descriptions
return hop.inputconst(Void, desc.pyobj)
if attr in self.allinstancefields:
return self.getfield(vinst, attr, hop.llops,
flags=hop.args_s[0].flags)
elif attr in self.rclass.allmethods:
# special case for methods: represented as their 'self' only
# (see MethodsPBCRepr)
return hop.r_result.get_method_from_instance(self, vinst,
hop.llops)
else:
vcls = self.getfield(vinst, '__class__', hop.llops)
return self.rclass.getclsfield(vcls, attr, hop.llops)
def rtype_setattr(self, hop):
attr = hop.args_s[1].const
r_value = self.getfieldrepr(attr)
vinst, vattr, vvalue = hop.inputargs(self, Void, r_value)
self.setfield(vinst, attr, vvalue, hop.llops,
flags=hop.args_s[0].flags)
def rtype_bool(self, hop):
vinst, = hop.inputargs(self)
return hop.genop('ptr_nonzero', [vinst], resulttype=Bool)
def ll_str(self, i): # doesn't work for non-gc classes!
from rpython.rtyper.lltypesystem.ll_str import ll_int2hex
from rpython.rlib.rarithmetic import r_uint
if not i:
return rstr.null_str
instance = cast_pointer(OBJECTPTR, i)
# Two choices: the first gives a fast answer but it can change
# (typically only once) during the life of the object.
#uid = r_uint(cast_ptr_to_int(i))
uid = r_uint(llop.gc_id(lltype.Signed, i))
#
res = rstr.instance_str_prefix
res = rstr.ll_strconcat(res, instance.typeptr.name)
res = rstr.ll_strconcat(res, rstr.instance_str_infix)
res = rstr.ll_strconcat(res, ll_int2hex(uid, False))
res = rstr.ll_strconcat(res, rstr.instance_str_suffix)
return res
def get_ll_eq_function(self):
return None # defaults to compare by identity ('==' on pointers)
def can_ll_be_null(self, s_value):
return s_value.can_be_none()
def check_graph_of_del_does_not_call_too_much(self, graph):
# RPython-level __del__() methods should not do "too much".
# In the PyPy Python interpreter, they usually do simple things
# like file.__del__() closing the file descriptor; or if they
# want to do more like call an app-level __del__() method, they
# enqueue the object instead, and the actual call is done later.
#
# Here, as a quick way to check "not doing too much", we check
# that from no RPython-level __del__() method we can reach a
# JitDriver.
#
# XXX wrong complexity, but good enough because the set of
# reachable graphs should be small
callgraph = self.rtyper.annotator.translator.callgraph.values()
seen = {graph: None}
while True:
oldlength = len(seen)
for caller, callee in callgraph:
if caller in seen and callee not in seen:
func = getattr(callee, 'func', None)
if getattr(func, '_dont_reach_me_in_del_', False):
lst = [str(callee)]
g = caller
while g:
lst.append(str(g))
g = seen.get(g)
lst.append('')
raise TyperError("the RPython-level __del__() method "
"in %r calls:%s" %
(graph, '\n\t'.join(lst[::-1])))
if getattr(func, '_cannot_really_call_random_things_',
False):
continue
seen[callee] = caller
if len(seen) == oldlength:
break
def common_repr(self): # -> object or nongcobject reprs
return getinstancerepr(self.rtyper, None, self.gcflavor)
def _get_field(self, attr):
return self.fields[attr]
def null_instance(self):
return nullptr(self.object_type)
def upcast(self, result):
return cast_pointer(self.lowleveltype, result)
def create_instance(self):
return malloc(self.object_type, flavor=self.gcflavor, immortal=True)
def initialize_prebuilt_data(self, value, classdef, result):
if self.classdef is not None:
# recursively build the parent part of the instance
self.rbase.initialize_prebuilt_data(value, classdef, result.super)
# then add instance attributes from this level
for name, (mangled_name, r) in self.fields.items():
if r.lowleveltype is Void:
llattrvalue = None
else:
try:
attrvalue = getattr(value, name)
except AttributeError:
attrvalue = self.classdef.classdesc.read_attribute(
name, None)
if attrvalue is None:
# Ellipsis from get_reusable_prebuilt_instance()
#if value is not Ellipsis:
#warning("prebuilt instance %r has no "
# "attribute %r" % (value, name))
llattrvalue = r.lowleveltype._defl()
else:
llattrvalue = r.convert_desc_or_const(attrvalue)
else:
llattrvalue = r.convert_const(attrvalue)
setattr(result, mangled_name, llattrvalue)
else:
# OBJECT part
rclass = getclassrepr(self.rtyper, classdef)
result.typeptr = rclass.getvtable()
def initialize_prebuilt_hash(self, value, result):
llattrvalue = getattr(value, '__precomputed_identity_hash', None)
if llattrvalue is not None:
lltype.init_identity_hash(result, llattrvalue)
def getfieldrepr(self, attr):
"""Return the repr used for the given attribute."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
return r
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getfieldrepr(attr)
def getfield(self, vinst, attr, llops, force_cast=False, flags={}):
"""Read the given attribute (or __class__ for the type) of 'vinst'."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
cname = inputconst(Void, mangled_name)
if force_cast:
vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
self.hook_access_field(vinst, cname, llops, flags)
return llops.genop('getfield', [vinst, cname], resulttype=r)
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getfield(vinst, attr, llops, force_cast=True,
flags=flags)
def setfield(self, vinst, attr, vvalue, llops, force_cast=False,
flags={}):
"""Write the given attribute (or __class__ for the type) of 'vinst'."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
cname = inputconst(Void, mangled_name)
if force_cast:
vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
self.hook_access_field(vinst, cname, llops, flags)
self.hook_setfield(vinst, attr, llops)
llops.genop('setfield', [vinst, cname, vvalue])
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True,
flags=flags)
def rtype_isinstance(self, hop):
class_repr = get_type_repr(hop.rtyper)
instance_repr = self.common_repr()
v_obj, v_cls = hop.inputargs(instance_repr, class_repr)
if isinstance(v_cls, Constant):
cls = v_cls.value
llf, llf_nonnull = make_ll_isinstance(self.rtyper, cls)
if hop.args_s[0].can_be_None:
return hop.gendirectcall(llf, v_obj)
else:
return hop.gendirectcall(llf_nonnull, v_obj)
else:
return hop.gendirectcall(ll_isinstance, v_obj, v_cls)
class __extend__(pairtype(InstanceRepr, InstanceRepr)):
def convert_from_to((r_ins1, r_ins2), v, llops):
# which is a subclass of which?
if r_ins1.classdef is None or r_ins2.classdef is None:
basedef = None
else:
basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
if basedef == r_ins2.classdef:
# r_ins1 is an instance of the subclass: converting to parent
v = llops.genop('cast_pointer', [v],
resulttype=r_ins2.lowleveltype)
return v
elif basedef == r_ins1.classdef:
# r_ins2 is an instance of the subclass: potentially unsafe
# casting, but we do it anyway (e.g. the annotator produces
# such casts after a successful isinstance() check)
v = llops.genop('cast_pointer', [v],
resulttype=r_ins2.lowleveltype)
return v
else:
return NotImplemented
def rtype_is_((r_ins1, r_ins2), hop):
if r_ins1.gcflavor != r_ins2.gcflavor:
            # obscure logic: 'is' can be true only if both are None
v_ins1, v_ins2 = hop.inputargs(
r_ins1.common_repr(), r_ins2.common_repr())
return hop.gendirectcall(ll_both_none, v_ins1, v_ins2)
if r_ins1.classdef is None or r_ins2.classdef is None:
basedef = None
else:
basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
r_ins = getinstancerepr(r_ins1.rtyper, basedef, r_ins1.gcflavor)
return pairtype(Repr, Repr).rtype_is_(pair(r_ins, r_ins), hop)
rtype_eq = rtype_is_
def rtype_ne(rpair, hop):
v = rpair.rtype_eq(hop)
return hop.genop("bool_not", [v], resulttype=Bool)
# ____________________________________________________________
def rtype_new_instance(rtyper, classdef, llops, classcallhop=None,
nonmovable=False):
rinstance = getinstancerepr(rtyper, classdef)
return rinstance.new_instance(llops, classcallhop, nonmovable=nonmovable)
def ll_inst_hash(ins):
if not ins:
return 0 # for None
else:
return lltype.identityhash(ins)
_missing = object()
def fishllattr(inst, name, default=_missing):
p = widest = lltype.normalizeptr(inst)
while True:
try:
return getattr(p, 'inst_' + name)
except AttributeError:
pass
try:
p = p.super
except AttributeError:
break
if default is _missing:
raise AttributeError("%s has no field %s" %
(lltype.typeOf(widest), name))
return default
def attr_reverse_size((_, T)):
# This is used to sort the instance or class attributes by decreasing
# "likely size", as reported by rffi.sizeof(), to minimize padding
# holes in C. Fields should first be sorted by name, just to minimize
# randomness, and then (stably) sorted by 'attr_reverse_size'.
if T is lltype.Void:
return None
from rpython.rtyper.lltypesystem.rffi import sizeof
try:
return -sizeof(T)
except StandardError:
return None
# ____________________________________________________________
#
# Low-level implementation of operations on classes and instances
# doesn't work for non-gc stuff!
def ll_cast_to_object(obj):
return cast_pointer(OBJECTPTR, obj)
# doesn't work for non-gc stuff!
def ll_type(obj):
return cast_pointer(OBJECTPTR, obj).typeptr
def ll_issubclass(subcls, cls):
return llop.int_between(Bool,
cls.subclassrange_min,
subcls.subclassrange_min,
cls.subclassrange_max)
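# (The subclass ids are assigned so that each class's subclasses form a
# contiguous range, so the test above is a single integer range check.)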
def ll_issubclass_const(subcls, minid, maxid):
return llop.int_between(Bool, minid, subcls.subclassrange_min, maxid)
def ll_isinstance(obj, cls): # obj should be cast to OBJECT or NONGCOBJECT
if not obj:
return False
obj_cls = obj.typeptr
return ll_issubclass(obj_cls, cls)
def make_ll_isinstance(rtyper, cls):
try:
return rtyper.isinstance_helpers[cls._obj]
except KeyError:
minid = cls.subclassrange_min
maxid = cls.subclassrange_max
if minid.number_with_subclasses():
def ll_isinstance_const_nonnull(obj):
objid = obj.typeptr.subclassrange_min
return llop.int_between(Bool, minid, objid, maxid)
else:
def ll_isinstance_const_nonnull(obj):
return obj.typeptr == cls
def ll_isinstance_const(obj):
if not obj:
return False
return ll_isinstance_const_nonnull(obj)
result = (ll_isinstance_const, ll_isinstance_const_nonnull)
rtyper.isinstance_helpers[cls._obj] = result
return result
def ll_runtime_type_info(obj):
return obj.typeptr.rtti
def ll_inst_type(obj):
if obj:
return obj.typeptr
else:
# type(None) -> NULL (for now)
return nullptr(typeOf(obj).TO.typeptr.TO)
def ll_both_none(ins1, ins2):
return not ins1 and not ins2
# ____________________________________________________________
def feedllattr(inst, name, llvalue):
p = widest = lltype.normalizeptr(inst)
while True:
try:
return setattr(p, 'inst_' + name, llvalue)
except AttributeError:
pass
try:
p = p.super
except AttributeError:
break
raise AttributeError("%s has no field %s" % (lltype.typeOf(widest),
name))
def declare_type_for_typeptr(vtable, TYPE):
"""Hack for custom low-level-only 'subclasses' of OBJECT:
call this somewhere annotated, in order to declare that it is
of the given TYPE and has got the corresponding vtable."""
class Entry(ExtRegistryEntry):
_about_ = declare_type_for_typeptr
def compute_result_annotation(self, s_vtable, s_TYPE):
assert s_vtable.is_constant()
assert s_TYPE.is_constant()
return annmodel.s_None
def specialize_call(self, hop):
vtable = hop.args_v[0].value
TYPE = hop.args_v[1].value
assert lltype.typeOf(vtable) == CLASSTYPE
assert isinstance(TYPE, GcStruct)
assert lltype._castdepth(TYPE, OBJECT) > 0
hop.rtyper.set_type_for_typeptr(vtable, TYPE)
hop.exception_cannot_occur()
return hop.inputconst(lltype.Void, None)
|
mit
| -5,830,441,345,531,532,000
| 39.195081
| 91
| 0.57375
| false
| 3.872681
| false
| false
| false
|
simplegeo/rtree
|
tests/data.py
|
1
|
1251
|
import os.path
boxes15 = []
f = file(os.path.join(os.path.dirname(__file__), 'boxes_15x15.data'), 'r')
for line in f.readlines():
if not line:
break
[left, bottom, right, top] = [float(x) for x in line.split()]
boxes15.append((left, bottom, right, top))
boxes3 = []
f = file(os.path.join(os.path.dirname(__file__), 'boxes_3x3.data'), 'r')
for line in f.readlines():
if not line:
break
[left, bottom, right, top] = [float(x) for x in line.split()]
boxes3.append((left, bottom, right, top))
points = []
f = file(os.path.join(os.path.dirname(__file__), 'point_clusters.data'), 'r')
for line in f.readlines():
if not line:
break
[left, bottom] = [float(x) for x in line.split()]
points.append((left, bottom))
def draw_data(filename):
from PIL import Image, ImageDraw
im = Image.new('RGB', (1440, 720))
d = ImageDraw.Draw(im)
for box in boxes15:
coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
d.rectangle(coords, outline='red')
for box in boxes3:
coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
d.rectangle(coords, outline='blue')
im.save(filename)
|
lgpl-2.1
| 514,539,122,646,075,300
| 31.076923
| 87
| 0.577138
| false
| 2.889145
| false
| false
| false
|
kvaps/vdsm
|
vdsm/network/configurators/dhclient.py
|
1
|
4498
|
# Copyright (C) 2013, IBM Corporation
# Copyright (C) 2013-2014, Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import errno
import logging
import os
import signal
import threading
from vdsm import cmdutils
from vdsm import ipwrapper
from vdsm import netinfo
from vdsm.utils import CommandPath
from vdsm.utils import execCmd
from vdsm.utils import pgrep
from vdsm.utils import rmFile
DHCLIENT_CGROUP = 'vdsm-dhclient'
class DhcpClient(object):
PID_FILE = '/var/run/dhclient%s-%s.pid'
LEASE_DIR = '/var/lib/dhclient'
LEASE_FILE = os.path.join(LEASE_DIR, 'dhclient{0}--{1}.lease')
DHCLIENT = CommandPath('dhclient', '/sbin/dhclient')
def __init__(self, iface, family=4, cgroup=DHCLIENT_CGROUP):
self.iface = iface
self.family = family
self.pidFile = self.PID_FILE % (family, self.iface)
if not os.path.exists(self.LEASE_DIR):
os.mkdir(self.LEASE_DIR)
self.leaseFile = self.LEASE_FILE.format(
'' if family == 4 else '6', self.iface)
self._cgroup = cgroup
def _dhclient(self):
# Ask dhclient to stop any dhclient running for the device
if os.path.exists(os.path.join(netinfo.NET_PATH, self.iface)):
kill_dhclient(self.iface, self.family)
cmd = [self.DHCLIENT.cmd, '-%s' % self.family, '-1', '-pf',
self.pidFile, '-lf', self.leaseFile, self.iface]
cmd = cmdutils.systemd_run(cmd, scope=True, slice=self._cgroup)
rc, out, err = execCmd(cmd)
return rc, out, err
def start(self, blocking):
if blocking:
rc, _, _ = self._dhclient()
return rc
else:
t = threading.Thread(target=self._dhclient, name='vdsm-dhclient-%s'
% self.iface)
t.daemon = True
t.start()
def shutdown(self):
try:
pid = int(open(self.pidFile).readline().strip())
except IOError as e:
if e.errno == os.errno.ENOENT:
pass
else:
raise
else:
_kill_and_rm_pid(pid, self.pidFile)
def kill_dhclient(device_name, family=4):
for pid in pgrep('dhclient'):
try:
with open('/proc/%s/cmdline' % pid) as cmdline:
args = cmdline.read().strip('\0').split('\0')
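                # e.g. (illustrative): ['/sbin/dhclient', '-6', '-pf',
                #      '/var/run/dhclient6-eth0.pid', 'eth0']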
except IOError as ioe:
if ioe.errno == errno.ENOENT: # exited before we read cmdline
continue
if args[-1] != device_name: # dhclient of another device
continue
tokens = iter(args)
pid_file = '/var/run/dhclient.pid' # Default client pid location
running_family = 4
for token in tokens:
if token == '-pf':
pid_file = next(tokens)
elif token == '--no-pid':
pid_file = None
elif token == '-6':
running_family = 6
if running_family != family:
continue
logging.info('Stopping dhclient -%s before running our own on %s',
family, device_name)
_kill_and_rm_pid(pid, pid_file)
# In order to be able to configure the device with dhclient again. It is
# necessary that dhclient does not find it configured with any IP address
# (except 0.0.0.0 which is fine, or IPv6 link-local address needed for
# DHCPv6).
ipwrapper.addrFlush(device_name, family)
def _kill_and_rm_pid(pid, pid_file):
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == os.errno.ESRCH: # Already exited
pass
else:
raise
if pid_file is not None:
rmFile(pid_file)
|
gpl-2.0
| 2,782,631,134,923,793,000
| 33.6
| 79
| 0.609604
| false
| 3.73588
| false
| false
| false
|
jason-ni/eventlet-raft
|
counter_test.py
|
1
|
1047
|
from eventlet_raft.client import RaftClient
server_address_list = [
('127.0.0.1', 4000),
('127.0.0.1', 4001),
('127.0.0.1', 4002),
('127.0.0.1', 4003),
('127.0.0.1', 4004),
]
def write_log(log, data, msg):
log.write("{0}: {1}\n".format(
msg,
str(data),
))
client = RaftClient(server_address_list)
print client.register()
with open('counter_test.log', 'w') as log:
ret = client.set_value('counter', 0)
if not ret['success']:
raise Exception("failed to reset counter")
write_log(log, ret, 'reset counter')
accu = 0
for i in range(1000):
ret = client.set_value('counter', i)
if not ret['success']:
raise Exception("failed to set counter")
write_log(log, ret, 'set counter:')
ret = client.get_value('counter')
write_log(log, ret, 'get counter:')
if not ret['success']:
raise Exception("failed to get counter")
accu += ret['resp'][1]
write_log(log, accu, i)
print 'result: ', accu
|
apache-2.0
| 6,919,018,597,424,594,000
| 25.175
| 52
| 0.560649
| false
| 3.201835
| false
| false
| false
|
Signbank/FinSL-signbank
|
signbank/dictionary/views.py
|
1
|
7652
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import user_passes_test
from django.core.exceptions import PermissionDenied
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext as _
from django.views.generic.list import ListView
from django.views.generic import FormView
from django.db.models import Q, F, Count, Case, Value, When, BooleanField
from tagging.models import Tag
from guardian.shortcuts import get_perms, get_objects_for_user, get_users_with_perms
from notifications.signals import notify
from .models import Dataset, Keyword, FieldChoice, Gloss, GlossRelation
from .forms import GlossCreateForm, LexiconForm
from ..video.forms import GlossVideoForm
@permission_required('dictionary.add_gloss')
def create_gloss(request):
"""Handle Gloss creation."""
if request.method == 'POST':
form = GlossCreateForm(request.POST)
glossvideoform = GlossVideoForm(request.POST, request.FILES)
glossvideoform.fields['videofile'].required=False
if form.is_valid() and glossvideoform.is_valid():
if 'view_dataset' not in get_perms(request.user, form.cleaned_data["dataset"]):
# If user has no permissions to dataset, raise PermissionDenied to show 403 template.
msg = _("You do not have permissions to create glosses for this lexicon.")
messages.error(request, msg)
raise PermissionDenied(msg)
new_gloss = form.save(commit=False)
new_gloss.created_by = request.user
new_gloss.updated_by = request.user
new_gloss.save()
if form.cleaned_data["tag"]:
Tag.objects.add_tag(new_gloss, form.cleaned_data["tag"].name)
if glossvideoform.cleaned_data['videofile']:
glossvideo = glossvideoform.save(commit=False)
glossvideo.gloss = new_gloss
glossvideo.save()
return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': new_gloss.pk}))
else:
# Return bound fields with errors if the form is not valid.
allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
form.fields["dataset"].queryset = Dataset.objects.filter(id__in=[x.id for x in allowed_datasets])
return render(request, 'dictionary/create_gloss.html', {'form': form, 'glossvideoform': glossvideoform})
else:
allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
form = GlossCreateForm()
glossvideoform = GlossVideoForm()
form.fields["dataset"].queryset = Dataset.objects.filter(id__in=[x.id for x in allowed_datasets])
return render(request, 'dictionary/create_gloss.html', {'form': form, 'glossvideoform': glossvideoform})
def keyword_value_list(request, prefix=None):
"""View to generate a list of possible values for a keyword given a prefix."""
kwds = Keyword.objects.filter(text__startswith=prefix)
kwds_list = [k.text for k in kwds]
return HttpResponse("\n".join(kwds_list), content_type='text/plain')
@user_passes_test(lambda u: u.is_staff, login_url='/accounts/login/')
def try_code(request):
"""A view for the developer to try out things"""
choicedict = {}
for key, choices in list(choicedict.items()):
for machine_value, english_name in choices:
FieldChoice(
english_name=english_name, field=key, machine_value=machine_value).save()
return HttpResponse('OK', status=200)
class ManageLexiconsListView(ListView):
model = Dataset
template_name = 'dictionary/manage_lexicons.html'
paginate_by = 50
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
qs = self.get_queryset()
context['has_permissions'] = qs.filter(has_view_perm=True)
context['no_permissions'] = qs.filter(has_view_perm=False)
# Show users with permissions to lexicons to SuperUsers
if self.request.user.is_superuser:
for lexicon in context['has_permissions']:
lexicon.users_with_perms = get_users_with_perms(obj=lexicon, with_superusers=True)
for lexicon in context['no_permissions']:
lexicon.users_with_perms = get_users_with_perms(obj=lexicon, with_superusers=True)
return context
def get_queryset(self):
# Get allowed datasets for user (django-guardian)
allowed_datasets = get_objects_for_user(self.request.user, 'dictionary.view_dataset')
# Get queryset
qs = super().get_queryset()
qs = qs.annotate(
has_view_perm=Case(
When(Q(id__in=allowed_datasets), then=Value(True)),
default=Value(False), output_field=BooleanField()))
qs = qs.select_related('signlanguage')
return qs
class ApplyLexiconPermissionsFormView(FormView):
form_class = LexiconForm
template_name = 'dictionary/manage_lexicons.html'
success_url = reverse_lazy('dictionary:manage_lexicons')
def form_valid(self, form):
dataset = form.cleaned_data['dataset']
admins = dataset.admins.all()
notify.send(sender=self.request.user, recipient=admins,
verb="{txt} {dataset}".format(txt=_("applied for permissions to:"), dataset=dataset.public_name),
action_object=self.request.user,
description="{user} ({user.first_name} {user.last_name}) {txt} {dataset}".format(
user=self.request.user, txt=_("applied for permissions to lexicon:"),
dataset=dataset.public_name
),
target=self.request.user, public=False)
msg = "{text} {lexicon_name}".format(text=_("Successfully applied permissions for"), lexicon_name=dataset.public_name)
messages.success(self.request, msg)
return super().form_valid(form)
def network_graph(request):
"""Network graph of GlossRelations"""
context = dict()
form = LexiconForm(request.GET, use_required_attribute=False)
# Get allowed datasets for user (django-guardian)
allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
# Filter the forms dataset field for the datasets user has permission to.
form.fields["dataset"].queryset = Dataset.objects.filter(id__in=[x.id for x in allowed_datasets])
dataset = None
if form.is_valid():
form.fields["dataset"].widget.is_required = False
dataset = form.cleaned_data["dataset"]
if dataset:
context["dataset"] = dataset
nodeqs = Gloss.objects.filter(Q(dataset=dataset),
Q(glossrelation_target__isnull=False) | Q(glossrelation_source__isnull=False))\
.distinct().values("id").annotate(label=F("idgloss"), size=Count("glossrelation_source")+Count("glossrelation_target"))
context["nodes"] = json.dumps(list(nodeqs))
edgeqs = GlossRelation.objects.filter(Q(source__dataset=dataset) | Q(target__dataset=dataset)).values("id", "source", "target")
context["edges"] = json.dumps(list(edgeqs))
return render(request, "dictionary/network_graph.html",
{'context': context,
'form': form
})
|
bsd-3-clause
| -1,069,654,723,331,064,700
| 46.825
| 135
| 0.659827
| false
| 3.962714
| false
| false
| false
|
PoprostuRonin/memes-api
|
parsers/mistrzowie.py
|
1
|
1724
|
from parsel import Selector
from utils import download, find_id_in_url, catch_errors, get_last_part_url
from data import ImageContent, Meme, Author, Page
import re
ROOT = "https://mistrzowie.org"
COMMENT = re.compile(r"Skomentuj\(([0-9]+?)\)")
def scrap(url):
html = download(url)
return parse(html)
def parse(html):
document = Selector(text=html)
memes = [catch_errors(parse_meme, element) for element in document.css("div.pic")]
memes = [meme for meme in memes if meme is not None]
title = document.css("title::text").get()
next_page_url = "/mistrzowie/page/" + get_last_part_url(
document.css(".list_next_page_button::attr(href)").get()
)
return Page(title, memes, next_page_url)
def parse_meme(m):
title = m.css("h1.picture > a::text").get()
if title is None:
return None
title = title.strip()
url = m.css("h1.picture > a::attr(href)").get()
points = None
points_text = m.css("span.total_votes_up > span.value::text").get()
try:
points = int(points_text)
except:
pass
comment_count = None
comments_count_text = (
m.css("a.lcomment::text").get().replace("\t", "").replace("\n", "")
)
result = COMMENT.match(comments_count_text)
if result:
try:
comment_count = int(result[1])
except:
pass
else:
comment_count = 0
content = None
src = m.css("img.pic::attr(src)").get()
if src:
content = ImageContent(ROOT + src)
return Meme(
title,
ROOT + url,
"/mistrzowie/{}".format(find_id_in_url(url)),
content,
None,
None,
points,
comment_count,
)
|
mit
| -4,502,319,310,330,862,600
| 23.28169
| 86
| 0.581206
| false
| 3.25283
| false
| false
| false
|
MarkusHackspacher/unknown-horizons
|
horizons/util/startgameoptions.py
|
1
|
6231
|
# ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# team@unknown-horizons.org
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from operator import itemgetter
import horizons.globals
from horizons.constants import AI, COLORS
from horizons.util.color import Color
from horizons.util.difficultysettings import DifficultySettings
class StartGameOptions:
def __init__(self, game_identifier):
super().__init__() # TODO: check if this call is needed
self.game_identifier = game_identifier
self._player_list = None
self.trader_enabled = True
self.pirate_enabled = True
self.natural_resource_multiplier = 1
self.disasters_enabled = True
self.force_player_id = None
self.is_map = False
self.is_multiplayer = False
self.is_scenario = False
self.player_name = 'Player'
self.player_color = None
self.ai_players = 0
self.human_ai = AI.HUMAN_AI
# this is used by the map editor to pass along the new map's size
self.map_padding = None
self.is_editor = False
def init_new_world(self, session):
# NOTE: this must be sorted before iteration, cause there is no defined order for
# iterating a dict, and it must happen in the same order for mp games.
for i in sorted(self._get_player_list(), key=itemgetter('id')):
session.world.setup_player(i['id'], i['name'], i['color'], i['clientid'] if self.is_multiplayer else None, i['local'], i['ai'], i['difficulty'])
session.world.set_forced_player(self.force_player_id)
center = session.world.init_new_world(self.trader_enabled, self.pirate_enabled, self.natural_resource_multiplier)
session.view.center(center[0], center[1])
def set_human_data(self, player_name, player_color):
self.player_name = player_name
self.player_color = player_color
def _get_player_list(self):
if self._player_list is not None:
return self._player_list
# for now just make it a bit easier for the AI
difficulty_level = {False: DifficultySettings.DEFAULT_LEVEL, True: DifficultySettings.EASY_LEVEL}
players = []
players.append({
'id': 1,
'name': self.player_name,
'color': Color.get(1) if self.player_color is None else self.player_color,
'local': True,
'ai': self.human_ai,
'difficulty': difficulty_level[bool(self.human_ai)],
})
cur_locale = horizons.globals.fife.get_locale()
# add AI players with a distinct color; if none can be found then use black
for num in range(self.ai_players):
color = Color.get(COLORS.BLACK) # if none can be found then be black
for possible_color in Color.get_defaults():
if possible_color == Color.get(COLORS.BLACK):
continue # black is used by the trader and the pirate
used = any(possible_color == player['color'] for player in players)
if not used:
color = possible_color
break
name = horizons.globals.db.get_random_ai_name(cur_locale, [p['name'] for p in players])
# out of pre-defined names?
if name is None:
name = 'AI' + str(num + 1)
players.append({
'id': num + 2,
'name': name,
'color': color,
'local': False,
'ai': True,
'difficulty': difficulty_level[True],
})
return players
@classmethod
def create_start_multiplayer(cls, game_file, player_list, is_map):
options = StartGameOptions(game_file)
options._player_list = player_list
options.is_map = is_map
options.is_multiplayer = True
return options
@classmethod
def create_start_singleplayer(cls, game_identifier, is_scenario, ai_players,
trader_enabled, pirate_enabled, force_player_id, is_map):
options = StartGameOptions(game_identifier)
options.is_scenario = is_scenario
options.ai_players = ai_players
options.trader_enabled = trader_enabled
options.pirate_enabled = pirate_enabled
options.force_player_id = force_player_id
options.is_map = is_map
return options
@classmethod
def create_start_random_map(cls, ai_players, seed, force_player_id):
from horizons.util.random_map import generate_map_from_seed
options = StartGameOptions(generate_map_from_seed(seed))
options.ai_players = ai_players
options.force_player_id = force_player_id
options.is_map = True
return options
@classmethod
def create_editor_load(cls, map_name):
options = StartGameOptions(map_name)
options.player_name = 'Editor'
options.trader_enabled = False
options.pirate_enabled = False
options.natural_resource_multiplier = 0
options.disasters_enabled = False
options.is_map = True
options.is_editor = True
return options
@classmethod
def create_start_scenario(cls, scenario_file):
options = StartGameOptions(scenario_file)
options.is_scenario = True
return options
@classmethod
def create_start_map(cls, map_name):
options = StartGameOptions(map_name)
options.is_map = True
return options
@classmethod
def create_load_game(cls, saved_game, force_player_id):
options = StartGameOptions(saved_game)
options.force_player_id = force_player_id
return options
@classmethod
def create_game_test(cls, game_identifier, player_list):
options = StartGameOptions(game_identifier)
options._player_list = player_list
options.trader_enabled = False
options.pirate_enabled = False
options.natural_resource_multiplier = 0
return options
@classmethod
def create_ai_test(cls, game_identifier, player_list):
options = StartGameOptions(game_identifier)
options._player_list = player_list
options.is_map = True
return options
|
gpl-2.0
| 626,343,943,137,751,400
| 32.5
| 147
| 0.71048
| false
| 3.291601
| false
| false
| false
|
ConnectedVision/connectedvision
|
test/UnitTest/GeneratorTestCode.py
|
1
|
1271
|
import os
import subprocess
if not "ConnectedVision" in os.environ:
raise Exception("\"ConnectedVision\" environment variable is not defined")
cvDir = os.path.abspath(os.environ["ConnectedVision"])
if not os.path.isdir(cvDir):
raise Exception("the directory path referenced by the ConnectedVision environment variable is invalid: " + cvDir)
toolsDir = os.path.join(cvDir, "tools")
targetDir = os.path.join(cvDir, "test", "UnitTest")
schemaFile = os.path.join(targetDir, "GeneratorTestCode.schema")
dirStructureFile = os.path.join(targetDir, "GeneratorTestCodeStructure.json")
print("")
print(schemaFile)
print("--------------------------------")
print("CreateItemsForSchema.js")
subprocess.check_call(["node", os.path.join(toolsDir, "CodeFromTemplate", "CreateItemForSchema.js"), targetDir, cvDir, schemaFile, dirStructureFile, "-id", "UnitTest"])
print("")
print("CodeFromTemplate.js - DATA items")
globalJson = os.path.join(targetDir, "global.json")
itemsJson = os.path.join(targetDir, "items.json")
subprocess.check_call(["node", os.path.join(toolsDir, "CodeFromTemplate", "CodeFromTemplate.js"), globalJson, itemsJson])
print("")
print("Clean Up")
os.remove(globalJson)
os.remove(itemsJson)
os.remove(os.path.join(targetDir, "artefacts.json"))
print("OK")
|
mit
| 495,858,675,220,353,340
| 35.314286
| 168
| 0.738002
| false
| 3.284238
| false
| true
| false
|
agoragames/kairos
|
test/functional/histogram_helper.py
|
1
|
14454
|
from helper_helper import *
from helper_helper import _time
from collections import OrderedDict
@unittest.skipUnless( os.environ.get('TEST_HISTOGRAM','true').lower()=='true', 'skipping histogram' )
class HistogramHelper(Chai):
def setUp(self):
super(HistogramHelper,self).setUp()
self.series = Timeseries(self.client, type='histogram', prefix='kairos',
read_func=int,
intervals={
'minute' : {
'step' : 60,
'steps' : 5,
},
'hour' : {
'step' : 3600,
'resolution' : 60,
}
} )
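    # Assumed reading of the config above: 'minute' keeps 5 one-minute
    # buckets, 'hour' keeps hour-long intervals at 60-second resolution.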
self.series.delete_all()
def tearDown(self):
self.series.delete_all()
def test_bulk_insert(self):
inserts = {
None : { 'test1':[1,2,3], 'test2':[4,5,6] },
_time(0) : { 'test1':[1,2,3], 'test2':[4,5,6], 'test3':[7,8,9] },
_time(30) : { 'test1':[1,2,3], 'test2':[4,5,6] },
_time(60) : { 'test1':[1,2,3], 'test3':[7,8,9] }
}
self.series.bulk_insert( inserts )
t1_i1 = self.series.get('test1', 'minute', timestamp=_time(0))
assert_equals( {1:2, 2:2, 3:2}, t1_i1[_time(0)] )
t2_i1 = self.series.get('test2', 'minute', timestamp=_time(0))
assert_equals( {4:2, 5:2, 6:2}, t2_i1[_time(0)] )
t3_i1 = self.series.get('test3', 'minute', timestamp=_time(0))
assert_equals( {7:1, 8:1, 9:1}, t3_i1[_time(0)] )
t1_i2 = self.series.get('test1', 'minute', timestamp=_time(60))
assert_equals( {1:1, 2:1, 3:1}, t1_i2[_time(60)] )
def test_bulk_insert_intervals_after(self):
a,b,c,d,e,f = 10,11,12,13,14,15
inserts = OrderedDict( (
(None , { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(0) , { 'test1':[1,2,3], 'test2':[4,5,6], 'test3':[7,8,9] } ),
(_time(30), { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(60), { 'test1':[a,b,c], 'test3':[d,e,f] })
) )
self.series.bulk_insert( inserts, intervals=3 )
t1_i1 = self.series.get('test1', 'minute', timestamp=_time(0))
assert_equals( {1:2, 2:2, 3:2}, t1_i1[_time(0)] )
t2_i1 = self.series.get('test2', 'minute', timestamp=_time(0))
assert_equals( {4:2, 5:2, 6:2}, t2_i1[_time(0)] )
t3_i1 = self.series.get('test3', 'minute', timestamp=_time(0))
assert_equals( {7:1, 8:1, 9:1}, t3_i1[_time(0)] )
t1_i2 = self.series.get('test1', 'minute', timestamp=_time(60))
assert_equals( {1:2, 2:2, 3:2, a:1, b:1, c:1}, t1_i2[_time(60)] )
t3_i3 = self.series.get('test3', 'minute', timestamp=_time(120))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i3[_time(120)] )
t3_i4 = self.series.get('test3', 'minute', timestamp=_time(180))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i4[_time(180)] )
def test_bulk_insert_intervals_before(self):
a,b,c,d,e,f = 10,11,12,13,14,15
inserts = OrderedDict( (
(None , { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(0) , { 'test1':[1,2,3], 'test2':[4,5,6], 'test3':[7,8,9] } ),
(_time(30), { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(60), { 'test1':[a,b,c], 'test3':[d,e,f] })
) )
self.series.bulk_insert( inserts, intervals=-3 )
t1_i1 = self.series.get('test1', 'minute', timestamp=_time(0))
assert_equals( {1:2, 2:2, 3:2, a:1, b:1, c:1}, t1_i1[_time(0)] )
t2_i1 = self.series.get('test2', 'minute', timestamp=_time(0))
assert_equals( {4:2, 5:2, 6:2}, t2_i1[_time(0)] )
t3_i1 = self.series.get('test3', 'minute', timestamp=_time(0))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i1[_time(0)] )
t1_i2 = self.series.get('test1', 'minute', timestamp=_time(-60))
assert_equals( {1:2, 2:2, 3:2, a:1, b:1, c:1}, t1_i2[_time(-60)] )
t3_i3 = self.series.get('test3', 'minute', timestamp=_time(-120))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i3[_time(-120)] )
t3_i4 = self.series.get('test3', 'minute', timestamp=_time(-180))
assert_equals( {7:1, 8:1, 9:1}, t3_i4[_time(-180)] )
def test_get(self):
    # 2 hours worth of data, value is same as timestamp
for t in xrange(1, 7200):
self.series.insert( 'test', t/2, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
# middle of an interval
interval = self.series.get( 'test', 'minute', timestamp=_time(100) )
assert_equals( [_time(60)], interval.keys() )
keys = list(range(30,60))
assert_equals( keys, interval[_time(60)].keys() )
for k in keys:
assert_equals( 2, interval[_time(60)][k] )
    # no data for the interval, returns one interval with an empty value list
interval = self.series.get( 'test', 'minute' )
assert_equals( 1, len(interval) )
assert_equals( 0, len(interval.values()[0]) )
###
### with resolution, optionally condensed
###
interval = self.series.get( 'test', 'hour', timestamp=_time(100) )
keys = list(range(30,60))
assert_equals( 60, len(interval) )
assert_equals( keys, interval[_time(60)].keys() )
interval = self.series.get( 'test', 'hour', timestamp=_time(100), condensed=True )
assert_equals( 1, len(interval) )
assert_equals( list(range(0,1800)), interval[_time(0)].keys() )
def test_get_joined(self):
# put some data in the first minutes of each hour for test1, and then for
# a few more minutes in test2
for t in xrange(1, 120):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(3600, 3720):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(120, 240):
self.series.insert( 'test1', t, timestamp=_time(t) )
for t in xrange(3721, 3840):
self.series.insert( 'test1', t, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
# interval with 2 series worth of data
interval = self.series.get( ['test1','test2'], 'minute', timestamp=_time(100) )
assert_equals( [_time(60)], interval.keys() )
assert_equals( dict.fromkeys(range(60,120),2), interval[_time(60)] )
# interval with 1 series worth of data
interval = self.series.get( ['test1','test2'], 'minute', timestamp=_time(122) )
assert_equals( [_time(120)], interval.keys() )
assert_equals( dict.fromkeys(range(120,180),1), interval[_time(120)] )
    # no data for the interval, returns one interval with an empty value list
interval = self.series.get( ['test1','test2'], 'minute' )
assert_equals( 1, len(interval) )
assert_equals( 0, len(interval.values()[0]) )
###
### with resolution, optionally condensed
###
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100) )
assert_equals( map(_time,[0,60,120,180]), interval.keys() )
assert_equals( dict.fromkeys(range(1,60), 2), interval[_time(0)] )
assert_equals( dict.fromkeys(range(60,120), 2), interval[_time(60)] )
assert_equals( dict.fromkeys(range(120,180), 1), interval[_time(120)] )
assert_equals( dict.fromkeys(range(180,240), 1), interval[_time(180)] )
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240),1) )
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100), condensed=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( data, interval[_time(0)] )
# with transforms
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100), transform='count' )
assert_equals( 120, interval[_time(60)] )
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100), transform=['min','max','count'], condensed=True )
assert_equals( {'min':1, 'max':239, 'count':358}, interval[_time(0)] )
def test_series(self):
    # 2 hours worth of data, value is same as timestamp
for t in xrange(1, 7200):
self.series.insert( 'test', t/2, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
interval = self.series.series( 'test', 'minute', end=_time(250) )
assert_equals( map(_time, [0,60,120,180,240]), interval.keys() )
assert_equals( list(range(0,30)), sorted(interval[_time(0)].keys()) )
assert_equals( 1, interval[_time(0)][0] )
for k in xrange(1,30):
assert_equals(2, interval[_time(0)][k])
assert_equals( list(range(120,150)), sorted(interval[_time(240)].keys()) )
for k in xrange(120,150):
assert_equals(2, interval[_time(240)][k])
interval = self.series.series( 'test', 'minute', steps=2, end=_time(250) )
assert_equals( map(_time, [180,240]), interval.keys() )
assert_equals( list(range(120,150)), sorted(interval[_time(240)].keys()) )
# with collapsed
interval = self.series.series( 'test', 'minute', end=_time(250), collapse=True )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( list(range(0,150)), sorted(interval[_time(0)].keys()) )
for k in xrange(1,150):
assert_equals(2, interval[_time(0)][k])
###
### with resolution
###
interval = self.series.series( 'test', 'hour', end=_time(250) )
assert_equals( 1, len(interval) )
assert_equals( 60, len(interval[_time(0)]) )
assert_equals( list(range(0,30)), sorted(interval[_time(0)][_time(0)].keys()) )
# single step, last one
interval = self.series.series( 'test', 'hour', condensed=True, end=_time(4200) )
assert_equals( 1, len(interval) )
assert_equals( 1800, len(interval[_time(3600)]) )
assert_equals( list(range(1800,3600)), sorted(interval[_time(3600)].keys()) )
interval = self.series.series( 'test', 'hour', condensed=True, end=_time(4200), steps=2 )
assert_equals( map(_time, [0,3600]), interval.keys() )
assert_equals( 1800, len(interval[_time(0)]) )
assert_equals( 1800, len(interval[_time(3600)]) )
assert_equals( list(range(1800,3600)), sorted(interval[_time(3600)].keys()) )
# with collapsed
interval = self.series.series( 'test', 'hour', condensed=True, end=_time(4200), steps=2, collapse=True )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( 3600, len(interval[_time(0)]) )
assert_equals( list(range(0,3600)), sorted(interval[_time(0)].keys()) )
def test_series_joined(self):
# put some data in the first minutes of each hour for test1, and then for
# a few more minutes in test2
for t in xrange(1, 120):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(3600, 3720):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(120, 240):
self.series.insert( 'test1', t, timestamp=_time(t) )
for t in xrange(3720, 3840):
self.series.insert( 'test1', t, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250) )
assert_equals( map(_time,[0,60,120,180,240]), interval.keys() )
assert_equals( dict.fromkeys(range(1,60), 2), interval[_time(0)] )
assert_equals( dict.fromkeys(range(60,120), 2), interval[_time(60)] )
assert_equals( dict.fromkeys(range(120,180), 1), interval[_time(120)] )
assert_equals( dict.fromkeys(range(180,240), 1), interval[_time(180)] )
assert_equals( {}, interval[_time(240)] )
    # no matching data, returns intervals with empty value lists
interval = self.series.series( ['test1','test2'], 'minute', start=time.time(), steps=2 )
assert_equals( 2, len(interval) )
assert_equals( {}, interval.values()[0] )
# with transforms
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250), transform=['min','count'] )
assert_equals( map(_time,[0,60,120,180,240]), interval.keys() )
assert_equals( {'min':1, 'count':118}, interval[_time(0)] )
assert_equals( {'min':60, 'count':120}, interval[_time(60)] )
assert_equals( {'min':120, 'count':60}, interval[_time(120)] )
assert_equals( {'min':180, 'count':60}, interval[_time(180)] )
assert_equals( {'min':0, 'count':0}, interval[_time(240)] )
# with collapsed
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240), 1) )
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250), collapse=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( data, interval[_time(0)] )
# with tranforms and collapsed
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250), transform=['min','max', 'count'], collapse=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( {'min':1, 'max':239, 'count':358}, interval[_time(0)] )
###
### with resolution, optionally condensed
###
interval = self.series.series( ['test1','test2'], 'hour', end=_time(250) )
assert_equals( 1, len(interval) )
assert_equals( map(_time,[0,60,120,180]), interval[_time(0)].keys() )
assert_equals( 4, len(interval[_time(0)]) )
assert_equals( dict.fromkeys(range(1,60), 2), interval[_time(0)][_time(0)] )
assert_equals( dict.fromkeys(range(60,120), 2), interval[_time(0)][_time(60)] )
assert_equals( dict.fromkeys(range(120,180), 1), interval[_time(0)][_time(120)] )
assert_equals( dict.fromkeys(range(180,240), 1), interval[_time(0)][_time(180)] )
# condensed
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240), 1) )
interval = self.series.series( ['test1','test2'], 'hour', end=_time(250), condensed=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( data, interval[_time(0)] )
# with collapsed across multiple intervals
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240), 1) )
data.update( dict.fromkeys(range(3600,3720), 2) )
data.update( dict.fromkeys(range(3720,3840), 1) )
interval = self.series.series( ['test1','test2'], 'hour', condensed=True, end=_time(4200), steps=2, collapse=True )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( data, interval[_time(0)] )
# with transforms collapsed
interval = self.series.series( ['test1','test2'], 'hour', condensed=True, end=_time(4200), steps=2, collapse=True, transform=['min','max','count'] )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( {'min':1,'max':3839,'count':718}, interval[_time(0)] )
|
bsd-3-clause
| -724,267,455,489,234,600
| 42.667674
| 152
| 0.600664
| false
| 2.938402
| true
| false
| false
|
alex/wal-e
|
wal_e/worker/s3_deleter.py
|
1
|
4203
|
import gevent
from gevent import queue
from wal_e import exception
from wal_e import retries
class Deleter(object):
def __init__(self):
# Allow enqueuing of several API calls worth of work, which
# right now allow 1000 key deletions per job.
self.PAGINATION_MAX = 1000
self._q = queue.JoinableQueue(self.PAGINATION_MAX * 10)
self._worker = gevent.spawn(self._work)
self._parent_greenlet = gevent.getcurrent()
self.closing = False
def close(self):
self.closing = True
self._q.join()
self._worker.kill(block=True)
def delete(self, key):
if self.closing:
raise exception.UserCritical(
msg='attempt to delete while closing Deleter detected',
hint='This should be reported as a bug.')
self._q.put(key)
def _work(self):
try:
while True:
# If _cut_batch has an error, it is responsible for
# invoking task_done() the appropriate number of
# times.
page = self._cut_batch()
# If nothing was enqueued, yield and wait around a bit
# before looking for work again.
if not page:
gevent.sleep(1)
continue
                # However, in the event of success, the jobs are not
                # considered done until _delete_batch returns
                # successfully. In the event an exception is raised, it
                # will be propagated to the Greenlet that created the
                # Deleter, but the tasks are marked done nonetheless.
try:
self._delete_batch(page)
finally:
for i in xrange(len(page)):
self._q.task_done()
except KeyboardInterrupt, e:
# Absorb-and-forward the exception instead of using
# gevent's link_exception operator, because in gevent <
# 1.0 there is no way to turn off the alarming stack
# traces emitted when an exception propagates to the top
# of a greenlet, linked or no.
#
# Normally, gevent.kill is ill-advised because it results
# in asynchronous exceptions being raised in that
# greenlet, but given that KeyboardInterrupt is nominally
# asynchronously raised by receiving SIGINT to begin with,
# there nothing obvious being lost from using kill() in
# this case.
gevent.kill(self._parent_greenlet, e)
def _cut_batch(self):
# Attempt to obtain as much work as possible, up to the
# maximum able to be processed by S3 at one time,
# PAGINATION_MAX.
page = []
try:
for i in xrange(self.PAGINATION_MAX):
page.append(self._q.get_nowait())
except queue.Empty:
pass
except:
# In event everything goes sideways while dequeuing,
# carefully un-lock the queue.
for i in xrange(len(page)):
self._q.task_done()
raise
return page
@retries.retry()
def _delete_batch(self, page):
# Check that all keys are in the same bucket; this code is not
# designed to deal with fast deletion of keys from multiple
# buckets at the same time, and not checking this could result
# in deleting similarly named keys from the wrong bucket.
#
# In wal-e's use, homogeneity of the bucket retaining the keys
# is presumed to be always the case.
bucket_name = page[0].bucket.name
for key in page:
if key.bucket.name != bucket_name:
raise exception.UserCritical(
msg='submitted keys are not part of the same bucket',
detail=('The clashing bucket names are {0} and {1}.'
.format(key.bucket.name, bucket_name)),
hint='This should be reported as a bug.')
bucket = page[0].bucket
bucket.delete_keys([key.name for key in page])
|
bsd-3-clause
| -8,131,093,756,282,691,000
| 37.559633
| 73
| 0.563169
| false
| 4.593443
| false
| false
| false
|
rnelsonchem/gcmstools
|
gcmstools/general.py
|
1
|
3104
|
import os
from urllib.request import urlopen
from IPython.parallel import Client, interactive
import gcmstools.filetypes as gcf
import gcmstools.reference as gcr
import gcmstools.fitting as gcfit
import gcmstools.datastore as gcd
import gcmstools.calibration as gcc
_ROOT = os.path.abspath(os.path.dirname(__file__))
_PWD = os.getcwd()
def get_sample_data(fname=None):
'''Copy sample data to current folder.
Use this function to download sample data as a zip file into the current
folder.
'''
url = "http://gcmstools.rcnelson.com/_downloads/sampledata.zip"
zipdata = urlopen(url)
with open('sampledata.zip', 'wb') as f:
f.write(zipdata.read())
zipdata.close()
def proc_data(data_folder, h5name, multiproc=False, chunk_size=4,
filetype='aia', reffile=None, fittype=None, calfile=None,
picts=False, **kwargs):
if filetype == 'aia':
GcmsObj = gcf.AiaFile
ends = ('CDF', 'AIA', 'cdf', 'aia')
files = os.listdir(data_folder)
files = [f for f in files if f.endswith(ends)]
files = [os.path.join(data_folder, f) for f in files]
ref = None
if reffile:
if reffile.endswith(('txt', 'TXT')):
ref = gcr.TxtReference(reffile, **kwargs)
fit = None
if fittype:
if fittype.lower() == 'nnls':
fit = gcfit.Nnls(**kwargs)
h5 = gcd.GcmsStore(h5name, **kwargs)
if multiproc:
try:
client = Client()
except:
error = "ERROR! You do not have an IPython Cluster running.\n\n"
error += "Start cluster with: ipcluster start -n # &\n"
error += "Where # == the number of processors.\n\n"
error += "Stop cluster with: ipcluster stop"
print(error)
h5.close()
return
dview = client[:]
dview.block = True
dview['ref'] = ref
dview['fit'] = fit
dview['GcmsObj'] = GcmsObj
chunk_size = len(dview)
# Chunk the data so lots of data files aren't opened in memory.
for chunk in _chunker(files, chunk_size):
if multiproc:
datafiles = dview.map_sync(_proc_file,
[(i, kwargs) for i in chunk])
else:
datafiles = [GcmsObj(f, **kwargs) for f in chunk]
if ref:
ref(datafiles)
if fit:
fit(datafiles)
h5.append_gcms(datafiles)
if calfile:
cal = gcc.Calibrate(h5, **kwargs)
cal.curvegen(calfile, picts=picts, **kwargs)
cal.datagen(picts=picts, **kwargs)
h5.compress()
# This function is from: http://stackoverflow.com/questions/434287
def _chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
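# Example (illustrative): list(_chunker(['a', 'b', 'c', 'd', 'e'], 2))
# yields [['a', 'b'], ['c', 'd'], ['e']].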
# This function is for the multiproc version.
# Must use the interactive decorator to update the node namespace
@interactive
def _proc_file(file_kwargs):
filename, kwargs = file_kwargs
datafile = GcmsObj(filename, **kwargs)
if ref:
ref(datafile)
if fit:
fit(datafile)
return datafile
|
bsd-3-clause
| 6,772,476,318,539,915,000
| 27.740741
| 76
| 0.600515
| false
| 3.495495
| false
| false
| false
|
camilothorne/nasslli2016
|
Nasslli16/annotation/savestat.py
|
1
|
2647
|
'''
Created on 2016
@author: camilothorne
'''
#import re, string, array
from subprocess import call
import os
class SaveStat:
# path : path to report file
# plotfile : path to the plots
# tables : path to the table
# constructor
def __init__(self,table,plotfile1,name):
# self.path = "/home/camilo/mmap-wsd/tex/"+name+".tex"
self.path = os.environ['TEX']+name+"-report.tex"
self.plotfile1 = plotfile1
self.table = table
#print self.table
# building the report
res = self.makeRes(self.table, self.plotfile1, name)
# saving the report
print "###################################################"
print "\n\npreparing report...\n\n"
self.compileFile(self.path, res)
self.fileSave(self.path, res)
# make contingency table
def makeRes(self,table,plotfile1,name):
# plugin table
title = r'\begin{center}\textbf{\Large '+name+'}\end{center}\n'
ntable = title + r'\begin{center}\begin{table}[p]\centering' + "\n"
#print table
myfile = open(table,'r')
myfiler = myfile.read()
ntable = ntable + myfiler
ntable = ntable + "\caption{Results.}\end{table}\end{center}\n\n"
myfile.close()
# complete and return table
fig1 = r'\begin{center}' + "\n\includegraphics[scale=0.8]{" + plotfile1 + "}\n\end{center}\n"
res = ntable + "\n\n" + r'\vspace{0.2cm}' + "\n\n" + fig1 + "\\newpage\n" #+ fig2
return res
# save the table in a .tex file
def fileSave(self,path,res):
myfile = open(path,'w')
myfile.write(res)
myfile.close()
# compile with pdflatex
def compileFile(self,path,res):
myfile = open(path,'w')
myfile.write("\documentclass[a4paper,12pt]{article}")
myfile.write("\n\n")
myfile.write("\usepackage{graphicx}\n")
myfile.write("\usepackage{epstopdf}\n")
myfile.write("\usepackage{rotating}\n")
myfile.write("\usepackage{times}\n")
myfile.write("\n\n")
myfile.write(r'\begin{document}')
myfile.write("\n\n")
myfile.write(res)
myfile.write("\n\n")
myfile.write("\end{document}")
myfile.close()
call(['/usr/bin/pdflatex',
# '-output-directory='+'/home/camilo/workspace-git/RestWSD/results/'+'tex/',
'-output-directory='+os.environ['TEX'],
path],
shell=False)
|
gpl-3.0
| -8,763,846,241,750,692,000
| 32.0875
| 101
| 0.529278
| false
| 3.53877
| false
| false
| false
|
wlashell/lyrical_page
|
site_seo/migrations/0002_auto__add_siteurldefaults.py
|
1
|
5402
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SiteUrlDefaults'
db.create_table('site_seo_siteurldefaults', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('page_title', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('page_keywords', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('page_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('site_seo', ['SiteUrlDefaults'])
def backwards(self, orm):
# Deleting model 'SiteUrlDefaults'
db.delete_table('site_seo_siteurldefaults')
models = {
'site_content.sitepage': {
'Meta': {'unique_together': "(('site', 'url'),)", 'object_name': 'SitePage'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'custom_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_rte': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'meta_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['site_content.SitePageTemplateSelection']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'site_content.sitepagetemplateselection': {
'Meta': {'object_name': 'SitePageTemplateSelection'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'site_seo.siteurl': {
'Meta': {'object_name': 'SiteUrl'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sitepages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['site_content.SitePage']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'site_seo.siteurldefaults': {
'Meta': {'object_name': 'SiteUrlDefaults'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['site_seo']
|
apache-2.0
| 3,382,999,590,637,498,400
| 65.703704
| 178
| 0.556831
| false
| 3.60855
| false
| false
| false
|
dodonator/area51
|
Kryptotests/OneTimePad/Alpha/oneTimePy2.py
|
1
|
1880
|
import os
import random
import getpass
import time
os.system('clear')
def encode(klartext):
'''
Create a random One-Time-Pad and encode the input strings
'''
laengeKlartext = len(klartext)
key = ''
keyArray = []
klartextArray = list(klartext)
geheimtextArray = []
geheimtext = ''
alphabet = []
for i in range(26):
alphabet.append(chr(i+65))
for i in range(26):
alphabet.append(chr(i+97))
	for i in range(laengeKlartext): # this for loop generates the key
keyArray.append(random.choice(alphabet))
	for i in range(laengeKlartext): # this for loop performs the encoding
tmpKlartextIndex = alphabet.index(klartextArray[i])
tmpKeyIndex = alphabet.index(keyArray[i])
tmpG = alphabet[(tmpKlartextIndex + tmpKeyIndex) % 52]
geheimtextArray.append(tmpG)
	for element in geheimtextArray: # this for loop joins the array into a string
geheimtext += element
for element in keyArray:
key += element
return [geheimtext,key]
def decode(geheimtext,key):
laengeGeheimtext = len(geheimtext)
keyArray = list(key)
geheimArray = list(geheimtext)
klartextArray = []
klartext = ''
alphabet = []
for i in range(26):
alphabet.append(chr(i+65))
for i in range(26):
alphabet.append(chr(i+97))
for i in range(laengeGeheimtext):
tmpGeheimtextIndex = alphabet.index(geheimArray[i])
tmpKeyIndex = alphabet.index(keyArray[i])
tmpDifferenz = tmpGeheimtextIndex - tmpKeyIndex
if tmpDifferenz >= 0:
klartextArray.append(alphabet[tmpDifferenz])
else:
tmpDifferenz = tmpGeheimtextIndex + 52 - tmpKeyIndex
klartextArray.append(alphabet[tmpDifferenz])
for element in klartextArray:
klartext += element
return klartext
klartext = raw_input(': \n')
result = encode(klartext)
print 'Geheimtext: ' + result[0]
print 'Key: ' + result[1]
print 'Enschluesselt: ' + decode(result[0],result[1])
|
gpl-3.0
| 5,432,290,994,494,786,000
| 24.066667
| 87
| 0.722872
| false
| 2.666667
| false
| false
| false
|
christophercrouzet/nani
|
tests/data/particle.py
|
1
|
1153
|
import numpy
import nani
from . import vector2
_PARTICLE_ID = 0
_PARTICLE_POSITION = 1
_PARTICLE_MASS = 2
_PARTICLE_NEIGHBOURS = 3
class ParticleView(object):
__slots__ = ('_data',)
def __init__(self, data):
self._data = data
def __str__(self):
return (
"Particle(id=%s, position=%s, mass=%s, neighbours=%s)"
% (self.id, self.position, self.mass, self.neighbours)
)
@property
def id(self):
return self._data[_PARTICLE_ID]
@property
def position(self):
return vector2.Vector2View(self._data[_PARTICLE_POSITION])
@property
def mass(self):
return self._data[_PARTICLE_MASS]
@mass.setter
def mass(self, value):
self._data[_PARTICLE_MASS] = value
@property
def neighbours(self):
return self._data[_PARTICLE_NEIGHBOURS]
PARTICLE_TYPE = nani.Structure(
fields=(
('id', nani.Number(type=numpy.uint32, default=-1)),
('position', vector2.VECTOR2_TYPE),
('mass', nani.Number(type=numpy.float32, default=1.0)),
('neighbours', nani.Object()),
),
view=ParticleView
)
|
mit
| 3,566,905,308,198,292,000
| 19.589286
| 66
| 0.590633
| false
| 3.211699
| false
| false
| false
|
iEngage/python-sdk
|
iengage_client/models/notification.py
|
1
|
7552
|
# coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Notification(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, notification_id=None, type=None, message=None, date=None, by_user=None, entity=None, parent_entity=None, extra_data=None, read=False):
"""
Notification - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'notification_id': 'int',
'type': 'str',
'message': 'str',
'date': 'datetime',
'by_user': 'User',
'entity': 'Entity',
'parent_entity': 'Entity',
'extra_data': 'str',
'read': 'bool'
}
self.attribute_map = {
'notification_id': 'notificationId',
'type': 'type',
'message': 'message',
'date': 'date',
'by_user': 'byUser',
'entity': 'entity',
'parent_entity': 'parentEntity',
'extra_data': 'extraData',
'read': 'read'
}
self._notification_id = notification_id
self._type = type
self._message = message
self._date = date
self._by_user = by_user
self._entity = entity
self._parent_entity = parent_entity
self._extra_data = extra_data
self._read = read
@property
def notification_id(self):
"""
Gets the notification_id of this Notification.
:return: The notification_id of this Notification.
:rtype: int
"""
return self._notification_id
@notification_id.setter
def notification_id(self, notification_id):
"""
Sets the notification_id of this Notification.
:param notification_id: The notification_id of this Notification.
:type: int
"""
self._notification_id = notification_id
@property
def type(self):
"""
Gets the type of this Notification.
:return: The type of this Notification.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Notification.
:param type: The type of this Notification.
:type: str
"""
self._type = type
@property
def message(self):
"""
Gets the message of this Notification.
:return: The message of this Notification.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this Notification.
:param message: The message of this Notification.
:type: str
"""
self._message = message
@property
def date(self):
"""
Gets the date of this Notification.
:return: The date of this Notification.
:rtype: datetime
"""
return self._date
@date.setter
def date(self, date):
"""
Sets the date of this Notification.
:param date: The date of this Notification.
:type: datetime
"""
self._date = date
@property
def by_user(self):
"""
Gets the by_user of this Notification.
:return: The by_user of this Notification.
:rtype: User
"""
return self._by_user
@by_user.setter
def by_user(self, by_user):
"""
Sets the by_user of this Notification.
:param by_user: The by_user of this Notification.
:type: User
"""
self._by_user = by_user
@property
def entity(self):
"""
Gets the entity of this Notification.
:return: The entity of this Notification.
:rtype: Entity
"""
return self._entity
@entity.setter
def entity(self, entity):
"""
Sets the entity of this Notification.
:param entity: The entity of this Notification.
:type: Entity
"""
self._entity = entity
@property
def parent_entity(self):
"""
Gets the parent_entity of this Notification.
:return: The parent_entity of this Notification.
:rtype: Entity
"""
return self._parent_entity
@parent_entity.setter
def parent_entity(self, parent_entity):
"""
Sets the parent_entity of this Notification.
:param parent_entity: The parent_entity of this Notification.
:type: Entity
"""
self._parent_entity = parent_entity
@property
def extra_data(self):
"""
Gets the extra_data of this Notification.
:return: The extra_data of this Notification.
:rtype: str
"""
return self._extra_data
@extra_data.setter
def extra_data(self, extra_data):
"""
Sets the extra_data of this Notification.
:param extra_data: The extra_data of this Notification.
:type: str
"""
self._extra_data = extra_data
@property
def read(self):
"""
Gets the read of this Notification.
:return: The read of this Notification.
:rtype: bool
"""
return self._read
@read.setter
def read(self, read):
"""
Sets the read of this Notification.
:param read: The read of this Notification.
:type: bool
"""
self._read = read
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| -2,243,176,710,946,412,300
| 23.842105
| 186
| 0.532707
| false
| 4.514047
| false
| false
| false
|
vrbagalkote/avocado-misc-tests-1
|
generic/openblas.py
|
1
|
2440
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Pooja B Surya <pooja@linux.vnet.ibm.com>
import os
from avocado import Test
from avocado import main
from avocado.utils import build
from avocado.utils import archive
from avocado.utils.software_manager import SoftwareManager
from avocado.utils import distro
class Openblas(Test):
"""
OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.
This test runs openblas tests
"""
def setUp(self):
smm = SoftwareManager()
detected_distro = distro.detect()
packages = ['make', 'gcc']
if detected_distro.name == "Ubuntu":
packages.append("gfortran")
elif detected_distro.name == "SuSE":
packages.append(["gcc-fortran", "libgfortran4"])
else:
packages.append("gcc-gfortran")
for package in packages:
if not smm.check_installed(package) and not smm.install(package):
self.cancel(' %s is needed for the test to be run' % package)
url = "https://github.com/xianyi/OpenBLAS/archive/develop.zip"
tarball = self.fetch_asset("OpenBLAS-develop.zip", locations=[url],
expire='7d')
archive.extract(tarball, self.srcdir)
openblas_dir = os.path.join(self.srcdir, "OpenBLAS-develop")
openblas_bin_dir = os.path.join(openblas_dir, 'bin')
os.mkdir(openblas_bin_dir)
build.make(openblas_dir, extra_args='FC=gfortran')
build.make(openblas_dir, extra_args='PREFIX=%s install' %
openblas_bin_dir)
self.test_dir = os.path.join(openblas_dir, "test")
def test(self):
        result = build.run_make(self.test_dir)
for line in str(result).splitlines():
if '[FAIL]' in line:
self.fail("test failed, Please check debug log for failed"
"test cases")
if __name__ == "__main__":
main()
|
gpl-2.0
| -1,176,776,689,350,797,300
| 35.41791
| 78
| 0.643443
| false
| 3.77709
| true
| false
| false
|
RobertABT/heightmap
|
region.py
|
1
|
2223
|
from numpy import *
class Region:
xllc = 0 #<--x
yllc = 0 #<--y
nrows = 3 #<-- b
ncols = 3 #<-- a
step = 50
grid = [[1,2,3], [4,5,6], [3,8,9] ]
#Reading files, retrieving integers and creating an array.
def read (self, filename):
if filename is None:
print("Your Grid Reference format is incorrect for UK!")
return False
try:
file = open(filename,'r')
except:
print("No such Grid Reference in the UK!")
return False
a = file.readline().split()
self.ncols = int(a[1])
b = file.readline().split()
self.nrows = int(b[1])
x = file.readline().split()
self.xllc = int(x[1])
y = file.readline().split()
self.yllc = int(y[1])
z = file.readline().split()
self.step = int(z[1])
        file.close()
self.grid = loadtxt(filename, skiprows=5)
return True
#Retrieving files according to grid references.
def readgr(self, gridsqr):
thepath = "data/" + gridsqr[0:3].upper()
if len(gridsqr) > 12:
thepath = None
elif len(gridsqr) == 12:
thepath = thepath + gridsqr[7]
elif len(gridsqr) == 10:
thepath = thepath + gridsqr[6]
elif len(gridsqr) == 8:
thepath = thepath + gridsqr[5]
elif len(gridsqr) == 6:
thepath = thepath + gridsqr[4]
elif len(gridsqr) == 4:
thepath = thepath + gridsqr[3]
else:
thepath = None
if thepath != None:
thepath = thepath + ".asc"
self.read(thepath)
if __name__ == "__main__":
#Defining global variable.
region = Region()
#Users input
region.readgr("SN43567 43567")
#Printing values.
print("------------")
print(region.xllc)
print("xllcorner")
print("------------")
print(region.yllc)
print("yllcorner")
print("------------")
print(region.ncols)
print("ncolumns")
print("------------")
print(region.nrows)
print("nrows")
print("------------")
|
mit
| -6,753,461,964,680,506,000
| 22.648936
| 68
| 0.482231
| false
| 3.597087
| false
| false
| false
|
CCI-MOC/GUI-Backend
|
api/v1/serializers/export_request_serializer.py
|
1
|
1091
|
from core.models.export_request import ExportRequest
from core.models.user import AtmosphereUser
from core.models.instance import Instance
from rest_framework import serializers
class ExportRequestSerializer(serializers.ModelSerializer):
"""
"""
name = serializers.CharField(source='export_name')
instance = serializers.SlugRelatedField(
slug_field='provider_alias',
queryset=Instance.objects.all()
)
status = serializers.CharField(default="pending")
disk_format = serializers.CharField(source='export_format')
owner = serializers.SlugRelatedField(slug_field='username',
source='export_owner',
queryset=AtmosphereUser.objects.all()
)
file = serializers.CharField(read_only=True, default="",
required=False, source='export_file')
class Meta:
model = ExportRequest
fields = ('id', 'instance', 'status', 'name',
'owner', 'disk_format', 'file')
|
apache-2.0
| 9,100,617,223,407,332,000
| 37.964286
| 78
| 0.605866
| false
| 4.914414
| false
| false
| false
|
emmanuelle/scikits.image
|
skimage/feature/texture.py
|
2
|
9818
|
"""
Methods to characterize image textures.
"""
import math
import numpy as np
from scipy import ndimage
from ._texture import _glcm_loop, _local_binary_pattern
def greycomatrix(image, distances, angles, levels=256, symmetric=False,
normed=False):
"""Calculate the grey-level co-occurrence matrix.
    A grey level co-occurrence matrix is a histogram of co-occurring
greyscale values at a given offset over an image.
Parameters
----------
image : array_like of uint8
Integer typed input image. The image will be cast to uint8, so
the maximum value must be less than 256.
distances : array_like
List of pixel pair distance offsets.
angles : array_like
List of pixel pair angles in radians.
levels : int, optional
The input image should contain integers in [0, levels-1],
where levels indicate the number of grey-levels counted
(typically 256 for an 8-bit image). The maximum value is
256.
symmetric : bool, optional
If True, the output matrix `P[:, :, d, theta]` is symmetric. This
is accomplished by ignoring the order of value pairs, so both
(i, j) and (j, i) are accumulated when (i, j) is encountered
for a given offset. The default is False.
normed : bool, optional
If True, normalize each matrix `P[:, :, d, theta]` by dividing
by the total number of accumulated co-occurrences for the given
offset. The elements of the resulting matrix sum to 1. The
default is False.
Returns
-------
P : 4-D ndarray
The grey-level co-occurrence histogram. The value
`P[i,j,d,theta]` is the number of times that grey-level `j`
occurs at a distance `d` and at an angle `theta` from
grey-level `i`. If `normed` is `False`, the output is of
type uint32, otherwise it is float64.
References
----------
.. [1] The GLCM Tutorial Home Page,
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
.. [2] Pattern Recognition Engineering, Morton Nadler & Eric P.
Smith
.. [3] Wikipedia, http://en.wikipedia.org/wiki/Co-occurrence_matrix
Examples
--------
Compute 2 GLCMs: One for a 1-pixel offset to the right, and one
for a 1-pixel offset upwards.
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> result = greycomatrix(image, [1], [0, np.pi/2], levels=4)
>>> result[:, :, 0, 0]
array([[2, 2, 1, 0],
[0, 2, 0, 0],
[0, 0, 3, 1],
[0, 0, 0, 1]], dtype=uint32)
>>> result[:, :, 0, 1]
array([[3, 0, 2, 0],
[0, 2, 2, 0],
[0, 0, 1, 2],
[0, 0, 0, 0]], dtype=uint32)
"""
assert levels <= 256
image = np.ascontiguousarray(image)
assert image.ndim == 2
assert image.min() >= 0
assert image.max() < levels
image = image.astype(np.uint8)
distances = np.ascontiguousarray(distances, dtype=np.float64)
angles = np.ascontiguousarray(angles, dtype=np.float64)
assert distances.ndim == 1
assert angles.ndim == 1
P = np.zeros((levels, levels, len(distances), len(angles)),
dtype=np.uint32, order='C')
# count co-occurences
_glcm_loop(image, distances, angles, levels, P)
# make each GLMC symmetric
if symmetric:
Pt = np.transpose(P, (1, 0, 2, 3))
P = P + Pt
    # normalize each GLCM
if normed:
P = P.astype(np.float64)
glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1))
glcm_sums[glcm_sums == 0] = 1
P /= glcm_sums
return P
def greycoprops(P, prop='contrast'):
"""Calculate texture properties of a GLCM.
Compute a feature of a grey level co-occurrence matrix to serve as
a compact summary of the matrix. The properties are computed as
follows:
- 'contrast': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}(i-j)^2`
- 'dissimilarity': :math:`\\sum_{i,j=0}^{levels-1}P_{i,j}|i-j|`
- 'homogeneity': :math:`\\sum_{i,j=0}^{levels-1}\\frac{P_{i,j}}{1+(i-j)^2}`
- 'ASM': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}^2`
- 'energy': :math:`\\sqrt{ASM}`
- 'correlation':
.. math:: \\sum_{i,j=0}^{levels-1} P_{i,j}\\left[\\frac{(i-\\mu_i) \\
(j-\\mu_j)}{\\sqrt{(\\sigma_i^2)(\\sigma_j^2)}}\\right]
Parameters
----------
P : ndarray
Input array. `P` is the grey-level co-occurrence histogram
for which to compute the specified property. The value
`P[i,j,d,theta]` is the number of times that grey-level j
occurs at a distance d and at an angle theta from
grey-level i.
prop : {'contrast', 'dissimilarity', 'homogeneity', 'energy', \
'correlation', 'ASM'}, optional
The property of the GLCM to compute. The default is 'contrast'.
Returns
-------
results : 2-D ndarray
2-dimensional array. `results[d, a]` is the property 'prop' for
the d'th distance and the a'th angle.
References
----------
.. [1] The GLCM Tutorial Home Page,
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
Examples
--------
Compute the contrast for GLCMs with distances [1, 2] and angles
[0 degrees, 90 degrees]
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=4,
... normed=True, symmetric=True)
>>> contrast = greycoprops(g, 'contrast')
>>> contrast
array([[ 0.58333333, 1. ],
[ 1.25 , 2.75 ]])
"""
assert P.ndim == 4
(num_level, num_level2, num_dist, num_angle) = P.shape
assert num_level == num_level2
assert num_dist > 0
assert num_angle > 0
# create weights for specified property
I, J = np.ogrid[0:num_level, 0:num_level]
if prop == 'contrast':
weights = (I - J) ** 2
elif prop == 'dissimilarity':
weights = np.abs(I - J)
elif prop == 'homogeneity':
weights = 1. / (1. + (I - J) ** 2)
elif prop in ['ASM', 'energy', 'correlation']:
pass
else:
raise ValueError('%s is an invalid property' % (prop))
# compute property for each GLCM
if prop == 'energy':
asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
results = np.sqrt(asm)
elif prop == 'ASM':
results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
elif prop == 'correlation':
results = np.zeros((num_dist, num_angle), dtype=np.float64)
I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))
J = np.array(range(num_level)).reshape((1, num_level, 1, 1))
diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]
diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]
std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),
axes=(0, 1))[0, 0])
std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),
axes=(0, 1))[0, 0])
cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),
axes=(0, 1))[0, 0]
# handle the special case of standard deviations near zero
mask_0 = std_i < 1e-15
mask_0[std_j < 1e-15] = True
results[mask_0] = 1
# handle the standard case
mask_1 = mask_0 == False
results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])
elif prop in ['contrast', 'dissimilarity', 'homogeneity']:
weights = weights.reshape((num_level, num_level, 1, 1))
results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]
return results
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of the
angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern::
* 'default': original local binary pattern which is gray scale but not
rotation invariant.
* 'ror': extension of default implementation which is gray scale and
rotation invariant.
* 'uniform': improved rotation invariance with uniform patterns and
finer quantization of the angular space which is gray scale and
rotation invariant.
* 'var': rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.rafbis.it/biplab15/images/stories/docenti/Danielriccio/\
Articoliriferimento/LBP.pdf, 2002.
"""
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'var': ord('V')
}
image = np.array(image, dtype='double', copy=True)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
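
# Usage sketch (illustrative; not part of the original module). With an 8-point
# neighbourhood on a circle of radius 1, the output array has the same shape as
# the input image and holds one LBP code per pixel:
#
#     >>> img = np.random.randint(0, 256, size=(16, 16)).astype(np.uint8)
#     >>> codes = local_binary_pattern(img, P=8, R=1, method='uniform')
#     >>> codes.shape
#     (16, 16)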
|
bsd-3-clause
| -8,732,840,251,207,591,000
| 34.316547
| 80
| 0.561316
| false
| 3.450967
| false
| false
| false
|
lablup/sorna-jupyter-kernel
|
src/ai/backend/integration/jupyter/install.py
|
1
|
4036
|
'''
The kernel installer.
Run `python -m ai.backend.integration.jupyter.install` to use Backend.AI in your Jupyter notebooks.
'''
import argparse
import json
import os
import sys
import webbrowser
from jupyter_client.kernelspec import KernelSpecManager
from IPython.utils.tempdir import TemporaryDirectory
from .kernel import kernels
def clean_kernel_spec(user=True, prefix=None):
mgr = KernelSpecManager()
# NOTE: remove_kernel_spec() and get_all_specs() does not support explicit prefix.
# Sometimes we may need to perform --clean-only multiple times to completely
# remove all kernelspecs installed around venvs and system global directories.
for name, info in mgr.get_all_specs().items():
if name.startswith('backend'):
print("Removing existing Backend.AI kernel: {0}"
.format(info['spec']['display_name']))
mgr.remove_kernel_spec(name)
def install_kernel_spec(name, spec_json, user=True, prefix=None):
with TemporaryDirectory() as td:
os.chmod(td, 0o755) # Starts off as 700, not user readable
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(spec_json, f, sort_keys=True)
print("Installing Backend.AI Jupyter kernel spec: {0}"
.format(spec_json['display_name']))
KernelSpecManager().install_kernel_spec(
td, name, user=user, replace=True, prefix=prefix)
def query_yes_no(prompt):
valid = {'y': True, 'yes': True, 'n': False, 'no': False}
while True:
choice = input('{0} [y/n] '.format(prompt)).lower()
if choice in valid:
return valid[choice]
else:
            prompt = 'Please answer in y/yes/n/no.'
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument('--user', action='store_true',
help="Install to the per-user kernels registry. Default if not root.")
ap.add_argument('--sys-prefix', action='store_true',
help="Install to sys.prefix (e.g. a virtualenv or conda env)")
ap.add_argument('--clean-only', action='store_true',
help="Perform only clean-up of existing Backend.AI kernels.")
ap.add_argument('-q', '--quiet', action='store_true',
help="Do not ask the user anything.")
ap.add_argument('--prefix',
help="Install to the given prefix. "
"Kernelspec will be installed in {PREFIX}/share/jupyter/kernels/")
args = ap.parse_args(argv)
if args.sys_prefix:
args.prefix = sys.prefix
if not args.prefix and not _is_root():
args.user = True
clean_kernel_spec(user=args.user, prefix=args.prefix)
if args.clean_only:
return
for kern in kernels:
spec = {
"argv": [sys.executable, "-m", "ai.backend.integration.jupyter",
"-f", "{connection_file}",
"--",
"-k", kern.__name__],
"display_name": kern.language_info['name'],
"language": kern.language,
}
install_kernel_spec(kern.__name__, spec, user=args.user, prefix=args.prefix)
if not args.quiet:
print()
has_api_key = bool(os.environ.get('BACKEND_ACCESS_KEY', ''))
if has_api_key:
print('It seems that you already configured the API key. Enjoy!')
else:
if query_yes_no('You can get your own API keypair from https://cloud.backend.ai. Do you want to open the site?'):
webbrowser.open_new_tab('https://cloud.backend.ai')
print()
print('If you already have the keypair or just grabbed a new one,')
print('run the following in your shell before running jupyter notebook:\n')
print(' export BACKEND_ACCESS_KEY="AKIA..."')
print(' export BACKEND_SECRET_KEY="......."\n')
if __name__ == '__main__':
main()
|
mit
| 8,950,631,238,253,865,000
| 36.027523
| 125
| 0.610505
| false
| 3.793233
| false
| false
| false
|
deryni/cockpit
|
tools/title2sentence.py
|
1
|
4253
|
#!/usr/bin/env python3
# This file is part of Cockpit.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
import sys
import argparse
keep_words = [
'Web Console', 'Cockpit',
'Red Hat',
'Insights',
'Docker',
'Customer Portal',
'SELinux', 'SETroubleshoot',
'Tang',
'iSCSI',
'Linux',
'NetworkManager',
'PackageKit',
'vCPU',
'IPv4', 'IPv6',
'IoT',
'ID',
': Server',
': Invalid',
'KiB', 'MiB', 'GiB',
'ABRT Analytics',
'GNOME Software',
'CAs', 'VMs', 'CPUs',
'Hour : Minute',
'Ctrl+Alt',
'$ExitCode',
'Launch Remote Viewer',
'Failed to start',
]
patterns = [
"of $0 CPU",
"No memory reserved. Append a crashkernel option",
"Cockpit was unable to log in",
"Cockpit had an unexpected internal error",
"You need to switch to",
"Free up space in this group",
"This day doesn",
"Tip: Make your key",
"virt-install package needs to be",
"You need to switch to",
]
the_map = []
# Replace exact positions
def replace(s, old_s, word):
if not word.strip():
return s
while word in old_s:
i = old_s.find(word)
s = s[:i] + word + s[i + len(word):]
old_s = old_s.replace(word, " " * len(word), 1)
return s
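# For example (illustrative): with old_s = "SELinux is enabled" and
# s = old_s.lower(), replace(s, old_s, "SELinux") puts the protected word back
# at its original position, yielding "SELinux is enabled" again.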
def capitalize(s):
for word in keep_words:
if s.startswith(word):
return s
return s[0].upper() + s[1:]
def main():
parser = argparse.ArgumentParser(description="TODO")
parser.add_argument("-i", "--input", required=True, help="File containing strings")
parser.add_argument("-o", "--output", required=True, help="File for output script to be written into")
opts = parser.parse_args()
with open(opts.input, "r") as f:
for line in f:
old_s = line.strip()
old_s = old_s[1:-1] # Remove first and last quotes
if not old_s:
continue
# Leave out strings that don't contain a single upper case letter
if not [x for x in old_s if x.isupper()]:
continue
# MEH: There are some strings that don't need any action but are tricky to ignore
skip = False
for pattern in patterns:
if old_s.startswith(pattern):
skip = True
if skip:
continue
# Backslash special characters
for c in ['"', "&", "$", "/"]:
if c in old_s:
old_s = old_s.replace(c, "\\{0}".format(c))
new_s = old_s.lower()
# Return words that should stay upper-case
for word in keep_words:
new_s = replace(new_s, old_s, word)
# Return words that were all caps before (stuff like 'CPU', 'DNS'...)
for word in old_s.split(" "):
if word == word.upper():
new_s = replace(new_s, old_s, word)
# Return capitalization of (multiple) sentences
sentences = new_s.split(". ")
Sentences = list(map(capitalize, sentences))
new_s = ". ".join(Sentences)
if new_s != old_s:
the_map.append([old_s, new_s])
# Generate script for replacing these strings
output = ""
if the_map:
output = "find pkg src test/verify -type f -exec sed -i \\\n"
for pair in the_map:
output += '-e "s/\([^ ]\){0}/\\1{1}/" \\\n'.format(pair[0], pair[1])
output += "{} \;"
with open(opts.output, "w") as f:
f.write(output)
if __name__ == '__main__':
sys.exit(main())
|
lgpl-2.1
| 3,002,470,520,402,526,700
| 27.543624
| 106
| 0.5589
| false
| 3.619574
| false
| false
| false
|
jclement/Cacheberry-Pi
|
lib/gislib.py
|
1
|
3683
|
# Adapted from code & formulas by David Z. Creemer and others
# http://www.zachary.com/blog/2005/01/12/python_zipcode_geo-programming
# http://williams.best.vwh.net/avform.htm
#
# Additions by Jeff Clement
from math import sin,cos,atan,acos,asin,atan2,sqrt,pi, modf, radians,degrees
# At the equator / on another great circle???
nauticalMilePerLat = 60.00721
nauticalMilePerLongitude = 60.10793
rad = pi / 180.0
milesPerNauticalMile = 1.15078
kmsPerNauticalMile = 1.85200
degreeInMiles = milesPerNauticalMile * 60
degreeInKms = kmsPerNauticalMile * 60
# earth's mean radius = 6,371km
earthradius = 6371.0
def getDistance(loc1, loc2):
"aliased default algorithm; args are (lat_decimal,lon_decimal) tuples"
return getDistanceByHaversine(loc1, loc2)
def getDistanceByHaversine(loc1, loc2):
"Haversine formula - give coordinates as (lat_decimal,lon_decimal) tuples"
lat1, lon1 = loc1
lat2, lon2 = loc2
#if type(loc1[0]) == type(()):
# # convert from DMS to decimal
# lat1,lon1 = DMSToDecimal(loc1[0]),DMSToDecimal(loc1[1])
#if type(loc2[0]) == type(()):
# lat2,lon2 = DMSToDecimal(loc2[0]),DMSToDecimal(loc2[1])
# convert to radians
lon1 = lon1 * pi / 180.0
lon2 = lon2 * pi / 180.0
lat1 = lat1 * pi / 180.0
lat2 = lat2 * pi / 180.0
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2.0))**2
c = 2.0 * atan2(sqrt(a), sqrt(1.0-a))
km = earthradius * c
return km
def DecimalToDMS(decimalvalue):
  "convert a decimal value to a degree,minute,second tuple"
  frac, whole = modf(decimalvalue)
  d = int(whole)
  minutes = abs(frac) * 60.0
  m = int(minutes)
  s = (minutes - m) * 60.0
  return (d, m, s)
def DMSToDecimal((degrees,minutes,seconds)):
"Convert a value from decimal (float) to degree,minute,second tuple"
d = abs(degrees) + (minutes/60.0) + (seconds/3600.0)
if degrees < 0:
return -d
else:
return d
def getCoordinateDiffForDistance(originlat, originlon, distance, units="km"):
"""return longitude & latitude values that, when added to & subtraced from
origin longitude & latitude, form a cross / 'plus sign' whose ends are
a given distance from the origin"""
degreelength = 0
if units == "km":
degreelength = degreeInKms
elif units == "miles":
degreelength = degreeInMiles
else:
raise Exception("Units must be either 'km' or 'miles'!")
lat = distance / degreelength
lon = distance / (cos(originlat * rad) * degreelength)
return (lat, lon)
def isWithinDistance(origin, loc, distance):
"boolean for checking whether a location is within a distance"
if getDistanceByHaversine(origin, loc) <= distance:
return True
else:
return False
def isAngleWithin(a1, a2, threshold):
"determine if two angles are within {threshold} degrees of each other"
a_min = min(a1, a2)
a_max = max(a1, a2)
if (a_max-a_min) > threshold:
return ((a_min+360) - a_max) <= threshold
return (a_max - a_min) <= threshold
def calculateBearing(start, target):
"calculate a bearing in degrees (N=0 deg) from start to target point"
lat1, lon1 = map(radians, start)
lat2, lon2 = map(radians, target)
dLon = lon2-lon1
y = sin(dLon) * cos(lat2)
x = cos(lat1)*sin(lat2) - \
sin(lat1)*cos(lat2)*cos(dLon)
return (degrees(atan2(y, x))) % 360
def humanizeBearing(bearing):
"convert a bearing in degrees to a human readable version"
#symbols = ['N','NE','E','SE','S','SW','W','NW']
symbols = ['N','NNE','NE','ENE','E','ESE','SE','SSE','S','SSW','SW','WSW','W','WNW','NW','NNW']
step = 360.0 / len(symbols)
for i in range(len(symbols)):
if isAngleWithin(i*step, bearing, step/2):
return symbols[i]
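
# Usage sketch (illustrative, not part of the original module): great-circle
# distance and initial bearing between two example coordinates.
if __name__ == '__main__':
  london = (51.5074, -0.1278)
  paris = (48.8566, 2.3522)
  print 'distance (km):', getDistanceByHaversine(london, paris)
  print 'bearing:', humanizeBearing(calculateBearing(london, paris))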
|
bsd-3-clause
| 593,282,141,699,682,600
| 28.230159
| 97
| 0.663046
| false
| 2.890895
| false
| false
| false
|
jmdejong/Asciifarm
|
asciifarm/client/display.py
|
1
|
5695
|
import os
from ratuil.layout import Layout
from ratuil.bufferedscreen import BufferedScreen as Screen
#from ratuil.screen import Screen
from ratuil.textstyle import TextStyle
from asciifarm.common.utils import get
from .listselector import ListSelector
SIDEWIDTH = 20
ALPHABET = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
class Display:
def __init__(self, charMap):
self.characters = {}
def parseSprite(sprite):
if isinstance(sprite, str):
return (sprite, None, None)
char = get(sprite, 0, " ")
fg = get(sprite, 1)
bg = get(sprite, 2)
return (char, fg, bg)
for name, sprite in charMap["mapping"].items():
vals = parseSprite(sprite)
if vals:
self.characters[name] = vals
for name, colours in charMap.get("writable", {}).items():
fg = get(colours, 0)
bg = get(colours, 1)
for i in range(min(len(ALPHABET), len(charMap.get("alphabet", [])))):
self.characters[name + '-' + ALPHABET[i]] = (charMap["alphabet"][i], fg, bg)
self.defaultChar = parseSprite(charMap.get("default", "?"))
self.messageColours = charMap.get("msgcolours", {})
fname = os.path.join(os.path.dirname(__file__), "layout.xml")
self.layout = Layout.from_xml_file(fname)
self.layout.get("field").set_char_size(charMap.get("charwidth", 1))
self.screen = Screen()
self.screen.clear()
self.layout.set_target(self.screen)
self.layout.update()
# temporary, until these have a better place
self.inventory = ListSelector(self.getWidget("inventory"))
self.inventory._debug_name = "inventory"
self.equipment = ListSelector(self.getWidget("equipment"))
self.equipment._debug_name = "equipment"
self.ground = ListSelector(self.getWidget("ground"))
self.ground._debug_name = "ground"
self.switch = ListSelector(self.getWidget("switchtitles"))
self.switch._debug_name = "switch"
self.switch.setItems(["inventory", "equipment", "ground"])
self.menus = {
"inventory": self.inventory,
"equipment": self.equipment,
"ground": self.ground
}
self.layout.get("switch").select(0)
def getWidget(self, name):
return self.layout.get(name)
def resizeField(self, size):
self.getWidget("field").set_size(*size)
self.getWidget("fieldbackground").change()
def drawFieldCells(self, cells):
field = self.getWidget("field")
for cell in cells:
(x, y), spriteNames = cell
if not len(spriteNames):
char, fg, bg = self.getChar(' ')
else:
char, fg, bg = self.getChar(spriteNames[0])
for spriteName in spriteNames[1:]:
if bg is not None:
break
_char, _fg, bg = self.getChar(spriteName)
field.change_cell(x, y, char, TextStyle(fg, bg))
def setFieldCenter(self, pos):
self.getWidget("field").set_center(*pos)
def setHealth(self, health, maxHealth):
if health is None:
health = 0
if maxHealth is None:
maxHealth = 0
self.getWidget("health").set_total(maxHealth)
self.getWidget("health").set_filled(health)
self.getWidget("healthtitle").format({"filled": health, "total":maxHealth})
def showInfo(self, infostring):
self.getWidget("info").set_text(infostring)
def selectMenu(self, *args, **kwargs):
self.switch.select(*args, **kwargs)
self.layout.get("switch").select(self.getSelectedMenu())
def getSelectedMenu(self):
return self.switch.getSelectedItem()
def getSelectedItem(self, menu=None):
return self._getMenu(menu).getSelected()
def selectItem(self, menu=None, *args, **kwargs):
self._getMenu(menu).select(*args, **kwargs)
def _getMenu(self, name=None):
if name is None:
name = self.getSelectedMenu()
name = name.casefold()
return self.menus[name]
def setInventory(self, items):
self.inventory.setItems(items)
def setEquipment(self, slots):
self.equipment.setItems([
slot + ": " + (item if item else "")
for slot, item in slots
])
def setGround(self, items):
self.ground.setItems(items)
def addMessage(self, message, msgtype=None):
if msgtype is not None:
style = TextStyle(*self.messageColours.get(msgtype, (7,0)))
else:
style = None
self.getWidget("msg").add_message(message, style)
def log(self, message):
self.addMessage(str(message))
def scrollBack(self, amount, relative=True):
self.getWidget("msg").scroll(amount, relative)
def setInputString(self, string, cursor):
self.getWidget("textinput").set_text(string, cursor)
def update(self):
self.layout.update()
self.screen.update()
def getChar(self, sprite):
"""This returns the character belonging to some spritename. This does not read a character"""
return self.characters.get(sprite, self.defaultChar)
def update_size(self):
self.screen.reset()
|
gpl-3.0
| 3,941,003,261,231,911,400
| 31.729885
| 109
| 0.569096
| false
| 4.01339
| false
| false
| false
|
3DGenomes/tadbit
|
scripts/liftover_tads_genomes.py
|
2
|
7055
|
"""
17 May 2013
Liftover (1) wrapper applied to the comparison of topologically associated
domains.
This script allows to compare Hi-C experiments (mainly align TAD boundaries)
done with different assemblies (e.g.: NCBI36 and GRCh37 for human genome), or
in different species.
INSTALL:
- liftover tool needs to be downloaded from
(http://hgdownload.cse.ucsc.edu/admin/exe/), installed, and appended to the
path.
- depending on the data a 'chain' file may also be downloaded. For example
from: http://hgdownload.cse.ucsc.edu/goldenPath/hg19/liftOver/
(1) Fujita, P. A., Rhead, B., Zweig, A. S., Hinrichs, A. S., Karolchik, D.,
Cline, M. S., Goldman, M., et al. (2011).
The UCSC Genome Browser database: update 2011.
Nucleic Acids Research, 39(Database issue), D876-82. doi:10.1093/nar/gkq963
"""
from os import system, listdir
from os.path import isdir
from pytadbit import load_chromosome
from pytadbit.utils.remap_tads import remap_chr, reorder
from optparse import OptionParser
def check_pik(path):
with open(path, "r") as f:
f.seek (0, 2) # Seek @ EOF
fsize = f.tell() # Get Size
f.seek (max (fsize-2, 0), 0) # Set pos @ last n chars
key = f.read() # Read to end
return key == 's.'
def main():
"""
main function
"""
opts = get_options()
res = opts.res
if opts.genomes:
# load all chromosomes of reference genomes
ref_genome = {}
for crm in listdir(opts.ref_genome):
crm_path = opts.ref_genome + crm + '/'
if not isdir(crm_path):
continue
for crm_fh in listdir(crm_path):
crm_pik = crm_path + crm_fh
if not check_pik(crm_pik):
continue
ref_genome[crm] = load_chromosome(crm_pik)
if not opts.res:
resolutions = []
for crm in ref_genome:
for exp in ref_genome[crm].experiments:
resolutions.append(exp.resolution)
if not all([r == resolutions[0] for r in resolutions]):
raise AssertionError('Not all Experiments have the ' +
'same resolution\n')
res = resolutions[0]
alt_genomes = {}
for i, genome in enumerate(opts.genomes):
alt_genomes[i] = {}
for crm in listdir(genome):
crm_path = genome + crm + '/'
if not isdir(crm_path):
continue
for crm_fh in listdir(crm_path):
crm_pik = crm_path + crm_fh
if not check_pik(crm_pik):
continue
try:
alt_genomes[i][crm] = load_chromosome(crm_pik)
except:
print ('SKIPPING: {} \n not a valid ' +
'chromosome').format(crm_pik)
genome = {}
for crm in alt_genomes[i]:
genome = remap_chr(alt_genomes[i][crm], crm, '/tmp/',
opts.lft_path, opts.chain_path,
genome=genome)
reorder(genome)
for exp in genome:
for crm in genome[exp]:
try:
ref_genome[crm].add_experiment(
exp, res, tad_handler=genome[exp][crm])
except KeyError:
print ('Chromosome {} skipped, not in reference ' +
'genome').format(crm)
system('mkdir -p ' + opts.out_path)
for crm in ref_genome:
system('mkdir -p ' + opts.out_path + '/' + crm)
out_f = opts.out_path + '/' + crm + '/chr' + crm + '.tdb'
ref_genome[crm].save_chromosome(out_f, force=True)
# TODO: the same for 1 chromosome
def get_options():
'''
parse option from call
'''
def vararg_callback(option, _, value, parser):
assert value is None
value = []
rargs = parser.rargs
while rargs:
arg = rargs[0]
if ((arg[:2] == "--" and len(arg) > 2) or
(arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
break
else:
value.append(arg)
del rargs[0]
setattr(parser.values, option.dest, value)
#
parser = OptionParser(
usage=("%prog [options] file [options] file [options] " +
"file [options [file ...]]"))
parser.add_option('--genomes', dest='genomes', metavar="PATH",
action='callback', default=None,
callback=vararg_callback,
help='''path(s) to a directory/ies with a list of
chromosomes saved through tadbit (required if not
passing chromosomes)''')
parser.add_option('--ref_genome', dest='ref_genome', metavar="PATH",
help='''path to a directory with a list of chromosomes
saved through tadbit (required with genomes option)''')
parser.add_option('--crm', dest='crm', metavar="PATH",
help='''path to input file, a chromosome saved through
tadbit (required if not passing genomes)''')
parser.add_option('--ref_crm', dest='ref_crm', metavar="PATH",
help='''path to second input file, a reference chromosome
saved through tadbit (required)''')
    parser.add_option('--chain', dest='chain_path', action="store",
                      help='''path to UCSC chain file (required)''')
parser.add_option('-o', dest='out_path', metavar="PATH",
default='./',
help='''path to out file where merged tadbit chromosome
will be stored''')
parser.add_option('--res', dest='res',
default=None,
help='''Wanted resolution for the detection of TADs (i.e.:
100Kb)''')
parser.add_option('--crm_name', dest='crm_name',
default=None,
help='''Chromosome name for crm1 (e.g. 21).''')
parser.add_option('--tmp', dest='tmp_path', metavar="PATH",
default='./',
help='''path to temporary directory to store liftover
outfiles''')
parser.add_option('--liftover',
dest='lft_path', default='/usr/local/bin/',\
help='''[%default] path to liftover binary''')
opts = parser.parse_args()[0]
if not opts.crm or not opts.ref_crm or not opts.chain_path:
if not opts.genomes or not opts.ref_genome or not opts.chain_path:
exit(parser.print_help())
return opts
if __name__ == "__main__":
exit(main())
|
gpl-3.0
| -5,894,526,456,116,291,000
| 38.634831
| 80
| 0.505599
| false
| 4.019943
| false
| false
| false
|
smallyear/linuxLearn
|
salt/salt/modules/cloud.py
|
1
|
8214
|
# -*- coding: utf-8 -*-
'''
Salt-specific interface for calling Salt Cloud directly
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import copy
# Import salt libs
try:
import salt.cloud
HAS_SALTCLOUD = True
except ImportError:
HAS_SALTCLOUD = False
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
__func_alias__ = {
'profile_': 'profile'
}
def __virtual__():
'''
Only work on POSIX-like systems
'''
if HAS_SALTCLOUD:
return True
return False
def _get_client():
'''
Return a cloud client
'''
client = salt.cloud.CloudClient(
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
pillars=copy.deepcopy(__pillar__.get('cloud', {}))
)
return client
def list_sizes(provider='all'):
'''
List cloud provider sizes for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_sizes my-gce-config
'''
client = _get_client()
sizes = client.list_sizes(provider)
return sizes
def list_images(provider='all'):
'''
List cloud provider images for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_images my-gce-config
'''
client = _get_client()
images = client.list_images(provider)
return images
def list_locations(provider='all'):
'''
List cloud provider locations for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_locations my-gce-config
'''
client = _get_client()
locations = client.list_locations(provider)
return locations
def query(query_type='list_nodes'):
'''
List cloud provider data for all providers
CLI Examples:
.. code-block:: bash
salt '*' cloud.query
salt '*' cloud.query list_nodes_full
salt '*' cloud.query list_nodes_select
'''
client = _get_client()
info = client.query(query_type)
return info
def full_query(query_type='list_nodes_full'):
'''
List all available cloud provider data
CLI Example:
.. code-block:: bash
salt '*' cloud.full_query
'''
return query(query_type=query_type)
def select_query(query_type='list_nodes_select'):
'''
List selected nodes
CLI Example:
.. code-block:: bash
salt '*' cloud.select_query
'''
return query(query_type=query_type)
def has_instance(name, provider=None):
'''
Return true if the instance is found on a provider
CLI Example:
.. code-block:: bash
salt '*' cloud.has_instance myinstance
'''
data = get_instance(name, provider)
if data is None:
return False
return True
def get_instance(name, provider=None):
'''
Return details on an instance.
Similar to the cloud action show_instance
but returns only the instance details.
CLI Example:
.. code-block:: bash
salt '*' cloud.get_instance myinstance
SLS Example:
.. code-block:: bash
{{ salt['cloud.get_instance']('myinstance')['mac_address'] }}
'''
data = action(fun='show_instance', names=[name], provider=provider)
info = salt.utils.cloud.simple_types_filter(data)
try:
# get the first: [alias][driver][vm_name]
info = next(six.itervalues(next(six.itervalues(next(six.itervalues(info))))))
except AttributeError:
return None
return info
def profile_(profile, names, vm_overrides=None, **kwargs):
'''
Spin up an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt '*' cloud.profile my-gce-config myinstance
'''
client = _get_client()
info = client.profile(profile, names, vm_overrides=vm_overrides, **kwargs)
return info
def destroy(names):
'''
Destroy the named VM(s)
CLI Example:
.. code-block:: bash
salt '*' cloud.destroy myinstance
'''
client = _get_client()
info = client.destroy(names)
return info
def action(
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
**kwargs):
'''
Execute a single action on the given provider/instance
CLI Example:
.. code-block:: bash
salt '*' cloud.action start instance=myinstance
salt '*' cloud.action stop instance=myinstance
salt '*' cloud.action show_image provider=my-ec2-config image=ami-1624987f
'''
client = _get_client()
info = client.action(fun, cloudmap, names, provider, instance, kwargs)
return info
def create(provider, names, **kwargs):
'''
Create an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt minionname cloud.create my-ec2-config myinstance image=ami-1624987f size='t1.micro' ssh_username=ec2-user securitygroup=default delvol_on_destroy=True
'''
client = _get_client()
info = client.create(provider, names, **kwargs)
return info
def volume_list(provider):
'''
List block storage volumes
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_list my-nova
'''
client = _get_client()
info = client.extra_action(action='volume_list', provider=provider, names='name')
return info['name']
def volume_delete(provider, names, **kwargs):
'''
Delete volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_delete my-nova myblock
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_delete', **kwargs)
return info
def volume_create(provider, names, **kwargs):
'''
Create volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_create my-nova myblock size=100 voltype=SSD
'''
client = _get_client()
info = client.extra_action(action='volume_create', names=names, provider=provider, **kwargs)
return info
def volume_attach(provider, names, **kwargs):
'''
Attach volume to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_attach my-nova myblock server_name=myserver device='/dev/xvdf'
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_attach', **kwargs)
return info
def volume_detach(provider, names, **kwargs):
'''
Detach volume from a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_detach my-nova myblock server_name=myserver
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_detach', **kwargs)
return info
def network_list(provider):
'''
List private networks
CLI Example:
.. code-block:: bash
salt minionname cloud.network_list my-nova
'''
client = _get_client()
return client.extra_action(action='network_list', provider=provider, names='names')
def network_create(provider, names, **kwargs):
'''
Create private network
CLI Example:
.. code-block:: bash
salt minionname cloud.network_create my-nova names=['salt'] cidr='192.168.100.0/24'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='network_create', **kwargs)
def virtual_interface_list(provider, names, **kwargs):
'''
List virtual interfaces on a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_list my-nova names=['salt-master']
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_list', **kwargs)
def virtual_interface_create(provider, names, **kwargs):
'''
Attach private interfaces to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_create', **kwargs)
|
apache-2.0
| 4,759,294,254,014,473,000
| 20.061538
| 163
| 0.632213
| false
| 3.876357
| true
| false
| false
|
jasonhamilton/hotwing-core
|
hotwing_core/panel.py
|
1
|
3463
|
from __future__ import division
from .rib import Rib
from .profile import Profile
from .coordinate import Coordinate
class Panel():
"""
A Panel is a representation of a wing panel and contains all of the items/objects
necessary to define a wing.
A Panel can be thought of as a wing facing down with rib_1 on the left and rib_2 on the right.
    If rib_1 is the root chord and rib_2 is the tip chord, the panel will represent the left part
of a wing.
.. code-block:: bash
| ------ width ---------- |
trailing edge
---------------------------
| |
rib_1 | | rib_2
| |
---------------------------
leading edge
Args:
left_rib (Rib): Rib defining the left of the wing
right_rib (Rib): Rib defining the right of the wing
width (Float): Width of the Panel measured from left_rib to right_rib
:ivar left_rib: Left Rib
:ivar right_rib: Right Rib
:ivar width: Width
"""
def __init__(self, left_rib, right_rib, width):
self.left_rib = left_rib
self.right_rib = right_rib
self.width = width
@classmethod
def copy(cls, panel):
"""
Copy a panel
Args:
panel (Panel): object to copy
Returns:
Panel: New panel
"""
return cls(panel.left_rib, panel.right_rib, panel.width)
@classmethod
def reverse(cls, panel):
"""
Reverse the ribs on the panel. If you have a left side, it will make it a right side. The ribs
will maintain the same direction, but just switch sides.
Args:
panel (Panel): object to flip
Returns:
Panel: New flipped panel
"""
return cls(panel.right_rib, panel.left_rib, panel.width)
@classmethod
def trim(cls, panel, left=None, right=None):
"""
Creates a new Panel by taking an existing Panel and trimming it.
The new panel's ribs will be interpolated to the correct shape.
Args:
panel (Panel): object to trim
left (Float): distance from left rib to make the left side cut
right (Float): distance from left rib to make the right side cut
Returns:
Panel: New trimmed Panel
"""
if left is None or left == 0:
# no need to trim left
r1 = panel.left_rib
left = 0
else:
# need to interp new left
r1 = Rib.interpolate_new_rib(
panel.left_rib, panel.right_rib, panel.width, left)
if right is None or right == panel.width:
# no need to trim right
r2 = panel.right_rib
right = panel.width
else:
r2 = Rib.interpolate_new_rib(
panel.left_rib, panel.right_rib, panel.width, right)
new_width = right - left
p = cls(r1, r2, new_width)
return p
def __getitem__(self, key):
"""
Trim Panel using the slice functionality.
Ex: panel_obj[2:5], trims from 2 to 5
"""
if isinstance(key, slice):
return Panel.trim(self,key.start,key.stop)
raise NotImplementedError
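
# Usage sketch (illustrative; Rib construction lives in rib.py and the rib
# objects below are assumed to exist already):
#
#   panel = Panel(root_rib, tip_rib, width=24.0)
#   inner = panel[0:12]               # same as Panel.trim(panel, left=0, right=12)
#   mirrored = Panel.reverse(panel)   # swap ribs to lay out the opposite wing side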
|
gpl-3.0
| 3,650,149,134,089,995,000
| 28.862069
| 103
| 0.520358
| false
| 4.270037
| false
| false
| false
|
emanuil-tolev/fundfind
|
fundfind/core.py
|
1
|
1312
|
import os
from flask import Flask
from flask.ext.login import LoginManager, current_user
from fundfind import default_settings
login_manager = LoginManager()
def create_app():
app = Flask(__name__)
configure_app(app)
configure_jinja(app)
setup_error_email(app)
login_manager.setup_app(app)
return app
def configure_app(app):
app.config.from_object(default_settings)
# parent directory
here = os.path.dirname(os.path.abspath( __file__ ))
config_path = os.path.join(os.path.dirname(here), 'app.cfg')
if os.path.exists(config_path):
app.config.from_pyfile(config_path)
def setup_error_email(app):
ADMINS = app.config.get('ADMINS', '')
if not app.debug and ADMINS:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1',
'server-error@no-reply.com',
ADMINS, 'FundFind error')
        mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
def configure_jinja(app):
    # expose some more objects to the templates
add_to_globals = {
'isinstance': isinstance,
'list': list,
'dict': dict
}
app.jinja_env.globals.update(**add_to_globals)
app = create_app()
|
mit
| 3,052,434,311,215,307,300
| 28.155556
| 64
| 0.632622
| false
| 3.624309
| true
| false
| false
|
nikkomidoy/project_soa
|
project_soa/users/views.py
|
1
|
1647
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("soamgr:order")
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name',]
# we already imported User in the view code above, remember?
model = User
def get_context_data(self, **kwargs):
context = super(UserUpdateView, self).get_context_data(**kwargs)
if self.request.user.is_paraplanner:
self.fields += ['account_name','bsb','account_number',]
return context
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
|
mit
| 1,562,233,376,126,577,700
| 29.5
| 79
| 0.694596
| false
| 4.12782
| false
| false
| false
|
mozilla-releng/services
|
lib/please_cli/please_cli/config.py
|
1
|
3928
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import click
CWD_DIR = os.path.abspath(os.getcwd())
NO_ROOT_DIR_ERROR = '''Project root directory couldn't be detected.
`please` file couldn't be found in any of the following folders:
%s
'''
with open(os.path.join(os.path.dirname(__file__), 'VERSION')) as f:
VERSION = f.read().strip()
ROOT_DIR = None
_folders = []
for item in reversed(CWD_DIR.split(os.sep)):
item_dir = '/' + CWD_DIR[:CWD_DIR.find(item) + len(item)][1:]
_folders.append(item_dir)
if os.path.isfile(os.path.join(item_dir, 'please')):
ROOT_DIR = item_dir
break
if ROOT_DIR is None:
raise click.ClickException(NO_ROOT_DIR_ERROR % '\n - '.join(_folders))
CACHE_URLS = [
'https://cache.mozilla-releng.net',
]
SRC_DIR = os.path.join(ROOT_DIR, 'src')
TMP_DIR = os.path.join(ROOT_DIR, 'tmp')
CHANNELS = ['master', 'testing', 'staging', 'production']
DEPLOY_CHANNELS = ['testing', 'staging', 'production']
DOCKER_BASE_REGISTRY = 'index.docker.io'
DOCKER_BASE_REPO = 'mozillareleng/services'
DOCKER_BASE_TAG = 'base-' + VERSION
NIX_BIN_DIR = os.environ.get('NIX_BIN_DIR', '') # must end with /
OPENSSL_BIN_DIR = os.environ.get('OPENSSL_BIN_DIR', '') # must end with /
OPENSSL_ETC_DIR = os.environ.get('OPENSSL_ETC_DIR', '') # must end with /
POSTGRESQL_BIN_DIR = os.environ.get('POSTGRESQL_BIN_DIR', '') # must end with /
IN_DOCKER = False
if os.path.exists('/proc/1/cgroup'):
with open('/proc/1/cgroup', 'rt') as ifh:
IN_DOCKER = 'docker' in ifh.read()
TEMPLATES = {
'backend-json-api': {}
}
DEV_PROJECTS = ['postgresql', 'redis']
PROJECTS = list(map(lambda x: x.replace('_', '-')[len(SRC_DIR) + 1:],
filter(lambda x: os.path.exists(os.path.join(SRC_DIR, x, 'default.nix')),
glob.glob(SRC_DIR + '/*') + glob.glob(SRC_DIR + '/*/*'))))
PROJECTS += DEV_PROJECTS
# TODO: below data should be placed in src/<app>/default.nix files alongside
PROJECTS_CONFIG = {
'common/naming': {
'update': False,
},
'postgresql': {
'update': False,
'run': 'POSTGRESQL',
'run_options': {
'port': 9000,
'data_dir': os.path.join(TMP_DIR, 'postgresql'),
},
},
'redis': {
'update': False,
'run': 'REDIS',
'run_options': {
'port': 6379,
'schema': 'redis',
'data_dir': os.path.join(TMP_DIR, 'redis'),
},
},
'docs': {
'update': False,
'run': 'SPHINX',
'run_options': {
'schema': 'http',
'port': 7000,
},
'deploys': [
{
'target': 'S3',
'options': {
'testing': {
'enable': True,
's3_bucket': 'relengstatic-testing-relengdocs-static-website',
'url': 'https://docs.testing.mozilla-releng.net',
'dns': 'd1sw5c8kdn03y.cloudfront.net.',
},
'staging': {
'enable': True,
's3_bucket': 'relengstatic-staging-relengdocs-static-website',
'url': 'https://docs.staging.mozilla-releng.net',
'dns': 'd32jt14rospqzr.cloudfront.net.',
},
'production': {
'enable': True,
's3_bucket': 'relengstatic-prod-relengdocs-static-website',
'url': 'https://docs.mozilla-releng.net',
'dns': 'd1945er7u4liht.cloudfront.net.',
},
},
},
],
},
}
|
mpl-2.0
| -2,205,204,115,440,344,000
| 30.677419
| 93
| 0.52113
| false
| 3.295302
| false
| false
| false
|
andreask/mailchimp-python
|
mailchimp/objects/mc_interest_category.py
|
1
|
3803
|
# coding=utf-8
import logging
from mailchimp.exceptions import MCInterestCategoryNotFound, MCListNotFound, ObjectNotFound
from mailchimp import Request
from .base_object import BaseObject
from .mc_link import MCLink
logger = logging.getLogger(__name__)
class MCInterestCategory(BaseObject):
item_url = '/lists/{list_id}/interest-categories'
def __init__(self, json_data={}):
super(MCInterestCategory, self).__init__()
self._update(json_data)
def _update(self, json_data):
self.id = json_data.get("id")
self.list_id = json_data.get("list_id")
self.title = json_data.get("title")
self.display_order = json_data.get("display_order")
self.type = json_data.get("type")
self.links = [MCLink(link) for link in json_data.get('_links')] if json_data.get('_links') else []
@classmethod
def get_list_url(cls, list_id):
"""
Replace the placeholder for the list id with the list id sent to the method - creates a valid url.
:param list_id: the list to get the url for
:return: the url for the list
"""
return cls.item_url.replace("{list_id}", list_id)
@classmethod
def get(cls, list_id, category_id):
"""
Get the category from the mailchimp API. list_id has to be a valid list and category_id should be the
id of the category to retrieve.
:param list_id: the list id to get the category from
:param category_id: the category to get
:return: a MCInterestCategory object containing the category if successful, raises an MCInterestCategoryNotFound
exception otherwise
"""
try:
response = Request.get("%s/%s" % (MCInterestCategory.get_list_url(list_id), category_id))
return MCInterestCategory(response.json())
except ObjectNotFound:
raise MCInterestCategoryNotFound(list_id, category_id)
@classmethod
def list(cls, list_id, params={}):
"""
Get the list of categories for the list corresponding with the id list_id from the mailchimp API.
:param list_id: the id of the list to get members from
:param params: parameters for defining limits for the search - can be used to page result or search for a
specific status.
:return: an array of MCInterestCategory objects if successful, raises a MCListNotFound exception otherwise
"""
try:
response = Request.get("%s" % MCInterestCategory.get_list_url(list_id), params)
return [MCInterestCategory(category) for category in response.json()['categories']]
except ObjectNotFound:
raise MCListNotFound(list_id)
def delete(self):
"""
Deletes the current category from the list
:return: True if successful
"""
if not self.id:
return False
try:
Request.delete("%s/%s" % (MCInterestCategory.get_list_url(self.list_id), self.id))
return True
except Exception as e:
logger.error("Unable to delete member from list")
raise e
def save(self):
"""
Saves the current category to the list
:return: True if successful
"""
hash_value = self.id
if not self.id:
md = hashlib.md5()
md.update(self.email_address.lower().encode("utf-8"))
hash_value = md.hexdigest()
try:
response = Request.put("%s/%s" % (MCMember.get_list_url(self.list_id), hash_value),
self.to_dict())
self._update(response.json())
return True
except Exception as e:
logger.error("Unable to save member")
raise e
|
mit
| 8,170,451,207,158,942,000
| 32.359649
| 120
| 0.611097
| false
| 4.138194
| false
| false
| false
|
sondree/Master-thesis
|
Python PLOTS/batchRunner.py
|
1
|
1407
|
from plotfitness_emitter import main as main_em
from plotfitness_receiver import main as main_recv
from plotfitness_direction import main as main_dir
from time import sleep
production = True
if production:
print "Warning running in production mode. This will take a long time"
sleep(1)
num_steps = [1,1]
else:
num_steps = [5,5]
def run_pltfit_emitter():
for radius in xrange(200,350,50):
main_em(1.5, 40, 3, radius, None, [1000,1000], num_steps, "PltFit Emitter UAV radius %s," % radius)
def run_pltfit_receiver():
for uav_count in xrange(2,6):
for noise_step in xrange(5):
main_recv(0.5 + noise_step, 40, uav_count, 400, None, [1000,1000], num_steps, "PltFit Receiver UAV count %s," % uav_count)
def high_res():
#main_em(1.5, 40, 3, 200, None, [1000,1000], [1,1], "PltFit Emitter UAV count %s," % 3)
for uav_count in xrange(2,6):
for noise_step in xrange(5):
if uav_count < 3 and noise_step < 3:
continue
main_recv(0.5 + noise_step, 40, uav_count, 400, None, [1000,1000], [1,1], "PltFit Receiver UAV count %s," % uav_count)
def run_pltfit_direction():
for uav_count in xrange(3,5):
main_dir(1.0, (670.0,670.0), uav_count, 1000, 3, 50, 1, 2, None, "PltFit Direction")
if __name__=="__main__":
run_pltfit_emitter()
#run_pltfit_receiver()
#run_pltfit_direction()
|
gpl-3.0
| -3,059,308,023,333,114,400
| 32.5
| 134
| 0.625444
| false
| 2.853955
| false
| false
| false
|
code-sauce/tensorflow
|
tensorflow/contrib/distributions/python/ops/kullback_leibler.py
|
1
|
5019
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration and usage mechanisms for KL-divergences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
_DIVERGENCES = {}
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = inspect.getmro(type_a)
hierarchy_b = inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
def kl(dist_a, dist_b, allow_nan=False, name=None):
"""Get the KL-divergence KL(dist_a || dist_b).
If there is no KL method registered specifically for `type(dist_a)` and
`type(dist_b)`, then the class hierarchies of these types are searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(dist_a)`).
Args:
dist_a: The first distribution.
dist_b: The second distribution.
allow_nan: If `False` (default), a runtime error is raised
if the KL returns NaN values for any batch entry of the given
distributions. If `True`, the KL may return a NaN for the given entry.
name: (optional) Name scope to use for created operations.
Returns:
A Tensor with the batchwise KL-divergence between dist_a and dist_b.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of dist_a and dist_b.
"""
kl_fn = _registered_kl(type(dist_a), type(dist_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
"type %s" % (type(dist_a).__name__, type(dist_b).__name__))
with ops.name_scope("KullbackLeibler"):
kl_t = kl_fn(dist_a, dist_b, name=name)
if allow_nan:
return kl_t
# Check KL for NaNs
kl_t = array_ops.identity(kl_t, name="kl")
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_not(
math_ops.reduce_any(math_ops.is_nan(kl_t))),
["KL calculation between %s and %s returned NaN values "
"(and was called with allow_nan=False). Values:"
% (dist_a.name, dist_b.name), kl_t])]):
return array_ops.identity(kl_t, name="checked_kl")
class RegisterKL(object):
"""Decorator to register a KL divergence implementation function.
Usage:
@distributions.RegisterKL(distributions.Normal, distributions.Normal)
def _kl_normal_mvn(norm_a, norm_b):
# Return KL(norm_a || norm_b)
"""
def __init__(self, dist_cls_a, dist_cls_b):
"""Initialize the KL registrar.
Args:
dist_cls_a: the class of the first argument of the KL divergence.
dist_cls_b: the class of the second argument of the KL divergence.
"""
self._key = (dist_cls_a, dist_cls_b)
def __call__(self, kl_fn):
"""Perform the KL registration.
Args:
kl_fn: The function to use for the KL divergence.
Returns:
kl_fn
Raises:
TypeError: if kl_fn is not a callable.
ValueError: if a KL divergence function has already been registered for
the given argument classes.
"""
if not callable(kl_fn):
raise TypeError("kl_fn must be callable, received: %s" % kl_fn)
if self._key in _DIVERGENCES:
raise ValueError("KL(%s || %s) has already been registered to: %s"
% (self._key[0].__name__, self._key[1].__name__,
_DIVERGENCES[self._key]))
_DIVERGENCES[self._key] = kl_fn
return kl_fn
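# Illustrative sketch of the registration mechanism (MyDistA/MyDistB and the
# instances below are assumptions, not distributions defined in this module):
#
#     @RegisterKL(MyDistA, MyDistB)
#     def _kl_a_b(dist_a, dist_b, name=None):
#         ...  # return a Tensor with the batchwise KL(dist_a || dist_b)
#
#     divergence = kl(a_instance, b_instance, allow_nan=False)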
|
apache-2.0
| 2,924,097,993,749,842,400
| 34.097902
| 80
| 0.662483
| false
| 3.618601
| false
| false
| false
|
kirstymcnaught/SpecialEffectMinecraftMods
|
src/gen_toml.py
|
1
|
1831
|
import subprocess, re, shutil
## Grep all the files for MODID
cmd="find . -iname '*.java' | xargs grep 'MODID = ' -h"
regex = "\"(.*)\""
# shell=True expects the full command string; a pre-split list would only run "find"
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
lines = result.stdout.decode("utf-8").splitlines()
mod_ids = []
for line in lines:
match = re.search(regex, line)
if match:
mod_id = match.group(1)
mod_ids.append(mod_id)
print(mod_id)
# Copy base file
fname_base = 'main/resources/META-INF/mods.toml.base'
fname_final = 'main/resources/META-INF/mods.toml'
shutil.copyfile(fname_base, fname_final)
author='Kirsty McNaught'
url = 'https://www.specialeffect.org.uk/eyemine'
# Append mod details
with open(fname_final, "a") as myfile:
for mod_id in mod_ids:
myfile.write('[[mods]]\n')
myfile.write('modId=\"{}\"\n'.format(mod_id))
myfile.write('version=\"${file.jarVersion}\"\n')
myfile.write('displayName=\"{}\"\n'.format(mod_id)) # TODO: nicer display names
myfile.write('displayURL=\"{}\"\n'.format(url))
myfile.write('authors=\"{}\"\n'.format(author))
# myfile.write('\n')
myfile.write('[[dependencies.{}]]\n'.format(mod_id))
myfile.write('\tmodId="forge"\n')
myfile.write('\tmandatory=true\n')
myfile.write('\tversionRange="[25,)"\n')
myfile.write('\tordering="NONE"\n')
myfile.write('\tside="BOTH"\n') # TODO: maybe client only??
# Here's another dependency
myfile.write('[[dependencies.{}]]\n'.format(mod_id))
myfile.write('\tmodId="minecraft"\n')
myfile.write('\tmandatory=true\n')
myfile.write('\tversionRange="[1.14.4]"\n')
myfile.write('\tordering="NONE"\n')
myfile.write('\tside="BOTH"\n')
myfile.write('\n')
|
gpl-3.0
| 8,918,407,722,003,720,000
| 29.032787
| 87
| 0.59148
| false
| 3.092905
| false
| false
| false
|
ericchill/gnofract4d
|
fract4dgui/painter.py
|
1
|
1313
|
# GUI for painting colors onto the fractal
import gtk
import dialog
import browser
import utils
def show(parent,f):
PainterDialog.show(parent,f)
class PainterDialog(dialog.T):
def show(parent, f):
dialog.T.reveal(PainterDialog, True, parent, None, f)
show = staticmethod(show)
def __init__(self,main_window,f):
dialog.T.__init__(
self,
_("Painter"),
main_window,
gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.main_window = main_window
self.f = f
self.paint_toggle = gtk.ToggleButton(_("Painting"))
self.paint_toggle.set_active(True)
self.paint_toggle.connect('toggled',self.onChangePaintMode)
self.csel = gtk.ColorSelection()
self.vbox.add(self.csel)
self.vbox.add(self.paint_toggle)
self.vbox.show_all()
self.onChangePaintMode()
def onChangePaintMode(self,*args):
self.f.set_paint_mode(self.paint_toggle.get_active(), self.csel)
def onResponse(self,widget,id):
if id == gtk.RESPONSE_CLOSE or \
id == gtk.RESPONSE_NONE or \
id == gtk.RESPONSE_DELETE_EVENT:
self.hide()
self.f.set_paint_mode(False,None)
|
bsd-3-clause
| -2,438,796,135,652,036,600
| 28.177778
| 72
| 0.593298
| false
| 3.587432
| false
| false
| false
|
kubernetes-client/python
|
kubernetes/client/configuration.py
|
1
|
13238
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:Example:
API Key Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
cookieAuth: # name for the security scheme
type: apiKey
in: cookie
name: JSESSIONID # cookie name
You can programmatically set the cookie:
conf = client.Configuration(
api_key={'cookieAuth': 'abc123'}
api_key_prefix={'cookieAuth': 'JSESSIONID'}
)
The following cookie will be added to the HTTP request:
Cookie: JSESSIONID abc123
"""
_default = None
def __init__(self, host="http://localhost",
api_key=None, api_key_prefix=None,
username=None, password=None,
discard_unknown_keys=False,
):
"""Constructor
"""
self.host = host
"""Default Base url
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = None
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Disable client side validation
self.client_side_validation = True
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if 'authorization' in self.api_key:
auth['BearerToken'] = {
'type': 'api_key',
'in': 'header',
'key': 'authorization',
'value': self.get_api_key_with_prefix('authorization')
}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: release-1.18\n"\
"SDK Package Version: 18.0.0-snapshot".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "/",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:return: URL based on host settings
"""
variables = {} if variables is None else variables
servers = self.get_host_settings()
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server['variables'].items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
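# Illustrative sketch (host and token are placeholders, not real credentials):
#
#     configuration = Configuration(
#         host="https://my-cluster.example.com",
#         api_key={'authorization': 'my-token'},
#         api_key_prefix={'authorization': 'Bearer'},
#     )
#     Configuration.set_default(configuration)
#     # Configuration.get_default_copy() now returns a deep copy of it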
|
apache-2.0
| 6,432,329,210,058,188,000
| 32.261307
| 124
| 0.585662
| false
| 4.654712
| true
| false
| false
|
Szkered/BC_2402
|
sydb/stocks/admin.py
|
1
|
1758
|
from django.contrib import admin
from stocks.models import *
class StockAdmin(admin.ModelAdmin):
list_display = ['name', 'unit_measure', 'unit_price', 'category_slug']
ordering = ['name']
search_fields = ('name',)
class DonorAdmin(admin.ModelAdmin):
list_display = ['name', 'contact_no', 'address', 'referral', 'mailing']
ordering = ['name']
class DonateAdmin(admin.ModelAdmin):
list_display = ['stock', 'quantity']
ordering = ['stock']
class DonationAdmin(admin.ModelAdmin):
list_display = ['date', 'donor']
ordering = ['date']
class DestinationAdmin(admin.ModelAdmin):
pass
class VendorAdmin(admin.ModelAdmin):
list_display = ['name', 'contact_no', 'address']
ordering = ['name']
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'stock']
ordering = ['stock']
class DistributeAdmin(admin.ModelAdmin):
list_display = ['quantity', 'stock', 'family_type', 'date']
ordering = ['stock']
class PurchaseInline(admin.StackedInline):
model = Purchase
extra = 0
class OrderAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['vendor', 'confirm']}),
('Date Info', {'fields': ['date'], 'classes' : ['collapse']}),
]
inlines = [PurchaseInline]
admin.site.register(Stock, StockAdmin)
admin.site.register(Donor, DonorAdmin)
admin.site.register(Destination, DestinationAdmin)
admin.site.register(Vendor, VendorAdmin)
admin.site.register(Donate, DonateAdmin)
admin.site.register(Donation, DonationAdmin)
# admin.site.register(Purchase, PurchaseAdmin)
admin.site.register(Distribute, DistributeAdmin)
admin.site.register(Transfer)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Order, OrderAdmin)
|
apache-2.0
| 4,979,858,879,367,325,000
| 27.354839
| 75
| 0.6843
| false
| 3.573171
| false
| false
| false
|
centricular/cerbero
|
cerbero/commands/check.py
|
1
|
2640
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.build.cookbook import CookBook
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument
from cerbero.utils import messages as m
class Check(Command):
doc = N_('Run checks on a given recipe')
name = 'check'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('recipe', nargs=1,
help=_('name of the recipe to run checks on')),
ArgparseArgument('--recursive', action='store_true', default=False,
help=_('Recursively run checks on dependencies')),
])
def run(self, config, args):
cookbook = CookBook(config)
recipe_name = args.recipe[0]
recursive = args.recursive
recipe = cookbook.get_recipe(recipe_name)
if recursive:
ordered_recipes = cookbook.list_recipe_deps(recipe_name)
else:
ordered_recipes = [recipe]
for recipe in ordered_recipes:
if cookbook.recipe_needs_build(recipe.name):
raise FatalError(_("Recipe %s is not built yet" % recipe.name))
for recipe in ordered_recipes:
# call step function
stepfunc = None
try:
stepfunc = getattr(recipe, 'check')
except:
m.message('%s has no check step, skipped' % recipe.name)
if stepfunc:
try:
stepfunc()
except FatalError as e:
raise e
except Exception as ex:
raise FatalError(_("Error running %s checks: %s") %
(recipe.name, ex))
register_command(Check)
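# Typical invocation sketch (recipe names are placeholders; the CLI entry point
# is provided elsewhere in cerbero):
#
#     cerbero check <recipe-name>
#     cerbero check --recursive <recipe-name>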
|
lgpl-2.1
| -215,881,122,121,253,340
| 35.666667
| 79
| 0.623864
| false
| 4.334975
| false
| false
| false
|
Teagan42/home-assistant
|
homeassistant/components/netatmo/binary_sensor.py
|
1
|
6522
|
"""Support for the Netatmo binary sensors."""
import logging
import pyatmo
from homeassistant.components.binary_sensor import BinarySensorDevice
from .camera import CameraData
from .const import AUTH, DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
WELCOME_SENSOR_TYPES = {
"Someone known": "motion",
"Someone unknown": "motion",
"Motion": "motion",
}
PRESENCE_SENSOR_TYPES = {
"Outdoor motion": "motion",
"Outdoor human": "motion",
"Outdoor animal": "motion",
"Outdoor vehicle": "motion",
}
TAG_SENSOR_TYPES = {"Tag Vibration": "vibration", "Tag Open": "opening"}
SENSOR_TYPES = {"NACamera": WELCOME_SENSOR_TYPES, "NOC": PRESENCE_SENSOR_TYPES}
CONF_HOME = "home"
CONF_CAMERAS = "cameras"
CONF_WELCOME_SENSORS = "welcome_sensors"
CONF_PRESENCE_SENSORS = "presence_sensors"
CONF_TAG_SENSORS = "tag_sensors"
DEFAULT_TIMEOUT = 90
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the access to Netatmo binary sensor."""
auth = hass.data[DOMAIN][entry.entry_id][AUTH]
def get_entities():
"""Retrieve Netatmo entities."""
entities = []
def get_camera_home_id(data, camera_id):
"""Return the home id for a given camera id."""
for home_id in data.camera_data.cameras:
for camera in data.camera_data.cameras[home_id].values():
if camera["id"] == camera_id:
return home_id
return None
try:
data = CameraData(hass, auth)
for camera in data.get_all_cameras():
home_id = get_camera_home_id(data, camera_id=camera["id"])
sensor_types = {}
sensor_types.update(SENSOR_TYPES[camera["type"]])
# Tags are only supported with Netatmo Welcome indoor cameras
if camera["type"] == "NACamera" and data.get_modules(camera["id"]):
sensor_types.update(TAG_SENSOR_TYPES)
for sensor_name in sensor_types:
entities.append(
NetatmoBinarySensor(data, camera["id"], home_id, sensor_name)
)
except pyatmo.NoDevice:
_LOGGER.debug("No camera entities to add")
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class NetatmoBinarySensor(BinarySensorDevice):
"""Represent a single binary sensor in a Netatmo Camera device."""
def __init__(self, data, camera_id, home_id, sensor_type, module_id=None):
"""Set up for access to the Netatmo camera events."""
self._data = data
self._camera_id = camera_id
self._module_id = module_id
self._sensor_type = sensor_type
camera_info = data.camera_data.cameraById(cid=camera_id)
self._camera_name = camera_info["name"]
self._camera_type = camera_info["type"]
self._home_id = home_id
self._home_name = self._data.camera_data.getHomeName(home_id=home_id)
self._timeout = DEFAULT_TIMEOUT
if module_id:
self._module_name = data.camera_data.moduleById(mid=module_id)["name"]
self._name = (
f"{MANUFACTURER} {self._camera_name} {self._module_name} {sensor_type}"
)
self._unique_id = (
f"{self._camera_id}-{self._module_id}-"
f"{self._camera_type}-{sensor_type}"
)
else:
self._name = f"{MANUFACTURER} {self._camera_name} {sensor_type}"
self._unique_id = f"{self._camera_id}-{self._camera_type}-{sensor_type}"
self._state = None
@property
def name(self):
"""Return the name of the Netatmo device and this sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
return self._unique_id
@property
def device_info(self):
"""Return the device info for the sensor."""
return {
"identifiers": {(DOMAIN, self._camera_id)},
"name": self._camera_name,
"manufacturer": MANUFACTURER,
"model": self._camera_type,
}
@property
def is_on(self):
"""Return true if binary sensor is on."""
return self._state
def update(self):
"""Request an update from the Netatmo API."""
self._data.update()
self._data.update_event(camera_type=self._camera_type)
if self._camera_type == "NACamera":
if self._sensor_type == "Someone known":
self._state = self._data.camera_data.someone_known_seen(
cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Someone unknown":
self._state = self._data.camera_data.someone_unknown_seen(
cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Motion":
self._state = self._data.camera_data.motion_detected(
cid=self._camera_id, exclude=self._timeout
)
elif self._camera_type == "NOC":
if self._sensor_type == "Outdoor motion":
self._state = self._data.camera_data.outdoor_motion_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor human":
self._state = self._data.camera_data.human_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor animal":
self._state = self._data.camera_data.animal_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor vehicle":
self._state = self._data.camera_data.car_detected(
cid=self._camera_id, offset=self._timeout
)
if self._sensor_type == "Tag Vibration":
self._state = self._data.camera_data.module_motion_detected(
mid=self._module_id, cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Tag Open":
self._state = self._data.camera_data.module_opened(
mid=self._module_id, cid=self._camera_id, exclude=self._timeout
)
|
apache-2.0
| -1,740,085,250,344,084,000
| 36.268571
| 87
| 0.568997
| false
| 3.882143
| false
| false
| false
|
dksr/REMIND
|
python/base/utils/LoggerManager.py
|
1
|
3182
|
#!/usr/bin/env python
import logging
import logging.handlers
from Singleton import Singleton
import os
LOGPATH = '/tmp'
class LoggerManager(Singleton):
def __init__(self):
self.loggers = {}
formatter = logging.Formatter('%(asctime)s:%(levelname)-8s:%(name)-10s:%(lineno)4s: %(message)-80s')
level = 'DEBUG'
nlevel = getattr(logging, level, None)
if nlevel != None:
self.LOGGING_MODE = nlevel
else:
self.LOGGING_MODE = logging.DEBUG
self.LOGGING_HANDLER = logging.handlers.RotatingFileHandler(
os.path.join(LOGPATH, 'log_event.log'),'a',0, 10)
self.LOGGING_HANDLER.doRollover()
self.ERROR_HANDLER = logging.handlers.RotatingFileHandler(
os.path.join(LOGPATH,'log_error.log'),'a',0, 10)
self.ERROR_HANDLER.doRollover()
self.LOGGING_HANDLER.setFormatter(formatter)
self.LOGGING_HANDLER.setLevel(self.LOGGING_MODE)
def getLogger(self, loggername):
if not self.loggers.has_key(loggername):
logger = Logger(loggername,
logging_handler= self.LOGGING_HANDLER,
error_handler = self.ERROR_HANDLER,
logging_mode = self.LOGGING_MODE)
self.loggers[loggername] = logger
return self.loggers[loggername]
class Logger:
'''
Implements the christine logging facility.
'''
def __init__(self, loggername, type = 'event', logging_handler= '', error_handler = '', logging_mode = ''):
'''
        Constructor, builds a logger object.
        @param loggername: Name that the logger will have.
        @param type: Type of logger. The available values are: event and error;
                     it defaults to event. If a value other than
                     event or error is used, event will be used instead.
'''
        # Create two loggers: one for info, debug and warning messages and
        # another for errors, criticals and exceptions
self.__Logger = logging.getLogger(loggername)
self.__ErrorLogger = logging.getLogger('Error'+ loggername)
# Setting Logger properties
self.__Logger.addHandler(logging_handler)
self.__Logger.setLevel(logging_mode)
self.__ErrorLogger.addHandler(error_handler)
self.__ErrorLogger.setLevel(logging_mode)
self.info = self.__Logger.info
self.debug = self.__Logger.debug
self.warning = self.__Logger.warning
self.critical = self.__ErrorLogger.critical
self.error = self.__ErrorLogger.error
self.exception = self.__ErrorLogger.exception
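# Minimal usage sketch (the logger name is an assumption):
#
#     log = LoggerManager().getLogger('tracker')
#     log.info('pipeline started')
#     log.error('something went wrong')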
|
mit
| -1,496,373,961,397,045,000
| 48.734375
| 116
| 0.52137
| false
| 4.895385
| false
| false
| false
|
trevor/calendarserver
|
txweb2/server.py
|
1
|
26937
|
# -*- test-case-name: txweb2.test.test_server -*-
##
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
"""
This is a web-server which integrates with the twisted.internet
infrastructure.
"""
from __future__ import print_function
import cgi, time, urlparse
from urllib import quote, unquote
from urlparse import urlsplit
import weakref
from zope.interface import implements
from twisted.internet import defer
from twisted.python import failure
from twext.python.log import Logger
from txweb2 import http, iweb, fileupload, responsecode
from txweb2 import http_headers
from txweb2.filter.range import rangefilter
from txweb2 import error
from txweb2 import __version__ as web2_version
from twisted import __version__ as twisted_version
VERSION = "Twisted/%s TwistedWeb/%s" % (twisted_version, web2_version)
_errorMarker = object()
log = Logger()
def defaultHeadersFilter(request, response):
if not response.headers.hasHeader('server'):
response.headers.setHeader('server', VERSION)
if not response.headers.hasHeader('date'):
response.headers.setHeader('date', time.time())
return response
defaultHeadersFilter.handleErrors = True
def preconditionfilter(request, response):
if request.method in ("GET", "HEAD"):
http.checkPreconditions(request, response)
return response
def doTrace(request):
request = iweb.IRequest(request)
txt = "%s %s HTTP/%d.%d\r\n" % (request.method, request.uri,
request.clientproto[0], request.clientproto[1])
l=[]
for name, valuelist in request.headers.getAllRawHeaders():
for value in valuelist:
l.append("%s: %s\r\n" % (name, value))
txt += ''.join(l)
return http.Response(
responsecode.OK,
{'content-type': http_headers.MimeType('message', 'http')},
txt)
def parsePOSTData(request, maxMem=100*1024, maxFields=1024,
maxSize=10*1024*1024):
"""
Parse data of a POST request.
@param request: the request to parse.
@type request: L{txweb2.http.Request}.
@param maxMem: maximum memory used during the parsing of the data.
@type maxMem: C{int}
@param maxFields: maximum number of form fields allowed.
@type maxFields: C{int}
@param maxSize: maximum size of file upload allowed.
@type maxSize: C{int}
@return: a deferred that will fire when the parsing is done. The deferred
itself doesn't hold a return value, the request is modified directly.
@rtype: C{defer.Deferred}
"""
if request.stream.length == 0:
return defer.succeed(None)
ctype = request.headers.getHeader('content-type')
if ctype is None:
return defer.succeed(None)
def updateArgs(data):
args = data
request.args.update(args)
def updateArgsAndFiles(data):
args, files = data
request.args.update(args)
request.files.update(files)
def error(f):
f.trap(fileupload.MimeFormatError)
raise http.HTTPError(
http.StatusResponse(responsecode.BAD_REQUEST, str(f.value)))
if (ctype.mediaType == 'application'
and ctype.mediaSubtype == 'x-www-form-urlencoded'):
d = fileupload.parse_urlencoded(request.stream)
d.addCallbacks(updateArgs, error)
return d
elif (ctype.mediaType == 'multipart'
and ctype.mediaSubtype == 'form-data'):
boundary = ctype.params.get('boundary')
if boundary is None:
return defer.fail(http.HTTPError(
http.StatusResponse(
responsecode.BAD_REQUEST,
"Boundary not specified in Content-Type.")))
d = fileupload.parseMultipartFormData(request.stream, boundary,
maxMem, maxFields, maxSize)
d.addCallbacks(updateArgsAndFiles, error)
return d
else:
return defer.fail(http.HTTPError(
http.StatusResponse(
responsecode.BAD_REQUEST,
"Invalid content-type: %s/%s" % (
ctype.mediaType, ctype.mediaSubtype))))
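# Illustrative sketch of how a resource method might use the helper above
# (render/_handleForm are assumptions, not part of this module):
#
#     def render(self, request):
#         d = parsePOSTData(request)
#         d.addCallback(lambda _: self._handleForm(request.args, request.files))
#         return d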
class StopTraversal(object):
"""
Indicates to Request._handleSegment that it should stop handling
path segments.
"""
pass
class Request(http.Request):
"""
vars:
site
remoteAddr
scheme
host
port
path
params
querystring
args
files
prepath
postpath
@ivar path: The path only (arguments not included).
@ivar args: All of the arguments, including URL and POST arguments.
@type args: A mapping of strings (the argument names) to lists of values.
i.e., ?foo=bar&foo=baz&quux=spam results in
{'foo': ['bar', 'baz'], 'quux': ['spam']}.
"""
implements(iweb.IRequest)
site = None
_initialprepath = None
responseFilters = [rangefilter, preconditionfilter,
error.defaultErrorHandler, defaultHeadersFilter]
def __init__(self, *args, **kw):
self.timeStamps = [("t", time.time(),)]
if kw.has_key('site'):
self.site = kw['site']
del kw['site']
if kw.has_key('prepathuri'):
self._initialprepath = kw['prepathuri']
del kw['prepathuri']
self._resourcesByURL = {}
self._urlsByResource = {}
# Copy response filters from the class
self.responseFilters = self.responseFilters[:]
self.files = {}
self.resources = []
http.Request.__init__(self, *args, **kw)
try:
self.serverInstance = self.chanRequest.channel.transport.server.port
except AttributeError:
self.serverInstance = "Unknown"
def timeStamp(self, tag):
self.timeStamps.append((tag, time.time(),))
def addResponseFilter(self, filter, atEnd=False, onlyOnce=False):
"""
Add a response filter to this request.
Response filters are applied to the response to this request in order.
@param filter: a callable which takes an response argument and returns
a response object.
@param atEnd: if C{True}, C{filter} is added at the end of the list of
response filters; if C{False}, it is added to the beginning.
@param onlyOnce: if C{True}, C{filter} is not added to the list of
response filters if it already in the list.
"""
if onlyOnce and filter in self.responseFilters:
return
if atEnd:
self.responseFilters.append(filter)
else:
self.responseFilters.insert(0, filter)
def unparseURL(self, scheme=None, host=None, port=None,
path=None, params=None, querystring=None, fragment=None):
"""Turn the request path into a url string. For any pieces of
the url that are not specified, use the value from the
request. The arguments have the same meaning as the same named
attributes of Request."""
if scheme is None: scheme = self.scheme
if host is None: host = self.host
if port is None: port = self.port
if path is None: path = self.path
if params is None: params = self.params
if querystring is None: querystring = self.querystring
if fragment is None: fragment = ''
if port == http.defaultPortForScheme.get(scheme, 0):
hostport = host
else:
hostport = host + ':' + str(port)
return urlparse.urlunparse((
scheme, hostport, path,
params, querystring, fragment))
def _parseURL(self):
if self.uri[0] == '/':
# Can't use urlparse for request_uri because urlparse
# wants to be given an absolute or relative URI, not just
# an abs_path, and thus gets '//foo' wrong.
self.scheme = self.host = self.path = self.params = self.querystring = ''
if '?' in self.uri:
self.path, self.querystring = self.uri.split('?', 1)
else:
self.path = self.uri
if ';' in self.path:
self.path, self.params = self.path.split(';', 1)
else:
# It is an absolute uri, use standard urlparse
(self.scheme, self.host, self.path,
self.params, self.querystring, fragment) = urlparse.urlparse(self.uri)
if self.querystring:
self.args = cgi.parse_qs(self.querystring, True)
else:
self.args = {}
path = map(unquote, self.path[1:].split('/'))
if self._initialprepath:
# We were given an initial prepath -- this is for supporting
# CGI-ish applications where part of the path has already
# been processed
prepath = map(unquote, self._initialprepath[1:].split('/'))
if path[:len(prepath)] == prepath:
self.prepath = prepath
self.postpath = path[len(prepath):]
else:
self.prepath = []
self.postpath = path
else:
self.prepath = []
self.postpath = path
#print("_parseURL", self.uri, (self.uri, self.scheme, self.host, self.path, self.params, self.querystring))
def _schemeFromPort(self, port):
"""
Try to determine the scheme matching the supplied server port. This is needed in case
where a device in front of the server is changing the scheme (e.g. decoding SSL) but not
rewriting the scheme in URIs returned in responses (e.g. in Location headers). This could trick
clients into using an inappropriate scheme for subsequent requests. What we should do is
take the port number from the Host header or request-URI and map that to the scheme that
matches the service we configured to listen on that port.
@param port: the port number to test
@type port: C{int}
@return: C{True} if scheme is https (secure), C{False} otherwise
@rtype: C{bool}
"""
#from twistedcaldav.config import config
if hasattr(self.site, "EnableSSL") and self.site.EnableSSL:
if port == self.site.SSLPort:
return True
elif port in self.site.BindSSLPorts:
return True
return False
def _fixupURLParts(self):
hostaddr, secure = self.chanRequest.getHostInfo()
if not self.scheme:
self.scheme = ('http', 'https')[secure]
if self.host:
self.host, self.port = http.splitHostPort(self.scheme, self.host)
self.scheme = ('http', 'https')[self._schemeFromPort(self.port)]
else:
# If GET line wasn't an absolute URL
host = self.headers.getHeader('host')
if host:
self.host, self.port = http.splitHostPort(self.scheme, host)
self.scheme = ('http', 'https')[self._schemeFromPort(self.port)]
else:
# When no hostname specified anywhere, either raise an
# error, or use the interface hostname, depending on
# protocol version
if self.clientproto >= (1,1):
raise http.HTTPError(responsecode.BAD_REQUEST)
self.host = hostaddr.host
self.port = hostaddr.port
def process(self):
"Process a request."
log.info("%s %s %s" % (
self.method,
self.uri,
"HTTP/%s.%s" % self.clientproto
))
try:
self.checkExpect()
resp = self.preprocessRequest()
if resp is not None:
self._cbFinishRender(resp).addErrback(self._processingFailed)
return
self._parseURL()
self._fixupURLParts()
self.remoteAddr = self.chanRequest.getRemoteHost()
except:
self._processingFailed(failure.Failure())
return
d = defer.Deferred()
d.addCallback(self._getChild, self.site.resource, self.postpath)
d.addCallback(self._rememberResource, "/" + "/".join(quote(s) for s in self.postpath))
d.addCallback(self._processTimeStamp)
d.addCallback(lambda res, req: res.renderHTTP(req), self)
d.addCallback(self._cbFinishRender)
d.addErrback(self._processingFailed)
d.callback(None)
return d
def _processTimeStamp(self, res):
self.timeStamp("t-req-proc")
return res
def preprocessRequest(self):
"""Do any request processing that doesn't follow the normal
resource lookup procedure. "OPTIONS *" is handled here, for
example. This would also be the place to do any CONNECT
processing."""
if self.method == "OPTIONS" and self.uri == "*":
response = http.Response(responsecode.OK)
response.headers.setHeader('allow', ('GET', 'HEAD', 'OPTIONS', 'TRACE'))
return response
elif self.method == "POST":
# Allow other methods to tunnel through using POST and a request header.
# See http://code.google.com/apis/gdata/docs/2.0/basics.html
if self.headers.hasHeader("X-HTTP-Method-Override"):
intendedMethod = self.headers.getRawHeaders("X-HTTP-Method-Override")[0];
if intendedMethod:
self.originalMethod = self.method
self.method = intendedMethod
# This is where CONNECT would go if we wanted it
return None
def _getChild(self, _, res, path, updatepaths=True):
"""Call res.locateChild, and pass the result on to _handleSegment."""
self.resources.append(res)
if not path:
return res
result = res.locateChild(self, path)
if isinstance(result, defer.Deferred):
return result.addCallback(self._handleSegment, res, path, updatepaths)
else:
return self._handleSegment(result, res, path, updatepaths)
def _handleSegment(self, result, res, path, updatepaths):
"""Handle the result of a locateChild call done in _getChild."""
newres, newpath = result
# If the child resource is None then display a error page
if newres is None:
raise http.HTTPError(responsecode.NOT_FOUND)
# If we got a deferred then we need to call back later, once the
# child is actually available.
if isinstance(newres, defer.Deferred):
return newres.addCallback(
lambda actualRes: self._handleSegment(
(actualRes, newpath), res, path, updatepaths)
)
if path:
url = quote("/" + "/".join(path))
else:
url = "/"
if newpath is StopTraversal:
# We need to rethink how to do this.
#if newres is res:
return res
#else:
# raise ValueError("locateChild must not return StopTraversal with a resource other than self.")
newres = iweb.IResource(newres)
if newres is res:
assert not newpath is path, "URL traversal cycle detected when attempting to locateChild %r from resource %r." % (path, res)
assert len(newpath) < len(path), "Infinite loop impending..."
if updatepaths:
# We found a Resource... update the request.prepath and postpath
for x in xrange(len(path) - len(newpath)):
self.prepath.append(self.postpath.pop(0))
url = quote("/" + "/".join(self.prepath) + ("/" if self.prepath and self.prepath[-1] else ""))
self._rememberResource(newres, url)
else:
try:
previousURL = self.urlForResource(res)
url = quote(previousURL + path[0] + ("/" if path[0] and len(path) > 1 else ""))
self._rememberResource(newres, url)
except NoURLForResourceError:
pass
child = self._getChild(None, newres, newpath, updatepaths=updatepaths)
return child
_urlsByResource = weakref.WeakKeyDictionary()
def _rememberResource(self, resource, url):
"""
Remember the URL of a visited resource.
"""
self._resourcesByURL[url] = resource
self._urlsByResource[resource] = url
return resource
def _forgetResource(self, resource, url):
"""
        Forget the URL of a previously visited resource.
"""
del self._resourcesByURL[url]
del self._urlsByResource[resource]
def urlForResource(self, resource):
"""
Looks up the URL of the given resource if this resource was found while
processing this request. Specifically, this includes the requested
resource, and resources looked up via L{locateResource}.
Note that a resource may be found at multiple URIs; if the same resource
is visited at more than one location while processing this request,
this method will return one of those URLs, but which one is not defined,
nor whether the same URL is returned in subsequent calls.
@param resource: the resource to find a URI for. This resource must
have been obtained from the request (i.e. via its C{uri} attribute, or
through its C{locateResource} or C{locateChildResource} methods).
@return: a valid URL for C{resource} in this request.
@raise NoURLForResourceError: if C{resource} has no URL in this request
(because it was not obtained from the request).
"""
url = self._urlsByResource.get(resource, None)
if url is None:
raise NoURLForResourceError(resource)
return url
def locateResource(self, url):
"""
Looks up the resource with the given URL.
        @param url: The URL of the desired resource.
@return: a L{Deferred} resulting in the L{IResource} at the
given URL or C{None} if no such resource can be located.
@raise HTTPError: If C{url} is not a URL on the site that this
request is being applied to. The contained response will
have a status code of L{responsecode.BAD_GATEWAY}.
@raise HTTPError: If C{url} contains a query or fragment.
The contained response will have a status code of
L{responsecode.BAD_REQUEST}.
"""
if url is None:
return defer.succeed(None)
#
# Parse the URL
#
(scheme, host, path, query, fragment) = urlsplit(url)
if query or fragment:
raise http.HTTPError(http.StatusResponse(
responsecode.BAD_REQUEST,
"URL may not contain a query or fragment: %s" % (url,)
))
# Look for cached value
cached = self._resourcesByURL.get(path, None)
if cached is not None:
return defer.succeed(cached)
segments = unquote(path).split("/")
assert segments[0] == "", "URL path didn't begin with '/': %s" % (path,)
# Walk the segments up to see if we can find a cached resource to start from
preSegments = segments[:-1]
postSegments = segments[-1:]
cachedParent = None
while(len(preSegments)):
parentPath = "/".join(preSegments) + "/"
cachedParent = self._resourcesByURL.get(parentPath, None)
if cachedParent is not None:
break
else:
postSegments.insert(0, preSegments.pop())
if cachedParent is None:
cachedParent = self.site.resource
postSegments = segments[1:]
def notFound(f):
f.trap(http.HTTPError)
if f.value.response.code != responsecode.NOT_FOUND:
return f
return None
d = defer.maybeDeferred(self._getChild, None, cachedParent, postSegments, updatepaths=False)
d.addCallback(self._rememberResource, path)
d.addErrback(notFound)
return d
def locateChildResource(self, parent, childName):
"""
Looks up the child resource with the given name given the parent
resource. This is similar to locateResource(), but doesn't have to
start the lookup from the root resource, so it is potentially faster.
@param parent: the parent of the resource being looked up. This resource
must have been obtained from the request (i.e. via its C{uri} attribute,
or through its C{locateResource} or C{locateChildResource} methods).
        @param childName: the name of the child of C{parent} to be looked up,
        relative to C{parent}.
@return: a L{Deferred} resulting in the L{IResource} at the
given URL or C{None} if no such resource can be located.
@raise NoURLForResourceError: if C{resource} was not obtained from the
request.
"""
if parent is None or childName is None:
return None
assert "/" not in childName, "Child name may not contain '/': %s" % (childName,)
parentURL = self.urlForResource(parent)
if not parentURL.endswith("/"):
parentURL += "/"
url = parentURL + quote(childName)
segment = childName
def notFound(f):
f.trap(http.HTTPError)
if f.value.response.code != responsecode.NOT_FOUND:
return f
return None
d = defer.maybeDeferred(self._getChild, None, parent, [segment], updatepaths=False)
d.addCallback(self._rememberResource, url)
d.addErrback(notFound)
return d
def _processingFailed(self, reason):
if reason.check(http.HTTPError) is not None:
# If the exception was an HTTPError, leave it alone
d = defer.succeed(reason.value.response)
else:
# Otherwise, it was a random exception, so give a
# ICanHandleException implementer a chance to render the page.
def _processingFailed_inner(reason):
handler = iweb.ICanHandleException(self, self)
return handler.renderHTTP_exception(self, reason)
d = defer.maybeDeferred(_processingFailed_inner, reason)
d.addCallback(self._cbFinishRender)
d.addErrback(self._processingReallyFailed, reason)
return d
def _processingReallyFailed(self, reason, origReason):
"""
An error occurred when attempting to report an error to the HTTP
client.
"""
log.failure("Exception rendering error page", reason)
log.failure("Original exception", origReason)
try:
body = (
"<html><head><title>Internal Server Error</title></head>"
"<body><h1>Internal Server Error</h1>"
"An error occurred rendering the requested page. "
"Additionally, an error occurred rendering the error page."
"</body></html>"
)
response = http.Response(
responsecode.INTERNAL_SERVER_ERROR,
{'content-type': http_headers.MimeType('text','html')},
body
)
self.writeResponse(response)
except:
log.failure(
"An error occurred. We tried to report that error. "
"Reporting that error caused an error. "
"In the process of reporting the error-reporting error to "
"the client, there was *yet another* error. Here it is. "
"I give up."
)
self.chanRequest.abortConnection()
def _cbFinishRender(self, result):
def filterit(response, f):
if (hasattr(f, 'handleErrors') or
(response.code >= 200 and response.code < 300)):
return f(self, response)
else:
return response
response = iweb.IResponse(result, None)
if response:
d = defer.Deferred()
for f in self.responseFilters:
d.addCallback(filterit, f)
d.addCallback(self.writeResponse)
d.callback(response)
return d
resource = iweb.IResource(result, None)
if resource:
self.resources.append(resource)
d = defer.maybeDeferred(resource.renderHTTP, self)
d.addCallback(self._cbFinishRender)
return d
raise TypeError("html is not a resource or a response")
def renderHTTP_exception(self, req, reason):
log.failure("Exception rendering request: {request}", reason, request=req)
body = ("<html><head><title>Internal Server Error</title></head>"
"<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. More information is available in the server log.</body></html>")
return http.Response(
responsecode.INTERNAL_SERVER_ERROR,
{'content-type': http_headers.MimeType('text','html')},
body)
class Site(object):
def __init__(self, resource):
"""Initialize.
"""
self.resource = iweb.IResource(resource)
def __call__(self, *args, **kwargs):
return Request(site=self, *args, **kwargs)
class NoURLForResourceError(RuntimeError):
def __init__(self, resource):
RuntimeError.__init__(self, "Resource %r has no URL in this request." % (resource,))
self.resource = resource
__all__ = ['Request', 'Site', 'StopTraversal', 'VERSION', 'defaultHeadersFilter', 'doTrace', 'parsePOSTData', 'preconditionfilter', 'NoURLForResourceError']
|
apache-2.0
| -2,017,913,453,591,127,000
| 36.516713
| 165
| 0.605932
| false
| 4.334889
| false
| false
| false
|
elthariel/dff
|
modules/viewer/hexedit/offsetItem.py
|
1
|
2183
|
# DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Jeremy Mounier <jmo@digital-forensic.org>
import binascii
import struct
import string
import time
from PyQt4.QtCore import QString, Qt
from PyQt4.QtGui import QWidget, QFont, QColor, QTextCursor, QGraphicsTextItem
class offsetItem(QGraphicsTextItem):
def __init__(self, whex):
QGraphicsTextItem.__init__(self)
self.initValues(whex)
# self.initShape()
self.initPosition()
self.initFont()
def initPosition(self):
self.setPos(0, 25)
def initValues(self, whex):
self.whex = whex
self.heditor = self.whex.heditor
#Buffer
self.buffer = []
self.bufferLines = 0
#Line
self.currentLine = 0
#Offset
self.startOffset = 0
self.fontPixel = 14
def initFont(self):
self.setDefaultTextColor(QColor(Qt.red))
self.font = QFont("Gothic")
self.font.setFixedPitch(1)
self.font.setBold(False)
self.font.setPixelSize(self.fontPixel)
self.setFont(self.font)
#Print Operations
def printFullOffset(self, start, len):
count = 0
fullBuff = QString()
while count <= len:
if self.heditor.decimalview:
fullBuff.append("%.10d" % start)
else:
fullBuff.append("%.10X" % start)
fullBuff.append("\n")
start += 16
count += 1
#Clear and set
cursor = self.textCursor()
cursor.movePosition(QTextCursor.Start)
cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor)
self.setPlainText(fullBuff)
cursor.movePosition(QTextCursor.Start)
|
gpl-2.0
| 3,857,610,205,408,654,000
| 27.723684
| 78
| 0.63628
| false
| 3.675084
| false
| false
| false
|
iakov/margen
|
margen.py
|
1
|
4589
|
#!/usr/bin/python2
"""
Copyright 2016 Iakov Kirilenko
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Simple generator for colorful AR tags
"""
import argparse
import errno
import numpy as np
import os
import cv2 as cv
def palette2((row,col)):
maxDarkLuma = 140 if row % 5 == 0 or col % 5 == 0 else 250
colors = [(maxDarkLuma * 7 / 10, 0, 0), (0, maxDarkLuma * 6 / 10, 0), (0, 0, maxDarkLuma)]
return colors[(row + col) % len(colors)]
def generate_palette(s):
n = int(s)
if n == 0:
return lambda _: (0, 0, 0)
elif n == 1:
maxDarkLuma = 150
colors = [(maxDarkLuma * 7 / 10, 0, 0), (0, maxDarkLuma * 6 / 10, 0), (0, 0, maxDarkLuma)]
return lambda(row,col): colors[(row + col) % len(colors)]
elif n == 2:
return palette2
elif n == 3:
return lambda(row,col): palette2((row,col)) if row % 5 != 0 and col % 5 != 0 else (0, 0, 0)
else:
raise argparse.ArgumentTypeError("palette %r not implemented" % s)
def parse_args():
parser = argparse.ArgumentParser(description='AR marker tag generator')
parser.add_argument('codes', metavar='N[..M]', nargs='+', help='integer code N or range N..M')
parser.add_argument('--force', dest='force', action='store_true',
help='ignore checks & errors (depends on context)')
parser.add_argument('--out-dir', dest='dir', default='.', help='output directory name')
parser.add_argument('--palette', dest='palette', metavar='P', type=generate_palette, default=generate_palette("0"),
help='use palette #P ( 0 -- b/w) ')
parser.add_argument('--box-size', dest='boxSize', type=int, default=50, help='bit box size per side in pixels')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='verbose output')
return parser.parse_args()
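# A couple of assumed example invocations for the options defined above (not
# taken from any project documentation):
#
#     python2 margen.py 5
#     python2 margen.py 0..15 --out-dir markers --palette 2 --box-size 30
#
# Each positional argument is a single code N or an inclusive range N..M;
# codes that fail the built-in parity check are skipped unless --force is set.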
class Generator:
def draw_box(self, pos, color=None):
if color is None:
color = self.args.palette(pos)
row, col = pos
top_left = ((col + 1) * self.args.boxSize, (row + 1) * self.args.boxSize)
down_right = ((col + 2) * self.args.boxSize - 1, (row + 2) * self.args.boxSize - 1)
points = np.array([top_left, (down_right[0], top_left[1]), down_right, (top_left[0], down_right[1])])
cv.fillConvexPoly(self.img, points, color)
def generate(self, code):
freeBits = (6 - 2) * (6 - 2) - 3
if code < 0 or code >= 1 << freeBits:
return None
binCode = bin(code)[2:].zfill(freeBits)
binCode = '1' + binCode[0:11] + '1' + binCode[11:] + '0'
"""Check message (for parity, etc.)"""
if binCode[3] == '1' or binCode[4] == '1' or binCode.count('1') % 2 != 0:
if not self.args.force:
if self.args.verbose: print '%d\t=> %s (-)'%(code,binCode)
return None
"""Draw border"""
for i in range(0, 6):
for pos in [(0, i), (5, i), (i, 0), (i, 5)]:
self.draw_box(pos)
"""Draw message"""
for i in range(0, len(binCode)):
pos = (i / 4 + 1, i % 4 + 1)
self.draw_box(pos, None if binCode[i] == '1' else (255, 255, 255))
if self.args.verbose: print "%d\t=> %s (+)"%(code,binCode)
return self.img
def __init__(self, args):
self.img = cv.bitwise_not(np.zeros(((6 + 2) * args.boxSize, (6 + 2) * args.boxSize, 3), np.uint8))
self.args = args
def main():
args = parse_args()
try:
os.makedirs(args.dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
g = Generator(args)
for code in [z for y in [x.split("..") for x in args.codes]
for z in range(int(y[0]), 1 + int(y[0] if len(y) == 1 else y[1]))]:
marker = g.generate(code)
if marker is None: continue
filename = args.dir + '/{0:04}.png'.format(code)
cv.cvtColor(marker, cv.COLOR_RGB2BGR, marker)
cv.imwrite(filename, marker, [cv.IMWRITE_PNG_COMPRESSION, 9])
if __name__ == "__main__":
main()
|
apache-2.0
| -5,827,541,999,847,004,000
| 36.008065
| 119
| 0.577468
| false
| 3.330189
| false
| false
| false
|
fiete201/qutebrowser
|
qutebrowser/mainwindow/prompt.py
|
1
|
34258
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Showing prompts above the statusbar."""
import os.path
import html
import collections
import functools
import dataclasses
from typing import Deque, MutableSequence, Optional, cast
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QTimer, QDir, QModelIndex,
QItemSelectionModel, QObject, QEventLoop)
from PyQt5.QtWidgets import (QWidget, QGridLayout, QVBoxLayout, QLineEdit,
QLabel, QFileSystemModel, QTreeView, QSizePolicy,
QSpacerItem)
from qutebrowser.browser import downloads
from qutebrowser.config import config, configtypes, configexc, stylesheet
from qutebrowser.utils import usertypes, log, utils, qtutils, objreg, message
from qutebrowser.keyinput import modeman
from qutebrowser.api import cmdutils
from qutebrowser.utils import urlmatch
prompt_queue = cast('PromptQueue', None)
@dataclasses.dataclass
class AuthInfo:
"""Authentication info returned by a prompt."""
user: str
password: str
class Error(Exception):
"""Base class for errors in this module."""
class UnsupportedOperationError(Error):
"""Raised when the prompt class doesn't support the requested operation."""
class PromptQueue(QObject):
"""Global manager and queue for upcoming prompts.
The way in which multiple questions are handled deserves some explanation.
If a question is blocking, we *need* to ask it immediately, and can't wait
for previous questions to finish. We could theoretically ask a blocking
question inside of another blocking one, so in ask_question we simply save
the current question on the stack, let the user answer the *most recent*
question, and then restore the previous state.
With a non-blocking question, things are a bit easier. We simply add it to
self._queue if we're still busy handling another question, since it can be
answered at any time.
In either case, as soon as we finished handling a question, we call
_pop_later() which schedules a _pop to ask the next question in _queue. We
schedule it rather than doing it immediately because then the order of how
things happen is clear, e.g. on_mode_left can't happen after we already set
up the *new* question.
Attributes:
_shutting_down: Whether we're currently shutting down the prompter and
should ignore future questions to avoid segfaults.
_loops: A list of local EventLoops to spin in when blocking.
_queue: A deque of waiting questions.
_question: The current Question object if we're handling a question.
Signals:
show_prompts: Emitted with a Question object when prompts should be
shown.
"""
show_prompts = pyqtSignal(usertypes.Question)
def __init__(self, parent=None):
super().__init__(parent)
self._question = None
self._shutting_down = False
self._loops: MutableSequence[qtutils.EventLoop] = []
self._queue: Deque[usertypes.Question] = collections.deque()
message.global_bridge.mode_left.connect(self._on_mode_left)
def __repr__(self):
return utils.get_repr(self, loops=len(self._loops),
queue=len(self._queue), question=self._question)
def _pop_later(self):
"""Helper to call self._pop as soon as everything else is done."""
QTimer.singleShot(0, self._pop)
def _pop(self):
"""Pop a question from the queue and ask it, if there are any."""
log.prompt.debug("Popping from queue {}".format(self._queue))
if self._queue:
question = self._queue.popleft()
if not question.is_aborted:
# the question could already be aborted, e.g. by a cancelled
# download. See
# https://github.com/qutebrowser/qutebrowser/issues/415 and
# https://github.com/qutebrowser/qutebrowser/issues/1249
self.ask_question(question, blocking=False)
def shutdown(self):
"""Cancel all blocking questions.
Quits and removes all running event loops.
Return:
True if loops needed to be aborted,
False otherwise.
"""
log.prompt.debug("Shutting down with loops {}".format(self._loops))
self._shutting_down = True
if self._loops:
for loop in self._loops:
loop.quit()
loop.deleteLater()
return True
else:
return False
@pyqtSlot(usertypes.Question, bool)
def ask_question(self, question, blocking):
"""Display a prompt for a given question.
Args:
question: The Question object to ask.
blocking: If True, this function blocks and returns the result.
Return:
The answer of the user when blocking=True.
None if blocking=False.
"""
log.prompt.debug("Asking question {}, blocking {}, loops {}, queue "
"{}".format(question, blocking, self._loops,
self._queue))
if self._shutting_down:
# If we're currently shutting down we have to ignore this question
# to avoid segfaults - see
# https://github.com/qutebrowser/qutebrowser/issues/95
log.prompt.debug("Ignoring question because we're shutting down.")
question.abort()
return None
if self._question is not None and not blocking:
# We got an async question, but we're already busy with one, so we
# just queue it up for later.
log.prompt.debug("Adding {} to queue.".format(question))
self._queue.append(question)
return None
if blocking:
# If we're blocking we save the old question on the stack, so we
# can restore it after exec, if exec gets called multiple times.
log.prompt.debug("New question is blocking, saving {}".format(
self._question))
old_question = self._question
if old_question is not None:
old_question.interrupted = True
self._question = question
self.show_prompts.emit(question)
if blocking:
loop = qtutils.EventLoop()
self._loops.append(loop)
loop.destroyed.connect(lambda: self._loops.remove(loop))
question.completed.connect(loop.quit)
question.completed.connect(loop.deleteLater)
log.prompt.debug("Starting loop.exec() for {}".format(question))
flags = cast(QEventLoop.ProcessEventsFlags,
QEventLoop.ExcludeSocketNotifiers)
loop.exec(flags)
log.prompt.debug("Ending loop.exec() for {}".format(question))
log.prompt.debug("Restoring old question {}".format(old_question))
self._question = old_question
self.show_prompts.emit(old_question)
if old_question is None:
# Nothing left to restore, so we can go back to popping async
# questions.
if self._queue:
self._pop_later()
return question.answer
else:
question.completed.connect(self._pop_later)
return None
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
"""Abort question when a prompt mode was left."""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
if self._question is None:
return
log.prompt.debug("Left mode {}, hiding {}".format(
mode, self._question))
self.show_prompts.emit(None)
if self._question.answer is None and not self._question.is_aborted:
log.prompt.debug("Cancelling {} because {} was left".format(
self._question, mode))
self._question.cancel()
self._question = None
class PromptContainer(QWidget):
"""Container for prompts to be shown above the statusbar.
This is a per-window object, however each window shows the same prompt.
Attributes:
_layout: The layout used to show prompts in.
_win_id: The window ID this object is associated with.
Signals:
update_geometry: Emitted when the geometry should be updated.
"""
STYLESHEET = """
QWidget#PromptContainer {
{% if conf.statusbar.position == 'top' %}
border-bottom-left-radius: {{ conf.prompt.radius }}px;
border-bottom-right-radius: {{ conf.prompt.radius }}px;
{% else %}
border-top-left-radius: {{ conf.prompt.radius }}px;
border-top-right-radius: {{ conf.prompt.radius }}px;
{% endif %}
}
QWidget {
font: {{ conf.fonts.prompts }};
color: {{ conf.colors.prompts.fg }};
background-color: {{ conf.colors.prompts.bg }};
}
QLineEdit {
border: {{ conf.colors.prompts.border }};
}
QTreeView {
selection-background-color: {{ conf.colors.prompts.selected.bg }};
border: {{ conf.colors.prompts.border }};
}
QTreeView::branch {
background-color: {{ conf.colors.prompts.bg }};
}
QTreeView::item:selected, QTreeView::item:selected:hover,
QTreeView::branch:selected {
background-color: {{ conf.colors.prompts.selected.bg }};
}
"""
update_geometry = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._layout = QVBoxLayout(self)
self._layout.setContentsMargins(10, 10, 10, 10)
self._win_id = win_id
self._prompt: Optional[_BasePrompt] = None
self.setObjectName('PromptContainer')
self.setAttribute(Qt.WA_StyledBackground, True)
stylesheet.set_register(self)
message.global_bridge.prompt_done.connect(self._on_prompt_done)
prompt_queue.show_prompts.connect(self._on_show_prompts)
message.global_bridge.mode_left.connect(self._on_global_mode_left)
def __repr__(self):
return utils.get_repr(self, win_id=self._win_id)
@pyqtSlot(usertypes.Question)
def _on_show_prompts(self, question):
"""Show a prompt for the given question.
Args:
question: A Question object or None.
"""
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting old prompt {}".format(widget))
widget.hide()
widget.deleteLater()
if question is None:
log.prompt.debug("No prompts left, hiding prompt container.")
self._prompt = None
self.hide()
return
classes = {
usertypes.PromptMode.yesno: YesNoPrompt,
usertypes.PromptMode.text: LineEditPrompt,
usertypes.PromptMode.user_pwd: AuthenticationPrompt,
usertypes.PromptMode.download: DownloadFilenamePrompt,
usertypes.PromptMode.alert: AlertPrompt,
}
klass = classes[question.mode]
prompt = klass(question)
log.prompt.debug("Displaying prompt {}".format(prompt))
self._prompt = prompt
# If this question was interrupted, we already connected the signal
if not question.interrupted:
question.aborted.connect(
functools.partial(self._on_aborted, prompt.KEY_MODE))
modeman.enter(self._win_id, prompt.KEY_MODE, 'question asked')
self.setSizePolicy(prompt.sizePolicy())
self._layout.addWidget(prompt)
prompt.show()
self.show()
prompt.setFocus()
self.update_geometry.emit()
@pyqtSlot()
def _on_aborted(self, key_mode):
"""Leave KEY_MODE whenever a prompt is aborted."""
try:
modeman.leave(self._win_id, key_mode, 'aborted', maybe=True)
except objreg.RegistryUnavailableError:
# window was deleted: ignore
pass
@pyqtSlot(usertypes.KeyMode)
def _on_prompt_done(self, key_mode):
"""Leave the prompt mode in this window if a question was answered."""
modeman.leave(self._win_id, key_mode, ':prompt-accept', maybe=True)
@pyqtSlot(usertypes.KeyMode)
def _on_global_mode_left(self, mode):
"""Leave prompt/yesno mode in this window if it was left elsewhere.
This ensures no matter where a prompt was answered, we leave the prompt
mode and dispose of the prompt object in every window.
"""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
modeman.leave(self._win_id, mode, 'left in other window', maybe=True)
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting prompt {}".format(widget))
widget.hide()
widget.deleteLater()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno])
def prompt_accept(self, value=None, *, save=False):
"""Accept the current prompt.
//
This executes the next action depending on the question mode, e.g. asks
for the password or leaves the mode.
Args:
value: If given, uses this value instead of the entered one.
For boolean prompts, "yes"/"no" are accepted as value.
save: Save the value to the config.
"""
assert self._prompt is not None
question = self._prompt.question
try:
done = self._prompt.accept(value, save=save)
except Error as e:
raise cmdutils.CommandError(str(e))
if done:
message.global_bridge.prompt_done.emit(self._prompt.KEY_MODE)
question.done()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt], maxsplit=0)
def prompt_open_download(self, cmdline: str = None,
pdfjs: bool = False) -> None:
"""Immediately open a download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
pdfjs: Open the download via PDF.js.
"""
assert self._prompt is not None
try:
self._prompt.download_open(cmdline, pdfjs=pdfjs)
except UnsupportedOperationError:
pass
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt])
@cmdutils.argument('which', choices=['next', 'prev'])
def prompt_item_focus(self, which):
"""Shift the focus of the prompt file completion menu to another item.
Args:
which: 'next', 'prev'
"""
assert self._prompt is not None
try:
self._prompt.item_focus(which)
except UnsupportedOperationError:
pass
@cmdutils.register(
instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt, usertypes.KeyMode.yesno])
def prompt_yank(self, sel=False):
"""Yank URL to clipboard or primary selection.
Args:
sel: Use the primary selection instead of the clipboard.
"""
assert self._prompt is not None
question = self._prompt.question
if question.url is None:
message.error('No URL found.')
return
if sel and utils.supports_selection():
target = 'primary selection'
else:
sel = False
target = 'clipboard'
utils.set_clipboard(question.url, sel)
message.info("Yanked to {}: {}".format(target, question.url))
class LineEdit(QLineEdit):
"""A line edit used in prompts."""
def __init__(self, parent=None):
super().__init__(parent)
self.setStyleSheet("""
QLineEdit {
background-color: transparent;
}
""")
self.setAttribute(Qt.WA_MacShowFocusRect, False)
def keyPressEvent(self, e):
"""Override keyPressEvent to paste primary selection on Shift + Ins."""
if e.key() == Qt.Key_Insert and e.modifiers() == Qt.ShiftModifier:
try:
text = utils.get_clipboard(selection=True, fallback=True)
except utils.ClipboardError: # pragma: no cover
e.ignore()
else:
e.accept()
self.insert(text)
return
super().keyPressEvent(e)
def __repr__(self):
return utils.get_repr(self)
class _BasePrompt(QWidget):
"""Base class for all prompts."""
KEY_MODE = usertypes.KeyMode.prompt
def __init__(self, question, parent=None):
super().__init__(parent)
self.question = question
self._vbox = QVBoxLayout(self)
self._vbox.setSpacing(15)
self._key_grid = None
def __repr__(self):
return utils.get_repr(self, question=self.question, constructor=True)
def _init_texts(self, question):
assert question.title is not None, question
title = '<font size="4"><b>{}</b></font>'.format(
html.escape(question.title))
title_label = QLabel(title, self)
self._vbox.addWidget(title_label)
if question.text is not None:
# Not doing any HTML escaping here as the text can be formatted
text_label = QLabel(question.text)
text_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self._vbox.addWidget(text_label)
def _init_key_label(self):
assert self._key_grid is None, self._key_grid
self._key_grid = QGridLayout()
self._key_grid.setVerticalSpacing(0)
all_bindings = config.key_instance.get_reverse_bindings_for(
self.KEY_MODE.name)
labels = []
for cmd, text in self._allowed_commands():
bindings = all_bindings.get(cmd, [])
if bindings:
binding = None
preferred = ['<enter>', '<escape>']
for pref in preferred:
if pref in bindings:
binding = pref
if binding is None:
binding = bindings[0]
key_label = QLabel('<b>{}</b>'.format(html.escape(binding)))
text_label = QLabel(text)
labels.append((key_label, text_label))
for i, (key_label, text_label) in enumerate(labels):
self._key_grid.addWidget(key_label, i, 0)
self._key_grid.addWidget(text_label, i, 1)
spacer = QSpacerItem(0, 0, QSizePolicy.Expanding)
self._key_grid.addItem(spacer, 0, 2)
self._vbox.addLayout(self._key_grid)
def _check_save_support(self, save):
if save:
raise UnsupportedOperationError("Saving answers is only possible "
"with yes/no prompts.")
def accept(self, value=None, save=False):
raise NotImplementedError
def download_open(self, cmdline, pdfjs):
"""Open the download directly if this is a download prompt."""
utils.unused(cmdline)
utils.unused(pdfjs)
raise UnsupportedOperationError
def item_focus(self, _which):
"""Switch to next file item if this is a filename prompt.."""
raise UnsupportedOperationError
def _allowed_commands(self):
"""Get the commands we could run as response to this message."""
raise NotImplementedError
class LineEditPrompt(_BasePrompt):
"""A prompt for a single text value."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._lineedit = LineEdit(self)
self._init_texts(question)
self._vbox.addWidget(self._lineedit)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.selectAll()
self.setFocusProxy(self._lineedit)
self._init_key_label()
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
self.question.answer = text
return True
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class FilenamePrompt(_BasePrompt):
"""A prompt for a filename."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
self._lineedit = LineEdit(self)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.textEdited.connect(self._set_fileview_root)
self._vbox.addWidget(self._lineedit)
self.setFocusProxy(self._lineedit)
self._init_fileview()
self._set_fileview_root(question.default)
if config.val.prompt.filebrowser:
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self._to_complete = ''
@pyqtSlot(str)
def _set_fileview_root(self, path, *, tabbed=False):
"""Set the root path for the file display."""
separators = os.sep
if os.altsep is not None:
separators += os.altsep
dirname = os.path.dirname(path)
basename = os.path.basename(path)
if not tabbed:
self._to_complete = ''
try:
if not path:
pass
elif path in separators and os.path.isdir(path):
# Input "/" -> don't strip anything
pass
elif path[-1] in separators and os.path.isdir(path):
# Input like /foo/bar/ -> show /foo/bar/ contents
path = path.rstrip(separators)
elif os.path.isdir(dirname) and not tabbed:
# Input like /foo/ba -> show /foo contents
path = dirname
self._to_complete = basename
else:
return
except OSError:
log.prompt.exception("Failed to get directory information")
return
root = self._file_model.setRootPath(path)
self._file_view.setRootIndex(root)
@pyqtSlot(QModelIndex)
def _insert_path(self, index, *, clicked=True):
"""Handle an element selection.
Args:
index: The QModelIndex of the selected element.
clicked: Whether the element was clicked.
"""
if index == QModelIndex():
path = os.path.join(self._file_model.rootPath(), self._to_complete)
else:
path = os.path.normpath(self._file_model.filePath(index))
if clicked:
path += os.sep
else:
# On Windows, when we have C:\foo and tab over .., we get C:\
path = path.rstrip(os.sep)
log.prompt.debug('Inserting path {}'.format(path))
self._lineedit.setText(path)
self._lineedit.setFocus()
self._set_fileview_root(path, tabbed=True)
if clicked:
# Avoid having a ..-subtree highlighted
self._file_view.setCurrentIndex(QModelIndex())
def _init_fileview(self):
self._file_view = QTreeView(self)
self._file_model = QFileSystemModel(self)
self._file_view.setModel(self._file_model)
self._file_view.clicked.connect(self._insert_path)
if config.val.prompt.filebrowser:
self._vbox.addWidget(self._file_view)
else:
self._file_view.hide()
# Only show name
self._file_view.setHeaderHidden(True)
for col in range(1, 4):
self._file_view.setColumnHidden(col, True)
# Nothing selected initially
self._file_view.setCurrentIndex(QModelIndex())
# The model needs to be sorted so we get the correct first/last index
self._file_model.directoryLoaded.connect(
lambda: self._file_model.sort(0))
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
text = downloads.transform_path(text)
if text is None:
message.error("Invalid filename")
return False
self.question.answer = text
return True
def item_focus(self, which):
# This duplicates some completion code, but I don't see a nicer way...
assert which in ['prev', 'next'], which
selmodel = self._file_view.selectionModel()
parent = self._file_view.rootIndex()
first_index = self._file_model.index(0, 0, parent)
row = self._file_model.rowCount(parent) - 1
last_index = self._file_model.index(row, 0, parent)
if not first_index.isValid():
# No entries
return
assert last_index.isValid()
idx = selmodel.currentIndex()
if not idx.isValid():
# No item selected yet
idx = last_index if which == 'prev' else first_index
elif which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid():
idx = last_index if which == 'prev' else first_index
idx = self._do_completion(idx, which)
selmodel.setCurrentIndex(
idx,
QItemSelectionModel.ClearAndSelect | # type: ignore[arg-type]
QItemSelectionModel.Rows)
self._insert_path(idx, clicked=False)
def _do_completion(self, idx, which):
filename = self._file_model.fileName(idx)
while not filename.startswith(self._to_complete) and idx.isValid():
if which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
filename = self._file_model.fileName(idx)
return idx
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class DownloadFilenamePrompt(FilenamePrompt):
"""A prompt for a filename for downloads."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._file_model.setFilter(
QDir.AllDirs | QDir.Drives | QDir.NoDot) # type: ignore[arg-type]
def accept(self, value=None, save=False):
done = super().accept(value, save)
answer = self.question.answer
if answer is not None:
self.question.answer = downloads.FileDownloadTarget(answer)
return done
def download_open(self, cmdline, pdfjs):
if pdfjs:
target: 'downloads._DownloadTarget' = downloads.PDFJSDownloadTarget()
else:
target = downloads.OpenFileDownloadTarget(cmdline)
self.question.answer = target
self.question.done()
message.global_bridge.prompt_done.emit(self.KEY_MODE)
def _allowed_commands(self):
cmds = [
('prompt-accept', 'Accept'),
('mode-leave', 'Abort'),
('prompt-open-download', "Open download"),
('prompt-open-download --pdfjs', "Open download via PDF.js"),
('prompt-yank', "Yank URL"),
]
return cmds
class AuthenticationPrompt(_BasePrompt):
"""A prompt for username/password."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
user_label = QLabel("Username:", self)
self._user_lineedit = LineEdit(self)
password_label = QLabel("Password:", self)
self._password_lineedit = LineEdit(self)
self._password_lineedit.setEchoMode(QLineEdit.Password)
grid = QGridLayout()
grid.addWidget(user_label, 1, 0)
grid.addWidget(self._user_lineedit, 1, 1)
grid.addWidget(password_label, 2, 0)
grid.addWidget(self._password_lineedit, 2, 1)
self._vbox.addLayout(grid)
self._init_key_label()
assert not question.default, question.default
self.setFocusProxy(self._user_lineedit)
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is not None:
if ':' not in value:
raise Error("Value needs to be in the format "
"username:password, but {} was given".format(
value))
username, password = value.split(':', maxsplit=1)
self.question.answer = AuthInfo(username, password)
return True
elif self._user_lineedit.hasFocus():
# Earlier, tab was bound to :prompt-accept, so to still support
# that we simply switch the focus when tab was pressed.
self._password_lineedit.setFocus()
return False
else:
self.question.answer = AuthInfo(self._user_lineedit.text(),
self._password_lineedit.text())
return True
def item_focus(self, which):
"""Support switching between fields with tab."""
assert which in ['prev', 'next'], which
if which == 'next' and self._user_lineedit.hasFocus():
self._password_lineedit.setFocus()
elif which == 'prev' and self._password_lineedit.hasFocus():
self._user_lineedit.setFocus()
def _allowed_commands(self):
return [('prompt-accept', "Accept"),
('mode-leave', "Abort")]
class YesNoPrompt(_BasePrompt):
"""A prompt with yes/no answers."""
KEY_MODE = usertypes.KeyMode.yesno
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def _check_save_support(self, save):
if save and self.question.option is None:
raise Error("No setting available to save the answer for this "
"question.")
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is None:
if self.question.default is None:
raise Error("No default value was set for this question!")
self.question.answer = self.question.default
elif value == 'yes':
self.question.answer = True
elif value == 'no':
self.question.answer = False
else:
raise Error("Invalid value {} - expected yes/no!".format(value))
if save:
opt = config.instance.get_opt(self.question.option)
assert isinstance(opt.typ, configtypes.Bool)
pattern = urlmatch.UrlPattern(self.question.url)
try:
config.instance.set_obj(opt.name, self.question.answer,
pattern=pattern, save_yaml=True)
except configexc.Error as e:
raise Error(str(e))
return True
def _allowed_commands(self):
cmds = []
cmds.append(('prompt-accept yes', "Yes"))
if self.question.option is not None:
cmds.append(('prompt-accept --save yes', "Always"))
cmds.append(('prompt-accept no', "No"))
if self.question.option is not None:
cmds.append(('prompt-accept --save no', "Never"))
if self.question.default is not None:
assert self.question.default in [True, False]
default = 'yes' if self.question.default else 'no'
cmds.append(('prompt-accept', "Use default ({})".format(default)))
cmds.append(('mode-leave', "Abort"))
cmds.append(('prompt-yank', "Yank URL"))
return cmds
class AlertPrompt(_BasePrompt):
"""A prompt without any answer possibility."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is not None:
raise Error("No value is permitted with alert prompts!")
# Simply mark prompt as done without setting self.question.answer
return True
def _allowed_commands(self):
return [('prompt-accept', "Hide")]
def init():
"""Initialize global prompt objects."""
global prompt_queue
prompt_queue = PromptQueue()
message.global_bridge.ask_question.connect( # type: ignore[call-arg]
prompt_queue.ask_question, Qt.DirectConnection)
|
gpl-3.0
| 8,010,042,735,672,391,000
| 34.537344
| 81
| 0.594547
| false
| 4.234087
| false
| false
| false
|
huggingface/transformers
|
src/transformers/utils/versions.py
|
1
|
4381
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with package versions
"""
import operator
import re
import sys
from typing import Optional
from packaging import version
# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
if got_ver is None:
raise ValueError("got_ver is None")
if want_ver is None:
raise ValueError("want_ver is None")
if not ops[op](version.parse(got_ver), version.parse(want_ver)):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
)
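# A small illustration of how the operator table above combines with
# packaging.version (the version values are made up for the example):
#
#     _compare_versions(">=", "1.19.0", "1.18.5", "numpy>=1.18.5", "numpy", "")
#     # passes, because version.parse("1.19.0") >= version.parse("1.18.5")
#
#     _compare_versions("==", "0.9.3", "0.9.4", "tokenizers==0.9.4", "tokenizers", "")
#     # raises ImportError mentioning the requirement and the found version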
def require_version(requirement: str, hint: Optional[str] = None) -> None:
"""
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the `site-packages` dir via `importlib_metadata`.
Args:
requirement (:obj:`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (:obj:`str`, `optional`): what suggestion to print in case of requirements not being met
Example::
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
"""
hint = f"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$", requirement):
pkg, op, want_ver = requirement, None, None
else:
match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
pkg, want_full = match[0]
want_range = want_full.split(",") # there could be multiple requirements
wanted = {}
for w in want_range:
match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
# special case
if pkg == "python":
got_ver = ".".join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
# check if any version is installed
try:
got_ver = importlib_metadata.version(pkg)
except importlib_metadata.PackageNotFoundError:
raise importlib_metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}"
)
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
"""require_version wrapper which emits a core-specific hint on failure"""
hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git master"
return require_version(requirement, hint)
|
apache-2.0
| -2,630,832,435,068,718,000
| 35.508333
| 139
| 0.635471
| false
| 3.880425
| false
| false
| false
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons/io_scene_obj/export_obj.py
|
1
|
38495
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import os
import bpy
import mathutils
import bpy_extras.io_utils
from progress_report import ProgressReport, ProgressReportSubstep
def name_compat(name):
if name is None:
return 'None'
else:
return name.replace(' ', '_')
def mesh_triangulate(me):
import bmesh
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.to_mesh(me)
bm.free()
def write_mtl(scene, filepath, path_mode, copy_set, mtl_dict):
from mathutils import Color, Vector
world = scene.world
if world:
world_amb = world.ambient_color
else:
world_amb = Color((0.0, 0.0, 0.0))
source_dir = os.path.dirname(bpy.data.filepath)
dest_dir = os.path.dirname(filepath)
with open(filepath, "w", encoding="utf8", newline="\n") as f:
fw = f.write
fw('# Blender MTL File: %r\n' % (os.path.basename(bpy.data.filepath) or "None"))
fw('# Material Count: %i\n' % len(mtl_dict))
mtl_dict_values = list(mtl_dict.values())
mtl_dict_values.sort(key=lambda m: m[0])
# Write material/image combinations we have used.
# Using mtl_dict.values() directly gives un-predictable order.
for mtl_mat_name, mat, face_img in mtl_dict_values:
# Get the Blender data for the material and the image.
# Having an image named None will make a bug, dont do it :)
fw('\nnewmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
if mat:
use_mirror = mat.raytrace_mirror.use and mat.raytrace_mirror.reflect_factor != 0.0
# convert from blenders spec to 0 - 1000 range.
if mat.specular_shader == 'WARDISO':
tspec = (0.4 - mat.specular_slope) / 0.0004
else:
tspec = (mat.specular_hardness - 1) / 0.51
fw('Ns %.6f\n' % tspec)
del tspec
# Ambient
if use_mirror:
fw('Ka %.6f %.6f %.6f\n' % (mat.raytrace_mirror.reflect_factor * mat.mirror_color)[:])
else:
fw('Ka %.6f %.6f %.6f\n' % (mat.ambient, mat.ambient, mat.ambient)) # Do not use world color!
fw('Kd %.6f %.6f %.6f\n' % (mat.diffuse_intensity * mat.diffuse_color)[:]) # Diffuse
fw('Ks %.6f %.6f %.6f\n' % (mat.specular_intensity * mat.specular_color)[:]) # Specular
# Emission, not in original MTL standard but seems pretty common, see T45766.
# XXX Blender has no color emission, it's using diffuse color instead...
fw('Ke %.6f %.6f %.6f\n' % (mat.emit * mat.diffuse_color)[:])
if hasattr(mat, "raytrace_transparency") and hasattr(mat.raytrace_transparency, "ior"):
fw('Ni %.6f\n' % mat.raytrace_transparency.ior) # Refraction index
else:
fw('Ni %.6f\n' % 1.0)
fw('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
# See http://en.wikipedia.org/wiki/Wavefront_.obj_file for whole list of values...
# Note that mapping is rather fuzzy sometimes, trying to do our best here.
if mat.use_shadeless:
fw('illum 0\n') # ignore lighting
elif mat.specular_intensity == 0:
fw('illum 1\n') # no specular.
elif use_mirror:
if mat.use_transparency and mat.transparency_method == 'RAYTRACE':
if mat.raytrace_mirror.fresnel != 0.0:
fw('illum 7\n') # Reflection, Transparency, Ray trace and Fresnel
else:
fw('illum 6\n') # Reflection, Transparency, Ray trace
elif mat.raytrace_mirror.fresnel != 0.0:
fw('illum 5\n') # Reflection, Ray trace and Fresnel
else:
fw('illum 3\n') # Reflection and Ray trace
elif mat.use_transparency and mat.transparency_method == 'RAYTRACE':
fw('illum 9\n') # 'Glass' transparency and no Ray trace reflection... fuzzy matching, but...
else:
                    fw('illum 2\n')  # light normally
else:
# Write a dummy material here?
fw('Ns 0\n')
fw('Ka %.6f %.6f %.6f\n' % world_amb[:]) # Ambient, uses mirror color,
fw('Kd 0.8 0.8 0.8\n')
fw('Ks 0.8 0.8 0.8\n')
fw('d 1\n') # No alpha
                fw('illum 2\n')  # light normally
# Write images!
if face_img: # We have an image on the face!
filepath = face_img.filepath
if filepath: # may be '' for generated images
# write relative image path
filepath = bpy_extras.io_utils.path_reference(filepath, source_dir, dest_dir,
path_mode, "", copy_set, face_img.library)
fw('map_Kd %s\n' % filepath) # Diffuse mapping image
del filepath
else:
# so we write the materials image.
face_img = None
            if mat:  # No face image. If we have a material, search for MTex image.
image_map = {}
# backwards so topmost are highest priority
for mtex in reversed(mat.texture_slots):
if mtex and mtex.texture and mtex.texture.type == 'IMAGE':
image = mtex.texture.image
if image:
# texface overrides others
if (mtex.use_map_color_diffuse and (face_img is None) and
(mtex.use_map_warp is False) and (mtex.texture_coords != 'REFLECTION')):
image_map["map_Kd"] = (mtex, image)
if mtex.use_map_ambient:
image_map["map_Ka"] = (mtex, image)
# this is the Spec intensity channel but Ks stands for specular Color
'''
if mtex.use_map_specular:
image_map["map_Ks"] = (mtex, image)
'''
if mtex.use_map_color_spec: # specular color
image_map["map_Ks"] = (mtex, image)
if mtex.use_map_hardness: # specular hardness/glossiness
image_map["map_Ns"] = (mtex, image)
if mtex.use_map_alpha:
image_map["map_d"] = (mtex, image)
if mtex.use_map_translucency:
image_map["map_Tr"] = (mtex, image)
if mtex.use_map_normal:
image_map["map_Bump"] = (mtex, image)
if mtex.use_map_displacement:
image_map["disp"] = (mtex, image)
if mtex.use_map_color_diffuse and (mtex.texture_coords == 'REFLECTION'):
image_map["refl"] = (mtex, image)
if mtex.use_map_emit:
image_map["map_Ke"] = (mtex, image)
for key, (mtex, image) in sorted(image_map.items()):
filepath = bpy_extras.io_utils.path_reference(image.filepath, source_dir, dest_dir,
path_mode, "", copy_set, image.library)
options = []
if key == "map_Bump":
if mtex.normal_factor != 1.0:
options.append('-bm %.6f' % mtex.normal_factor)
if mtex.offset != Vector((0.0, 0.0, 0.0)):
options.append('-o %.6f %.6f %.6f' % mtex.offset[:])
if mtex.scale != Vector((1.0, 1.0, 1.0)):
options.append('-s %.6f %.6f %.6f' % mtex.scale[:])
if options:
fw('%s %s %s\n' % (key, " ".join(options), repr(filepath)[1:-1]))
else:
fw('%s %s\n' % (key, repr(filepath)[1:-1]))
def test_nurbs_compat(ob):
if ob.type != 'CURVE':
return False
for nu in ob.data.splines:
if nu.point_count_v == 1 and nu.type != 'BEZIER': # not a surface and not bezier
return True
return False
def write_nurb(fw, ob, ob_mat):
tot_verts = 0
cu = ob.data
# use negative indices
for nu in cu.splines:
if nu.type == 'POLY':
DEG_ORDER_U = 1
else:
DEG_ORDER_U = nu.order_u - 1 # odd but tested to be correct
if nu.type == 'BEZIER':
print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
continue
if nu.point_count_v > 1:
print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
continue
if len(nu.points) <= DEG_ORDER_U:
print("\tWarning, order_u is lower then vert count, skipping:", ob.name)
continue
pt_num = 0
do_closed = nu.use_cyclic_u
do_endpoints = (do_closed == 0) and nu.use_endpoint_u
for pt in nu.points:
fw('v %.6f %.6f %.6f\n' % (ob_mat * pt.co.to_3d())[:])
pt_num += 1
tot_verts += pt_num
fw('g %s\n' % (name_compat(ob.name))) # name_compat(ob.getData(1)) could use the data name too
fw('cstype bspline\n') # not ideal, hard coded
fw('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
curve_ls = [-(i + 1) for i in range(pt_num)]
# 'curv' keyword
if do_closed:
if DEG_ORDER_U == 1:
pt_num += 1
curve_ls.append(-1)
else:
pt_num += DEG_ORDER_U
curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
fw('curv 0.0 1.0 %s\n' % (" ".join([str(i) for i in curve_ls]))) # Blender has no U and V values for the curve
# 'parm' keyword
tot_parm = (DEG_ORDER_U + 1) + pt_num
tot_parm_div = float(tot_parm - 1)
parm_ls = [(i / tot_parm_div) for i in range(tot_parm)]
if do_endpoints: # end points, force param
for i in range(DEG_ORDER_U + 1):
parm_ls[i] = 0.0
parm_ls[-(1 + i)] = 1.0
fw("parm u %s\n" % " ".join(["%.6f" % i for i in parm_ls]))
fw('end\n')
return tot_verts
def write_file(filepath, objects, scene,
EXPORT_TRI=False,
EXPORT_EDGES=False,
EXPORT_SMOOTH_GROUPS=False,
EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
EXPORT_NORMALS=False,
EXPORT_UV=True,
EXPORT_MTL=True,
EXPORT_APPLY_MODIFIERS=True,
EXPORT_APPLY_MODIFIERS_RENDER=False,
EXPORT_BLEN_OBS=True,
EXPORT_GROUP_BY_OB=False,
EXPORT_GROUP_BY_MAT=False,
EXPORT_KEEP_VERT_ORDER=False,
EXPORT_POLYGROUPS=False,
EXPORT_CURVE_AS_NURBS=True,
EXPORT_GLOBAL_MATRIX=None,
EXPORT_PATH_MODE='AUTO',
progress=ProgressReport(),
):
"""
Basic write function. The context and options must be already set
This can be accessed externaly
eg.
write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
"""
if EXPORT_GLOBAL_MATRIX is None:
EXPORT_GLOBAL_MATRIX = mathutils.Matrix()
def veckey3d(v):
return round(v.x, 4), round(v.y, 4), round(v.z, 4)
def veckey2d(v):
return round(v[0], 4), round(v[1], 4)
def findVertexGroupName(face, vWeightMap):
"""
Searches the vertexDict to see what groups is assigned to a given face.
        We use a frequency system in order to sort out the name because a given vertex can
        belong to two or more groups at the same time. To find the right name for the face
        we list all the possible vertex group names with their frequency and then sort by
        frequency in descending order. The top element, the one shared by the highest number
        of vertices, is the face's group.
"""
weightDict = {}
for vert_index in face.vertices:
vWeights = vWeightMap[vert_index]
for vGroupName, weight in vWeights:
weightDict[vGroupName] = weightDict.get(vGroupName, 0.0) + weight
if weightDict:
return max((weight, vGroupName) for vGroupName, weight in weightDict.items())[1]
else:
return '(null)'
with ProgressReportSubstep(progress, 2, "OBJ Export path: %r" % filepath, "OBJ Export Finished") as subprogress1:
with open(filepath, "w", encoding="utf8", newline="\n") as f:
fw = f.write
# Write Header
fw('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
fw('# www.blender.org\n')
# Tell the obj file what material file to use.
if EXPORT_MTL:
mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
# filepath can contain non utf8 chars, use repr
fw('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1])
# Initialize totals, these are updated each object
totverts = totuvco = totno = 1
face_vert_index = 1
# A Dict of Materials
# (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
mtl_dict = {}
# Used to reduce the usage of matname_texname materials, which can become annoying in case of
# repeated exports/imports, yet keeping unique mat names per keys!
# mtl_name: (material.name, image.name)
mtl_rev_dict = {}
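            # Sketch of the shapes involved, with made-up names; the mtl name
            # only gets an image-based suffix when two keys would otherwise collide:
            #   mtl_dict[("MyMat", "wood.png")] = ("MyMat", material, face_image)
            #   mtl_rev_dict["MyMat"]           = ("MyMat", "wood.png")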
copy_set = set()
# Get all meshes
subprogress1.enter_substeps(len(objects))
for i, ob_main in enumerate(objects):
# ignore dupli children
if ob_main.parent and ob_main.parent.dupli_type in {'VERTS', 'FACES'}:
# XXX
subprogress1.step("Ignoring %s, dupli child..." % ob_main.name)
continue
obs = [(ob_main, ob_main.matrix_world)]
if ob_main.dupli_type != 'NONE':
# XXX
print('creating dupli_list on', ob_main.name)
ob_main.dupli_list_create(scene)
obs += [(dob.object, dob.matrix) for dob in ob_main.dupli_list]
# XXX debug print
print(ob_main.name, 'has', len(obs) - 1, 'dupli children')
subprogress1.enter_substeps(len(obs))
for ob, ob_mat in obs:
with ProgressReportSubstep(subprogress1, 6) as subprogress2:
uv_unique_count = no_unique_count = 0
# Nurbs curve support
if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
ob_mat = EXPORT_GLOBAL_MATRIX * ob_mat
totverts += write_nurb(fw, ob, ob_mat)
continue
# END NURBS
try:
me = ob.to_mesh(scene, EXPORT_APPLY_MODIFIERS, calc_tessface=False,
settings='RENDER' if EXPORT_APPLY_MODIFIERS_RENDER else 'PREVIEW')
except RuntimeError:
me = None
if me is None:
continue
me.transform(EXPORT_GLOBAL_MATRIX * ob_mat)
if EXPORT_TRI:
# _must_ do this first since it re-allocs arrays
mesh_triangulate(me)
if EXPORT_UV:
faceuv = len(me.uv_textures) > 0
if faceuv:
uv_texture = me.uv_textures.active.data[:]
uv_layer = me.uv_layers.active.data[:]
else:
faceuv = False
me_verts = me.vertices[:]
# Make our own list so it can be sorted to reduce context switching
face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]
# faces = [ f for f in me.tessfaces ]
if EXPORT_EDGES:
edges = me.edges
else:
edges = []
if not (len(face_index_pairs) + len(edges) + len(me.vertices)): # Make sure there is something to write
# clean up
bpy.data.meshes.remove(me)
continue # dont bother with this mesh.
if EXPORT_NORMALS and face_index_pairs:
me.calc_normals_split()
# No need to call me.free_normals_split later, as this mesh is deleted anyway!
loops = me.loops
if (EXPORT_SMOOTH_GROUPS or EXPORT_SMOOTH_GROUPS_BITFLAGS) and face_index_pairs:
smooth_groups, smooth_groups_tot = me.calc_smooth_groups(EXPORT_SMOOTH_GROUPS_BITFLAGS)
if smooth_groups_tot <= 1:
smooth_groups, smooth_groups_tot = (), 0
else:
smooth_groups, smooth_groups_tot = (), 0
materials = me.materials[:]
material_names = [m.name if m else None for m in materials]
# avoid bad index errors
if not materials:
materials = [None]
material_names = [name_compat(None)]
# Sort by Material, then images
# so we dont over context switch in the obj file.
if EXPORT_KEEP_VERT_ORDER:
pass
else:
if faceuv:
if smooth_groups:
sort_func = lambda a: (a[0].material_index,
hash(uv_texture[a[1]].image),
smooth_groups[a[1]] if a[0].use_smooth else False)
else:
sort_func = lambda a: (a[0].material_index,
hash(uv_texture[a[1]].image),
a[0].use_smooth)
elif len(materials) > 1:
if smooth_groups:
sort_func = lambda a: (a[0].material_index,
smooth_groups[a[1]] if a[0].use_smooth else False)
else:
sort_func = lambda a: (a[0].material_index,
a[0].use_smooth)
else:
# no materials
if smooth_groups:
                                    sort_func = lambda a: smooth_groups[a[1]] if a[0].use_smooth else False
else:
sort_func = lambda a: a[0].use_smooth
face_index_pairs.sort(key=sort_func)
del sort_func
# Set the default mat to no material and no image.
contextMat = 0, 0 # Can never be this, so we will label a new material the first chance we get.
contextSmooth = None # Will either be true or false, set bad to force initialization switch.
if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
name1 = ob.name
name2 = ob.data.name
if name1 == name2:
obnamestring = name_compat(name1)
else:
obnamestring = '%s_%s' % (name_compat(name1), name_compat(name2))
if EXPORT_BLEN_OBS:
fw('o %s\n' % obnamestring) # Write Object name
else: # if EXPORT_GROUP_BY_OB:
fw('g %s\n' % obnamestring)
subprogress2.step()
# Vert
for v in me_verts:
fw('v %.6f %.6f %.6f\n' % v.co[:])
subprogress2.step()
# UV
if faceuv:
# in case removing some of these dont get defined.
uv = f_index = uv_index = uv_key = uv_val = uv_ls = None
uv_face_mapping = [None] * len(face_index_pairs)
uv_dict = {}
uv_get = uv_dict.get
for f, f_index in face_index_pairs:
uv_ls = uv_face_mapping[f_index] = []
for uv_index, l_index in enumerate(f.loop_indices):
uv = uv_layer[l_index].uv
# include the vertex index in the key so we don't share UV's between vertices,
# allowed by the OBJ spec but can cause issues for other importers, see: T47010.
# this works too, shared UV's for all verts
#~ uv_key = veckey2d(uv)
uv_key = loops[l_index].vertex_index, veckey2d(uv)
uv_val = uv_get(uv_key)
if uv_val is None:
uv_val = uv_dict[uv_key] = uv_unique_count
fw('vt %.6f %.6f\n' % uv[:])
uv_unique_count += 1
uv_ls.append(uv_val)
del uv_dict, uv, f_index, uv_index, uv_ls, uv_get, uv_key, uv_val
# Only need uv_unique_count and uv_face_mapping
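# At this point uv_face_mapping[face_index][corner] holds the global 'vt' index for each
# face corner, and uv_unique_count is the number of 'vt' lines written for this mesh.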
subprogress2.step()
# NORMAL, Smooth/Non smoothed.
if EXPORT_NORMALS:
no_key = no_val = None
normals_to_idx = {}
no_get = normals_to_idx.get
loops_to_normals = [0] * len(loops)
for f, f_index in face_index_pairs:
for l_idx in f.loop_indices:
no_key = veckey3d(loops[l_idx].normal)
no_val = no_get(no_key)
if no_val is None:
no_val = normals_to_idx[no_key] = no_unique_count
fw('vn %.4f %.4f %.4f\n' % no_key)
no_unique_count += 1
loops_to_normals[l_idx] = no_val
del normals_to_idx, no_get, no_key, no_val
else:
loops_to_normals = []
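# Likewise, loops_to_normals[loop_index] maps each mesh loop to the global 'vn' index
# written above (it stays empty when normal export is disabled).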
if not faceuv:
f_image = None
subprogress2.step()
# XXX
if EXPORT_POLYGROUPS:
# Retrieve the list of vertex groups
vertGroupNames = ob.vertex_groups.keys()
if vertGroupNames:
currentVGroup = ''
# For each vertex, build the list of (vertex group name, weight) pairs it belongs to
vgroupsMap = [[] for _i in range(len(me_verts))]
for v_idx, v_ls in enumerate(vgroupsMap):
v_ls[:] = [(vertGroupNames[g.group], g.weight) for g in me_verts[v_idx].groups]
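# vgroupsMap[vertex_index] now lists (group name, weight) pairs; findVertexGroupName()
# (defined earlier in this module) uses it to pick the dominant group for each face.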
for f, f_index in face_index_pairs:
f_smooth = f.use_smooth
if f_smooth and smooth_groups:
f_smooth = smooth_groups[f_index]
f_mat = min(f.material_index, len(materials) - 1)
if faceuv:
tface = uv_texture[f_index]
f_image = tface.image
# MAKE KEY
if faceuv and f_image: # Object is always true.
key = material_names[f_mat], f_image.name
else:
key = material_names[f_mat], None # No image, use None instead.
# Write the vertex group
if EXPORT_POLYGROUPS:
if vertGroupNames:
# find which vertex group the face belongs to
vgroup_of_face = findVertexGroupName(f, vgroupsMap)
if vgroup_of_face != currentVGroup:
currentVGroup = vgroup_of_face
fw('g %s\n' % vgroup_of_face)
# CHECK FOR CONTEXT SWITCH
if key == contextMat:
pass # Context already switched, don't do anything
else:
if key[0] is None and key[1] is None:
# Write a null material, since we know the context has changed.
if EXPORT_GROUP_BY_MAT:
# can be mat_image or (null)
fw("g %s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name)))
if EXPORT_MTL:
fw("usemtl (null)\n") # mat, image
else:
mat_data = mtl_dict.get(key)
if not mat_data:
# First add to global dict so we can export to mtl
# Then write mtl
# Make a new name from the material and image name,
# converting any spaces to underscores with name_compat.
# If there is no image, don't bother adding it to the name.
# Try to avoid as much as possible adding texname (or other things)
# to the mtl name (see [#32102])...
mtl_name = "%s" % name_compat(key[0])
if mtl_rev_dict.get(mtl_name, None) not in {key, None}:
if key[1] is None:
tmp_ext = "_NONE"
else:
tmp_ext = "_%s" % name_compat(key[1])
i = 0
while mtl_rev_dict.get(mtl_name + tmp_ext, None) not in {key, None}:
i += 1
tmp_ext = "_%3d" % i
mtl_name += tmp_ext
mat_data = mtl_dict[key] = mtl_name, materials[f_mat], f_image
mtl_rev_dict[mtl_name] = key
if EXPORT_GROUP_BY_MAT:
# can be mat_image or (null)
fw("g %s_%s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name), mat_data[0]))
if EXPORT_MTL:
fw("usemtl %s\n" % mat_data[0]) # can be mat_image or (null)
contextMat = key
if f_smooth != contextSmooth:
if f_smooth: # was off, now on
if smooth_groups:
f_smooth = smooth_groups[f_index]
fw('s %d\n' % f_smooth)
else:
fw('s 1\n')
else: # was on, now off
fw('s off\n')
contextSmooth = f_smooth
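# OBJ face records use 1-based indices that are global across the whole file, which is why
# the running offsets totverts/totuvco/totno are added to every index below. Depending on
# the exported data the record is 'f v', 'f v/vt', 'f v//vn' or 'f v/vt/vn'.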
f_v = [(vi, me_verts[v_idx], l_idx)
for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))]
fw('f')
if faceuv:
if EXPORT_NORMALS:
for vi, v, li in f_v:
fw(" %d/%d/%d" % (totverts + v.index,
totuvco + uv_face_mapping[f_index][vi],
totno + loops_to_normals[li],
)) # vert, uv, normal
else: # No Normals
for vi, v, li in f_v:
fw(" %d/%d" % (totverts + v.index,
totuvco + uv_face_mapping[f_index][vi],
)) # vert, uv
face_vert_index += len(f_v)
else: # No UV's
if EXPORT_NORMALS:
for vi, v, li in f_v:
fw(" %d//%d" % (totverts + v.index, totno + loops_to_normals[li]))
else: # No Normals
for vi, v, li in f_v:
fw(" %d" % (totverts + v.index))
fw('\n')
subprogress2.step()
# Write edges.
if EXPORT_EDGES:
for ed in edges:
if ed.is_loose:
fw('l %d %d\n' % (totverts + ed.vertices[0], totverts + ed.vertices[1]))
# Make the indices global rather than per-mesh
totverts += len(me_verts)
totuvco += uv_unique_count
totno += no_unique_count
# clean up
bpy.data.meshes.remove(me)
if ob_main.dupli_type != 'NONE':
ob_main.dupli_list_clear()
subprogress1.leave_substeps("Finished writing geometry of '%s'." % ob_main.name)
subprogress1.leave_substeps()
subprogress1.step("Finished exporting geometry, now exporting materials")
# Now we have all our materials, save them
if EXPORT_MTL:
write_mtl(scene, mtlfilepath, EXPORT_PATH_MODE, copy_set, mtl_dict)
# copy all collected files.
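# copy_set holds (source, destination) pairs queued while resolving texture paths in
# write_mtl(); path_reference_copy() performs the actual copies (e.g. for the 'COPY' path mode).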
bpy_extras.io_utils.path_reference_copy(copy_set)
def _write(context, filepath,
EXPORT_TRI, # ok
EXPORT_EDGES,
EXPORT_SMOOTH_GROUPS,
EXPORT_SMOOTH_GROUPS_BITFLAGS,
EXPORT_NORMALS, # ok
EXPORT_UV, # ok
EXPORT_MTL,
EXPORT_APPLY_MODIFIERS, # ok
EXPORT_APPLY_MODIFIERS_RENDER, # ok
EXPORT_BLEN_OBS,
EXPORT_GROUP_BY_OB,
EXPORT_GROUP_BY_MAT,
EXPORT_KEEP_VERT_ORDER,
EXPORT_POLYGROUPS,
EXPORT_CURVE_AS_NURBS,
EXPORT_SEL_ONLY, # ok
EXPORT_ANIMATION,
EXPORT_GLOBAL_MATRIX,
EXPORT_PATH_MODE, # forwarded to write_file()
):
with ProgressReport(context.window_manager) as progress:
base_name, ext = os.path.splitext(filepath)
context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension
scene = context.scene
# Exit edit mode before exporting, so current object states are exported properly.
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
orig_frame = scene.frame_current
# Export an animation?
if EXPORT_ANIMATION:
scene_frames = range(scene.frame_start, scene.frame_end + 1) # Up to and including the end frame.
else:
scene_frames = [orig_frame] # Don't export an animation.
# Loop through all frames in the scene and export.
progress.enter_substeps(len(scene_frames))
for frame in scene_frames:
if EXPORT_ANIMATION: # Add frame to the filepath.
context_name[2] = '_%.6d' % frame
scene.frame_set(frame, 0.0)
if EXPORT_SEL_ONLY:
objects = context.selected_objects
else:
objects = scene.objects
full_path = ''.join(context_name)
# Note: this can overwrite files when exporting frames; acceptable for now.
# EXPORT THE FILE.
progress.enter_substeps(1)
write_file(full_path, objects, scene,
EXPORT_TRI,
EXPORT_EDGES,
EXPORT_SMOOTH_GROUPS,
EXPORT_SMOOTH_GROUPS_BITFLAGS,
EXPORT_NORMALS,
EXPORT_UV,
EXPORT_MTL,
EXPORT_APPLY_MODIFIERS,
EXPORT_APPLY_MODIFIERS_RENDER,
EXPORT_BLEN_OBS,
EXPORT_GROUP_BY_OB,
EXPORT_GROUP_BY_MAT,
EXPORT_KEEP_VERT_ORDER,
EXPORT_POLYGROUPS,
EXPORT_CURVE_AS_NURBS,
EXPORT_GLOBAL_MATRIX,
EXPORT_PATH_MODE,
progress,
)
progress.leave_substeps()
scene.frame_set(orig_frame, 0.0)
progress.leave_substeps()
"""
Currently the exporter lacks these features:
* multiple scene export (only active scene is written)
* particles
"""
def save(context,
filepath,
*,
use_triangles=False,
use_edges=True,
use_normals=False,
use_smooth_groups=False,
use_smooth_groups_bitflags=False,
use_uvs=True,
use_materials=True,
use_mesh_modifiers=True,
use_mesh_modifiers_render=False,
use_blen_objects=True,
group_by_object=False,
group_by_material=False,
keep_vertex_order=False,
use_vertex_groups=False,
use_nurbs=True,
use_selection=True,
use_animation=False,
global_matrix=None,
path_mode='AUTO'
):
_write(context, filepath,
EXPORT_TRI=use_triangles,
EXPORT_EDGES=use_edges,
EXPORT_SMOOTH_GROUPS=use_smooth_groups,
EXPORT_SMOOTH_GROUPS_BITFLAGS=use_smooth_groups_bitflags,
EXPORT_NORMALS=use_normals,
EXPORT_UV=use_uvs,
EXPORT_MTL=use_materials,
EXPORT_APPLY_MODIFIERS=use_mesh_modifiers,
EXPORT_APPLY_MODIFIERS_RENDER=use_mesh_modifiers_render,
EXPORT_BLEN_OBS=use_blen_objects,
EXPORT_GROUP_BY_OB=group_by_object,
EXPORT_GROUP_BY_MAT=group_by_material,
EXPORT_KEEP_VERT_ORDER=keep_vertex_order,
EXPORT_POLYGROUPS=use_vertex_groups,
EXPORT_CURVE_AS_NURBS=use_nurbs,
EXPORT_SEL_ONLY=use_selection,
EXPORT_ANIMATION=use_animation,
EXPORT_GLOBAL_MATRIX=global_matrix,
EXPORT_PATH_MODE=path_mode,
)
return {'FINISHED'}
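# --- Usage sketch (not part of the original add-on) ---
# A minimal, hypothetical helper showing how save() might be driven from Blender's Python
# console; it assumes a valid context is available and that bpy_extras is importable.
# The -Z forward / Y up conversion matches the exporter's customary default for OBJ.
def _example_export(context, filepath="/tmp/scene.obj"):
    from bpy_extras.io_utils import axis_conversion
    # Build the global matrix normally supplied by the export operator.
    global_matrix = axis_conversion(to_forward='-Z', to_up='Y').to_4x4()
    return save(context, filepath,
                use_selection=False,
                use_normals=True,
                global_matrix=global_matrix,
                path_mode='COPY')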
|
gpl-3.0
| -8,155,557,614,567,815,000
| 44.181925
| 132
| 0.444317
| false
| 4.304965
| false
| false
| false
|