| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
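Each row below is a fill-in-the-middle style sample: the file's code is split across the `prefix`, `middle`, and `suffix` columns, and the three cells concatenate back into the file's source (clipped to the column length limits for large files). A minimal sketch of that reassembly, assuming a row is available as a plain dict; the dict literal simply mirrors the first row of the table and is shown only for illustration:

```python
# Rebuild a source file from one row's prefix/middle/suffix cells.
# The dict below mirrors the first row of the table; a real row would
# come from however the dataset is loaded.
row = {
    "repo_name": "vnsofthe/odoo-dev",
    "path": "addons/rhwl_hr/__init__.py",
    "prefix": "import rhwl_hr\nimport rhwl_holidays\nimp",
    "middle": "ort con",
    "suffix": "trollers\n",
}

full_source = row["prefix"] + row["middle"] + row["suffix"]
print(full_source)
# import rhwl_hr
# import rhwl_holidays
# import controllers
```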
| vnsofthe/odoo-dev | addons/rhwl_hr/__init__.py | Python | agpl-3.0 | 55 | 0 |
import rhwl_hr
import rhwl_holidays
import controllers
| mnrozhkov/candybot | twitter_stream.py | Python | gpl-2.0 | 1,522 | 0.027595 |
#!/usr/bin/python
# Import the necessary package to process data in JSON format
try:
import json
except ImportError:
import simplejson as json
# Import the necessary methods from "twitter" library
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
# from twython import Twython
from secret import (
TW_CONSUMER_KEY,
TW_CONSUMER_SECRET,
TW_ACCESS_TOKEN_KEY,
TW_ACCESS_TOKEN_SECRET
)
CONSUMER_KEY=TW_CONSUMER_KEY
CONSUMER_SECRET=TW_CONSUMER_SECRET
ACCESS_TOKEN=TW_ACCESS_TOKEN_KEY
ACCESS_SECRET=TW_ACCESS_TOKEN_SECRET
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
# Initiate the connection to Twitter Streaming API
twitter_stream = TwitterStream(auth=oauth)
def listenTwitter(track, code):
"""
Listen Twitter for mention of keywords stated in 'track' and 'code'.
Use Twitter stream API
Params:
track: message to track in Tweets
code: unique code from CandyBot
Returns:
True or False decision status on candy dispensing
"""
# Listen for tweets with required track (@fun_robots) and #code
iterator = twitter_stream.statuses.filter(track=track)
while True:
for tweet in iterator:
tw_text = json.loads(json.dumps(tweet)).get('text')
# print(tw_text, "\n") ##for debug
if code in tw_text:
print("PLEASE, TAKE YOUR CANDY! :)))))))))")
return(True)
else:
break
return(False)
if __name__ == "__main__":
get_candy = listenTwitter(track='@fun_robots', code='4451')
print(get_candy)
| daniellima/othello | main.py | Python | mit | 204 | 0.014706 |
from controllers.board_controller import BoardController
from models.move import Move
from models.board import Board
controller = BoardController()
controller.init_game()
| bowenliu16/deepchem | examples/benchmark.py | Python | gpl-3.0 | 23,002 | 0.012086 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 15:53:27 2016
@author: Michael Wu
Benchmark test:
Giving classification performances of:
Random forest(rf), MultitaskDNN(tf),
RobustMultitaskDNN(tf_robust),
Logistic regression(logreg),
Graph convolution(graphconv)
on datasets: muv, pcba, tox21, sider, toxcast
Giving regression performances of:
MultitaskDNN(tf_regression),
Graph convolution regression(graphconvreg)
on datasets: delaney, nci, kaggle, pdbbind
time estimation listed in README file
Total time of running a benchmark test(for one splitting function): 20h
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import os
import numpy as np
import shutil
import time
import deepchem as dc
import tensorflow as tf
import argparse
from keras import backend as K
import csv
from sklearn.ensemble import RandomForestClassifier
from muv.muv_datasets import load_muv
from nci.nci_datasets import load_nci
from pcba.pcba_datasets import load_pcba
from tox21.tox21_datasets import load_tox21
from toxcast.toxcast_datasets import load_toxcast
from sider.sider_datasets import load_sider
from kaggle.kaggle_datasets import load_kaggle
from delaney.delaney_datasets import load_delaney
from nci.nci_datasets import load_nci
from pdbbind.pdbbind_datasets import load_pdbbind_grid
def benchmark_loading_datasets(hyper_parameters,
dataset='tox21', model='tf', split=None,
reload=True, out_path='.'):
"""
Loading dataset for benchmark test
Parameters
----------
hyper_parameters: dict of list
hyper parameters including dropout rate, learning rate, etc.
dataset: string, optional (default='tox21')
choice of which dataset to use, should be: tox21, muv, sider,
toxcast, pcba, delaney, kaggle, nci
model: string, optional (default='tf')
choice of which model to use, should be: rf, tf, tf_robust, logreg,
graphconv, tf_regression, graphconvreg
split: string, optional (default=None)
choice of splitter function, None = using the default splitter
out_path: string, optional(default='.')
path of result file
"""
if dataset in ['muv', 'pcba', 'tox21', 'sider', 'toxcast']:
mode = 'classification'
elif dataset in ['kaggle', 'delaney', 'nci','pdbbind']:
mode = 'regression'
else:
raise ValueError('Dataset not supported')
#assigning featurizer
if model in ['graphconv', 'graphconvreg']:
featurizer = 'GraphConv'
n_features = 75
elif model in ['tf', 'tf_robust', 'logreg', 'rf', 'tf_regression']:
featurizer = 'ECFP'
n_features = 1024
else:
raise ValueError('Model not supported')
if dataset in ['kaggle']:
featurizer = None #kaggle dataset use its own features
if split in ['random', 'scaffold']:
return
else:
split = None #kaggle dataset is already splitted
if not model in ['tf_regression']:
return
if dataset in ['pdbbind']:
featurizer = 'grid' #pdbbind use grid featurizer
if split in ['scaffold', 'index']:
return #skip the scaffold and index splitting of pdbbind
if not model in ['tf_regression']:
return
if not split in [None, 'index','random','scaffold']:
raise ValueError('Splitter function not supported')
loading_functions = {'tox21': load_tox21, 'muv': load_muv,
'pcba': load_pcba, 'nci': load_nci,
'sider': load_sider, 'toxcast': load_toxcast,
'kaggle': load_kaggle, 'delaney': load_delaney,
'pdbbind': load_pdbbind_grid}
print('-------------------------------------')
print('Benchmark %s on dataset: %s' % (model, dataset))
print('-------------------------------------')
time_start = time.time()
#loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, split=split)
else:
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer)
train_dataset, valid_dataset, test_dataset = all_dataset
time_finish_loading = time.time()
#time_finish_loading-time_start is the time(s) used for dataset loading
if dataset in ['kaggle','pdbbind']:
n_features = train_dataset.get_data_shape()[0]
#kaggle dataset has customized features
#running model
for count, hp in enumerate(hyper_parameters[model]):
time_start_fitting = time.time()
if mode == 'classification':
train_score, valid_score = benchmark_classification(
train_dataset, valid_dataset, tasks,
transformers, hp, n_features,
model=model)
elif mode == 'regression':
train_score, valid_score = benchmark_regression(
train_dataset, valid_dataset, tasks,
transformers, hp, n_features,
model=model)
time_finish_fitting = time.time()
with open(os.path.join(out_path, 'results.csv'),'a') as f:
writer = csv.writer(f)
if mode == 'classification':
for i in train_score:
output_line = [count, dataset, str(split), mode, 'train', i,
train_score[i]['mean-roc_auc_score'], 'valid', i,
valid_score[i]['mean-roc_auc_score'],
'time_for_running',
time_finish_fitting-time_start_fitting]
writer.writerow(output_line)
else:
for i in train_score:
output_line = [count, dataset, str(split), mode, 'train', i,
train_score[i]['mean-pearson_r2_score'], 'valid', i,
valid_score[i]['mean-pearson_r2_score'],
'time_for_running',
time_finish_fitting-time_start_fitting]
writer.writerow(output_line)
def benchmark_classification(train_dataset, valid_dataset, tasks,
transformers, hyper_parameters,
n_features, model='tf', seed=123):
"""
Calculate performance of different models on the specific dataset & tasks
Parameters
----------
train_dataset: dataset struct
loaded dataset using load_* or splitter function
valid_dataset: dataset struct
loaded dataset using load_* or splitter function
tasks: list of string
list of targets(tasks, datasets)
transformers: BalancingTransformer struct
loaded properties of dataset from load_* function
hyper_parameters: dict
hyper parameters including dropout rate, learning rate, etc.
n_features: integer
number of features, or length of binary fingerprints
model: string, optional (default='tf')
choice of which model to use, should be: rf, tf, tf_robust, logreg,
graphconv
Returns
-------
train_scores : dict
predicting results(AUC) on training set
valid_scores : dict
predicting results(AUC) on valid set
"""
train_scores = {}
valid_scores = {}
# Initialize metrics
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
assert model in ['rf', 'tf', 'tf_robust', 'logreg', 'graphconv']
if model == 'tf':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow MultiTaskDNN model
model_tf = dc.models.TensorflowMultiTaskClassifier(len(tasks),
n_features, layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts, dropouts=dropouts, penalty=penalty,
penalty_type=penalty_type, batch_s
| dnr2/fml-twitter | tweets.py | Python | mit | 1,301 | 0.017679 |
#gets COUNT tweets from user's timeline
import os
import tweepy
import cPickle as pickle
from config import Config
#constants
COUNT = 200
#tweepy configuration
keys = file('config.cfg')
cfg = Config(keys)
consumer_key= cfg.consumer_key
consumer_secret= cfg.consumer_secret
access_token= cfg.access_token
access_token_secret= cfg.access_token_secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
def get_tweets(username, isVerified):
if isVerified:
file_name = './verified/'+username+'/'+username+'_tweets.pickle'
else:
file_name = './unverified/'+username+'/'+username+'_tweets.pickle'
#save tweets
with open(file_name, 'wb') as f:
pickler = pickle.Pickler(f, -1)
tweet_count = 0
for tweet in tweepy.Cursor(api.user_timeline,screen_name=username).items(200):
pickler.dump(tweet)
tweet_count = tweet_count +1
print tweet_count
if __name__ == "__main__":
for directory in os.listdir("verified/"):
if directory == ".DS_Store":
continue
print directory
get_tweets(directory, True)
for directory in os.listdir("unverified/"):
print directory
get_tweets(directory, False)
| alflanagan/css_compare | doc/conf.py | Python | agpl-3.0 | 9,525 | 0.005879 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CSS Compare documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 6 06:29:25 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CSS Compare'
copyright = '2015, Adrian L. Flanagan'
author = 'Adrian L. Flanagan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CSSComparedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CSSCompare.tex', 'CSS Compare Documentation',
'Adrian L. Flanagan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manu
| melund/python-prompt-toolkit | examples/switch-between-vi-emacs.py | Python | bsd-3-clause | 1,411 | 0.000709 |
#!/usr/bin/env python
"""
Example that displays how to switch between Emacs and Vi input mode.
"""
from prompt_toolkit import prompt
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.keys import Keys
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.token import Token
def run():
# Create a set of key bindings that have Vi mode enabled if the
# ``vi_mode_enabled`` is True..
manager = KeyBindingManager.for_prompt()
# Add an additional key binding for toggling this flag.
@manager.registry.add_binding(Keys.F4)
def _(event):
" Toggle between Emacs and Vi mode. "
if event.cli.editing_mode == EditingMode.VI:
event.cli.editing_mode = EditingMode.EMACS
else:
event.cli.editing_mode = EditingMode.VI
# Add a bottom toolbar to display the status.
style = style_from_dict({
Token.Toolbar: 'reverse',
})
def get_bottom_toolbar_tokens(cli):
" Display the current input mode. "
text = 'Vi' if cli.editing_mode == EditingMode.VI else 'Emacs'
return [
(Token.Toolbar, ' [F4] %s ' % text)
]
prompt('> ', key_bindings_registry=manager.registry,
get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,
style=style)
if __name__ == '__main__':
run()
| wilvk/ansible | lib/ansible/modules/network/aci/aci_taboo_contract.py | Python | gpl-3.0 | 4,522 | 0.002211 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_taboo_contract
short_description: Manage taboo contracts on Cisco ACI fabrics (vz:BrCP)
description:
- Manage taboo contracts on Cisco ACI fabrics.
- More information from the internal APIC class I(vz:BrCP) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
taboo_contract:
description:
- The name of the Taboo Contract.
required: yes
aliases: [ name ]
description:
description:
- The description for the Taboo Contract.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
scope:
description:
- The scope of a service contract.
- The APIC defaults new Taboo Contracts to C(context).
choices: [ application-profile, context, global, tenant ]
default: context
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_taboo_contract:
host: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
taboo_contract: '{{ taboo_contract }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
taboo_contract=dict(type='str', required=False, aliases=['name']), # Not required for querying all contracts
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all contracts
scope=dict(type='str', choices=['application-profile', 'context', 'global', 'tenant']),
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['tenant', 'taboo_contract']],
['state', 'present', ['tenant', 'taboo_contract']],
],
)
taboo_contract = module.params['taboo_contract']
description = module.params['description']
scope = module.params['scope']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='vzTaboo',
aci_rn='taboo-{0}'.format(taboo_contract),
filter_target='eq(vzTaboo.name, "{0}")'.format(taboo_contract),
module_object=taboo_contract,
),
)
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='vzTaboo',
class_config=dict(
name=taboo_contract,
descr=description, scope=scope,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='vzTaboo')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| VolpeUSDOT/CV-PEP | lambda/cvp-qc/bucket_handler_lambda/bucket_event_lambda_handler.py | Python | mit | 9,274 | 0.003882 |
import os
import boto3
import json
import urllib.parse
from elasticsearch import ElasticsearchException
from botocore.exceptions import ClientError
from common.elasticsearch_client import *
from common.constants import *
from common.logger_utility import *
class HandleBucketEvent:
def _fetchS3DetailsFromEvent(self, event):
try:
sns_message = json.loads(event["Records"][0]["Sns"]["Message"])
bucket = sns_message["Records"][0]["s3"]["bucket"]["name"]
key = urllib.parse.unquote_plus(sns_message["Records"][0]["s3"]["object"]["key"])
except Exception as e:
LoggerUtility.logError(str(e))
LoggerUtility.logError("Failed to process the event")
raise e
else:
LoggerUtility.logInfo("Bucket name: " + bucket)
LoggerUtility.logInfo("Object key: " + key)
return bucket, key
def _getS3HeadObject(self, bucket_name, object_key):
s3_client = boto3.client(Constants.S3_SERVICE_CLIENT)
try:
response = s3_client.head_object(Bucket=bucket_name, Key=object_key)
except ClientError as e:
LoggerUtility.logError(e)
LoggerUtility.logError('Error getting object {} from bucket {}. Make sure they exist, '
'your bucket is in the same region as this function and necessary permissions '
'have been granted.'.format(object_key, bucket_name))
raise e
else:
return response
def _createMetadataObject(self, s3_head_object, key, bucket_name=None):
metadata = {
Constants.KEY_REFERENCE: key,
Constants.CONTENT_LENGTH_REFERENCE: s3_head_object[Constants.CONTENT_LENGTH_REFERENCE],
Constants.SIZE_MIB_REFERENCE: s3_head_object[Constants.CONTENT_LENGTH_REFERENCE] / 1024**2,
Constants.LAST_MODIFIED_REFERENCE: s3_head_object[Constants.LAST_MODIFIED_REFERENCE].isoformat(),
Constants.CONTENT_TYPE_REFERENCE: s3_head_object[Constants.CONTENT_TYPE_REFERENCE],
Constants.ETAG_REFERENCE: s3_head_object[Constants.ETAG_REFERENCE],
Constants.DATASET_REFERENCE: key.split('/')[0],
Constants.ENVIRONMENT_NAME: os.environ["ENVIRONMENT_NAME"]
}
if key.split('/')[0] == "waze":
if 'type' in key:
type_value = key.split('/type=')[1].split('/')[0]
type_metadata = {
Constants.TRAFFIC_TYPE_REFERENCE: type_value
}
metadata.update(type_metadata)
if 'table' in key:
table_value = key.split('/table=')[1].split('/')[0]
table_metadata = {
Constants.TABLE_NAME_REFERENCE: table_value
}
metadata.update(table_metadata)
if 'state' in key:
state_value = key.split('/state=')[1].split('/')[0]
state_metadata = {
Constants.STATE_REFERENCE: state_value
}
metadata.update(state_metadata)
elif key.split('/')[0] == "cv":
data_provider_type_value = key.split('/')[1]
data_provider_type_metadata = {
Constants.DATA_PROVIDER_REFERENCE: data_provider_type_value
}
metadata.update(data_provider_type_metadata)
data_type_value = key.split('/')[2]
data_type_metadata = {
Constants.DATA_TYPE_REFERENCE: data_type_value
}
metadata.update(data_type_metadata)
LoggerUtility.logInfo("METADATA: "+str(metadata))
return metadata
def _pushMetadataToElasticsearch(self, bucket_name, metadata):
try:
elasticsearch_endpoint = os.environ[Constants.ES_ENDPOINT_ENV_VAR]
except KeyError as e:
LoggerUtility.logError(str(e) + " not configured")
raise e
es_client = ElasticsearchClient.getClient(elasticsearch_endpoint)
try:
es_client.index(index=Constants.DEFAULT_INDEX_ID, doc_type=bucket_name, body=json.dumps(metadata))
except ElasticsearchException as e:
LoggerUtility.logError(e)
LoggerUtility.logError("Could not index in Elasticsearch")
raise e
def _publishCustomMetricsToCloudwatch(self, bucket_name, metadata):
try:
if bucket_name == os.environ["SUBMISSIONS_BUCKET_NAME"] and metadata["Dataset"] == "waze":
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-waze-submissions-bucket-metric',
MetricData=[
{
'MetricName' : 'Counts by state and traffic type',
'Dimensions' : [
{
'Name' : 'State',
'Value': metadata["State"]
},
{
'Name' : 'TrafficType',
'Value': metadata["TrafficType"]
}
],
'Value' : 1,
'Unit': 'Count'
},
]
)
if metadata["ContentLength"] == 166:
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-waze-zero-byte-submissions-metric',
MetricData=[
{
'MetricName' : 'Zero Byte Submissions by State and traffic type',
'Dimensions' : [
{
'Name' : 'State',
'Value': metadata["State"]
},
{
'Name' : 'TrafficType',
'Value': metadata["TrafficType"]
}
],
'Value' : 1,
'Unit': 'Count'
},
]
)
elif bucket_name == os.environ["SUBMISSIONS_BUCKET_NAME"] and metadata["Dataset"] == "cv":
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-cv-submissions-bucket-metric',
MetricData=[
{
'MetricName' : 'Counts by provider and datatype',
'Dimensions' : [
{
'Name' : 'DataProvider',
'Value': metadata["DataProvider"]
},
{
'Name' : 'DataType',
'Value': metadata["DataType"]
}
],
'Value' : 10,
'Unit': 'Count'
},
]
)
elif bucket_name == os.environ["CURATED_BUCKET_NAME"] and metadata["Dataset"] != "manifest":
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-waze-curated-bucket-metric',
MetricData=[
{
'MetricName' : 'Counts by state and table name',
'Dimensions' : [
{
'Nam
| Thraxis/pymedusa | sickbeard/providers/thepiratebay.py | Python | gpl-3.0 | 7,881 | 0.00368 |
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import traceback
import validators
from requests.compat import urljoin
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class ThePirateBayProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
"""ThePirateBay Torrent provider"""
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, 'ThePirateBay')
# Credentials
self.public = True
# URLs
self.url = 'https://thepiratebay.org'
self.urls = {
'rss': urljoin(self.url, 'tv/latest'),
'search': urljoin(self.url, 's/'), # Needs trailing /
}
self.custom_url = None
# Proper Strings
# Miscellaneous Options
self.confirmed = True
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tvcache.TVCache(self, min_time=20) # only poll ThePirateBay every 20 minutes max
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
results = []
"""
205 = SD, 208 = HD, 200 = All Videos
https://pirateproxy.pl/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
"""
search_params = {
'q': '',
'type': 'search',
'orderby': 7,
'page': 0,
'category': 200
}
# Units
units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']
def process_column_header(th):
result = ''
if th.a:
result = th.a.get_text(strip=True)
if not result:
result = th.get_text(strip=True)
return result
for mode in search_strings:
items = []
logger.log('Search mode: {0}'.format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
search_url = self.urls['search'] if mode != 'RSS' else self.urls['rss']
if self.custom_url:
if not validators.url(self.custom_url):
logger.log('Invalid custom url: {0}'.format(self.custom_url), logger.WARNING)
return results
search_url = urljoin(self.custom_url, search_url.split(self.url)[1])
if mode != 'RSS':
search_params['q'] = search_string
logger.log('Search string: {search}'.format
(search=search_string), logger.DEBUG)
data = self.get_url(search_url, params=search_params, returns='text')
else:
data = self.get_url(search_url, returns='text')
if not data:
logger.log('No data returned from provider', logger.DEBUG)
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', id='searchResult')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
continue
labels = [process_column_header(label) for label in torrent_rows[0]('th')]
# Skip column headers
for result in torrent_rows[1:]:
try:
cells = result('td')
title = result.find(class_='detName')
title = title.get_text(strip=True) if title else None
download_url = result.find(title='Download this torrent using magnet')
download_url = download_url['href'] + self._custom_trackers if download_url else None
if download_url and 'magnet:?' not in download_url:
logger.log('Invalid ThePirateBay proxy please try another one', logger.DEBUG)
continue
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('SE')].get_text(strip=True))
leechers = try_int(cells[labels.index('LE')].get_text(strip=True))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the "
"minimum seeders: {0}. Seeders: {1}".format
(title, seeders), logger.DEBUG)
continue
# Accept Torrent only from Good People for every Episode Search
if self.confirmed and not result.find(alt=re.compile(r'VIP|Trusted')):
if mode != 'RSS':
logger.log("Found result {0} but that doesn't seem like a trusted"
" result so I'm ignoring it".format(title), logger.DEBUG)
continue
# Convert size after all possible skip scenarios
torrent_size = cells[labels.index('Name')].find(class_='detDesc').get_text(strip=True).split(', ')[1]
torrent_size = re.sub(r'Size ([\d.]+).+([KMGT]iB)', r'\1 \2', torrent_size)
size = convert_size(torrent_size, units=units) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
'hash': None,
}
if mode != 'RSS':
logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
(title, seeders, leechers), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
logger.log('Failed parsing provider. Traceback: {0!r}'.format
(traceback.format_exc()), logger.ERROR)
continue
results += items
return results
provider = ThePirateBayProvider()
| kmad1729/python_notes | metaprogramming/practice_code/metatype_demo.py | Python | unlicense | 583 | 0.010292 |
from cls_method_decorators import debugmethods
class metatype(type):
def __new__(cls, cls_name, bases, cls_dict):
clsobj = super().__new__(cls, cls_name, bases, cls_dict)
clsobj = debugmethods(clsobj)
return clsobj
class Animal(metaclass = metatype):
def __init__(self, name):
self.name = name
def greet(self):
print("hi, my name is ", self.name)
class Dog(Animal):
def greet(self):
print("Woof!, my name is", self.name)
class Cat(Animal):
def greet(self):
print("meow!, my name is",
|
self.name)
| inclement/kivy | kivy/uix/behaviors/button.py | Python | mit | 6,290 | 0 |
'''
Button Behavior
===============
The :class:`~kivy.uix.behaviors.button.ButtonBehavior`
`mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
:class:`~kivy.uix.button.Button` behavior. You can combine this class with
other widgets, such as an :class:`~kivy.uix.image.Image`, to provide
alternative buttons that preserve Kivy button behavior.
For an overview of behaviors, please refer to the :mod:`~kivy.uix.behaviors`
documentation.
Example
-------
The following example adds button behavior to an image to make a checkbox that
behaves like a button::
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.behaviors import ButtonBehavior
class MyButton(ButtonBehavior, Image):
def __init__(self, **kwargs):
super(MyButton, self).__init__(**kwargs)
self.source = 'atlas://data/images/defaulttheme/checkbox_off'
def on_press(self):
self.source = 'atlas://data/images/defaulttheme/checkbox_on'
def on_release(self):
self.source = 'atlas://data/images/defaulttheme/checkbox_off'
class SampleApp(App):
def build(self):
return MyButton()
SampleApp().run()
See :class:`~kivy.uix.behaviors.ButtonBehavior` for details.
'''
__all__ = ('ButtonBehavior', )
from kivy.clock import Clock
from kivy.config import Config
from kivy.properties import OptionProperty, ObjectProperty, \
BooleanProperty, NumericProperty
from time import time
class ButtonBehavior(object):
'''
This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
:class:`~kivy.uix.button.Button` behavior. Please see the
:mod:`button behaviors module <kivy.uix.behaviors.button>` documentation
for more information.
:Events:
`on_press`
Fired when the button is pressed.
`on_release`
Fired when the button is released (i.e. the touch/click that
pressed the button goes away).
'''
state = OptionProperty('normal', options=('normal', 'down'))
'''The state of the button, must be one of 'normal' or 'down'.
The state is 'down' only when the button is currently touched/clicked,
otherwise its 'normal'.
:attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
to 'normal'.
'''
last_touch = ObjectProperty(None)
'''Contains the last relevant touch received by the Button. This can
be used in `on_press` or `on_release` in order to know which touch
dispatched the event.
.. versionadded:: 1.8.0
:attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and
defaults to `None`.
'''
min_state_time = NumericProperty(0)
'''The minimum period of time which the widget must remain in the
`'down'` state.
.. versionadded:: 1.9.1
:attr:`min_state_time` is a float and defaults to 0.035. This value is
taken from :class:`~kivy.config.Config`.
'''
always_release = BooleanProperty(False)
'''This determines whether or not the widget fires an `on_release` event if
the touch_up is outside the widget.
.. versionadded:: 1.9.0
.. versionchanged:: 1.10.0
The default value is now False.
:attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `False`.
'''
def __init__(self, **kwargs):
self.register_event_type('on_press')
self.register_event_type('on_release')
if 'min_state_time' not in kwargs:
self.min_state_time = float(Config.get('graphics',
'min_state_time'))
super(ButtonBehavior, self).__init__(**kwargs)
self.__state_event = None
self.__touch_time = None
self.fbind('state', self.cancel_event)
def _do_press(self):
self.state = 'down'
def _do_release(self, *args):
self.state = 'normal'
def cancel_event(self, *args):
if self.__state_event:
self.__state_event.cancel()
self.__state_event = None
def on_touch_down(self, touch):
if super(ButtonBehavior, self).on_touch_down(touch):
return True
if touch.is_mouse_scrolling:
return False
if not self.collide_point(touch.x, touch.y):
return False
if self in touch.ud:
return False
touch.grab(self)
touch.ud[self] = True
self.last_touch = touch
self.__touch_time = time()
self._do_press()
self.dispatch('on_press')
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
return True
if super(ButtonBehavior, self).on_touch_move(touch):
return True
return self in touch.ud
def on_touch_up(self, touch):
if touch.grab_current is not self:
return super(ButtonBehavior, self).on_touch_up(touch)
assert(self in touch.ud)
touch.ungrab(self)
self.last_touch = touch
if (not self.always_release and
not self.collide_point(*touch.pos)):
self._do_release()
return
touchtime = time() - self.__touch_time
if touchtime < self.min_state_time:
self.__state_event = Clock.schedule_once(
self._do_release, self.min_state_time - touchtime)
else:
self._do_release()
self.dispatch('on_release')
return True
def on_press(self):
pass
def on_release(self):
pass
def trigger_action(self, duration=0.1):
'''Trigger whatever action(s) have been bound to the button by calling
both the on_press and on_release callbacks.
This simulates a quick button press without using any touch events.
Duration is the length of the press in seconds. Pass 0 if you want
the action to happen instantly.
.. versionadded:: 1.8.0
'''
self._do_press()
self.dispatch('on_press')
def trigger_release(dt):
self._do_release()
self.dispatch('on_release')
if not duration:
trigger_release(0)
else:
Clock.schedule_once(trigger_release, duration)
| santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/words/test/test_jabbercomponent.py | Python | bsd-3-clause | 4,313 | 0.002319 |
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os, sha
from twisted.trial import unittest
from twisted.words.protocols.jabber.component import ConnectComponentAuthenticator, ComponentInitiatingInitializer
from twisted.words.protocols import jabber
from twisted.words.protocols.jabber import xmlstream
class DummyTransport:
def __init__(self, list):
self.list = list
def write(self, bytes):
self.list.append(bytes)
class ComponentInitiatingInitializerTest(unittest.TestCase):
def setUp(self):
self.output = []
self.authenticator = xmlstream.Authenticator()
self.authenticator.password = 'secret'
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.namespace = 'test:component'
self.xmlstream.send = self.output.append
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns='test:component' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
self.init = ComponentInitiatingInitializer(self.xmlstream)
def testHandshake(self):
"""
Test basic operations of component handshake.
"""
d = self.init.initialize()
# the initializer should have sent the handshake request
handshake = self.output[0]
self.assertEquals('handshake', handshake.name)
self.assertEquals('test:component', handshake.uri)
self.assertEquals(sha.new("%s%s" % ('12345', 'secret')).hexdigest(),
unicode(handshake))
# successful authentication
handshake.children = []
self.xmlstream.dataReceived(handshake.toXml())
return d
class ComponentAuthTest(unittest.TestCase):
def authPassed(self, stream):
self.authComplete = True
def testAuth(self):
self.authComplete = False
outlist = []
ca = ConnectComponentAuthenticator("cjid", "secret")
xs = xmlstream.XmlStream(ca)
xs.transport = DummyTransport(outlist)
xs.addObserver(xmlstream.STREAM_AUTHD_EVENT,
self.authPassed)
# Go...
xs.connectionMade()
xs.dataReceived("<stream:stream xmlns='jabber:component:accept' xmlns:stream='http://etherx.jabber.org/streams' from='cjid' id='12345'>")
# Calculate what we expect the handshake value to be
hv = sha.new("%s%s" % ("12345", "secret")).hexdigest()
self.assertEquals(outlist[1], "<handshake>%s</handshake>" % (hv))
xs.dataReceived("<handshake/>")
self.assertEquals(self.authComplete, True)
class JabberServiceHarness(jabber.component.Service):
def __init__(self):
self.componentConnectedFlag = False
self.componentDisconnectedFlag = False
self.transportConnectedFlag = False
def componentConnected(self, xmlstream):
self.componentConnectedFlag = True
def componentDisconnected(self):
self.componentDisconnectedFlag = True
def transportConnected(self, xmlstream):
self.transportConnectedFlag = True
class TestJabberServiceManager(unittest.TestCase):
def testSM(self):
# Setup service manager and test harnes
sm = jabber.component.ServiceManager("foo", "password")
svc = JabberServiceHarness()
svc.setServiceParent(sm)
# Create a write list
wlist = []
# Setup a XmlStream
xs = sm.getFactory().buildProtocol(None)
xs.transport = self
xs.transport.write = wlist.append
# Indicate that it's connected
xs.connectionMade()
# Ensure the test service harness got notified
self.assertEquals(True, svc.transportConnectedFlag)
# Jump ahead and pretend like the stream got auth'd
xs.dispatch(xs, xmlstream.STREAM_AUTHD_EVENT)
# Ensure the test service harness got notified
self.assertEquals(True, svc.componentConnectedFlag)
# Pretend to drop the connection
xs.connectionLost(None)
# Ensure the test service harness got notified
self.assertEquals(True, svc.componentDisconnectedFlag)
| jaggu303619/asylum | openerp/addons/crm/report/__init__.py | Python | agpl-3.0 | 1,107 | 0.00271 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_lead_report
import crm_phonecall_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| anderasberger/pydnsmap | pydnsmap/util.py | Python | bsd-3-clause | 18,175 | 0.010344 |
# Copyright (c) 2014, FTW Forschungszentrum Telekommunikation Wien
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of FTW nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL FTW
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
from collections import defaultdict
import itertools
import logging
import re
import linecache
import netaddr
import GeoIP
geodb_ = GeoIP.open("data/GeoIPASNum.dat", GeoIP.GEOIP_MEMORY_CACHE)
def getTopDomainSuffix(domains, levels=2):
d=defaultdict(int)
for domain in domains:
sdomain=domain.split('.')
if len(sdomain)<levels:
suffix=domain
else:
suffix='.'.join(sdomain[-levels:])
d[suffix]+=1
domainCounts=d.items()
_,counts=zip(*domainCounts)
domainCounts.sort(key=lambda x:x[1])
return (domainCounts[-1][0], domainCounts[-1][1]/float(sum(counts)))
def splitOnCondition(seq, condition):
"""
Splits a list of tuples (<x>,<y>) in two lists, depending on the condition
on <y>. Returns the <x> elements as tuple of two lists.
"""
l1,l2 = itertools.tee((condition(item),item) for item in seq)
return ([i[0] for p, i in l1 if p], [i[0] for p, i in l2 if not p])
def minmax(data):
"""
Computes the minimum and maximum values in one-pass using only
1.5*len(data) comparisons
"""
it = iter(data)
try:
lo = hi = next(it)
except StopIteration:
raise ValueError('minmax() arg is an empty sequence')
for x, y in itertools.izip_longest(it, it, fillvalue=lo):
if x > y:
x, y = y, x
if x < lo:
lo = x
if y > hi:
hi = y
return lo, hi
def dnameEquality(d1, d2):
"""
returns an array of bools of length max(domain-levels(d1),
domain-levels(d2)). The i-th element of the array is True if i-ld(d1) ==
i-ld(d2), else it's False. d1 and d2 are aligned on the top level domain.
"""
sd1 = d1.split('.')
sd2 = d2.split('.')
if not sd1 or not sd2:
raise Exception('invalid domain names: '+d1+' '+d2)
l_d1 = len(sd1)
l_d2 = len(sd2)
if d1 == d2:
return [True]*l_d1
else:
min_l = min(l_d1, l_d2)
matchmap = [False] * min_l
for i in range(min_l):
print sd1[-1-i], sd2[-1-i]
if sd1[-1-i] == sd2[-1-i]:
matchmap[-1-i] = True
return matchmap
def getAsnAndOrganisation(ip):
try:
answer = geodb_.org_by_addr(str(ip))
except GeoIP.error:
return (None, None)
else:
if answer:
if answer.startswith('AS'):
try:
first_space = answer.index(' ')
except ValueError:
asn = int(answer[2:])
return (asn, None)
else:
asn = int(answer[2:first_space])
org = answer[first_space+1:]
return (asn, org)
else:
return (None, answer)
else:
return (None, None)
class SetGrab:
"""
Return the object in a set that matches <value>.
To be used as follows:
s=set(['foobar', 'foo', 'bar'])
g=SetGrab('foobar')
if g in s:
return g.actual_value
http://python.6.n6.nabble.com/Get-item-from-set-td1530758.html
"""
def __init__(self, value):
self.search_value = value
def __hash__(self):
return hash(self.search_value)
def __eq__(self, other):
if self.search_value == other:
self.actual_value = other
return True
return False
def punyDecodeDomain(dname):
if 'xn--' in dname:
try:
return dname.decode('idna')
except UnicodeError:
"""
there's a python bug that causes the german 'scharfes s' not to be
decoded correctly
"""
logging.warn(u'IDNA decoding failed for '+unicode(dname))
return dname
else:
return dname
def memory_usage():
"""Memory usage of the current process in kilobytes."""
status = None
result = {'peak': 0, 'rss': 0}
try:
# This will only work on systems with a /proc file system
# (like Linux).
status = open('/proc/self/status')
for line in status:
parts = line.split()
key = parts[0][2:-1].lower()
if key in result:
result[key] = int(parts[1])
finally:
if status is not None:
status.close()
return result
def filterSingles(data):
"""
"""
from collections import defaultdict
domainToIPs = defaultdict(set)
IPToDomains = defaultdict(set)
for d in data:
domainToIPs[d[1]].add(d[2])
IPToDomains[d[2]].add(d[1])
remainingDomains=set()
for domain, IPs in domainToIPs.iteritems():
if len(IPs)==1:
ip=IPs.pop()
if len(IPToDomains[ip])==1:
continue
remainingDomains.add(domain)
numRemaining=len(set([d[1] for d in data if d[1] in remainingDomains]))
print numRemaining, '/',len(domainToIPs),' domains left after removing singles'
filteredData=[]
for d in data:
if d[1] in remainingDomains:
filteredData.append(d)
return filteredData
def filterSuspiciousData(data, minNumDomains=2, minNumIPs=2):
"""
"""
from collections import defaultdict
domainToIPs = defaultdict(set)
IPToDomains = defaultdict(set)
for d in data:
domainToIPs[d[1]].add(d[2])
IPToDomains[d[2]].add(d[1])
remainingDomains=set()
for domain, IPs in domainToIPs.iteritems():
if len(IPs)<minNumIPs:
continue
for ip in IPs:
"""
find the number of domains to which <ip> maps
"""
numDomains = len(IPToDomains[ip])
if numDomains>=minNumDomains:
"""
This is an interesting domain-IP mapping, let's keep this domain
"""
remainingDomains.add(domain)
break
numRemaining=len(set([d[1] for d in data if d[1] in remainingDomains]))
print numRemaining, '/',len(domainToIPs),' domains left'
filteredData=[]
for d in data:
if d[1] in remainingDomains:
filteredData.append(d)
return filteredData
def readSuspiciousFile(filename, lineNumStart=1, lineNumStop=0,
omitNewIPs=False, filterExp=[], removeSingles=True):
"""
expected format:
timestamp fqdn IP None score <number of IPBlocks in which this fqdn
appears> <number of fqdns in the IPBlock which contains this IP>
"""
data=[]
lineNum=lineNumStart
if filterExp:
filterHits=dict.fromkeys([regex.pattern for regex in filterExp], 0
| lingtools/lingtools | extract_elp_prons.py | Python | apache-2.0 | 4,566 | 0 |
#!/usr/bin/env python
"""
Extract pronunciations from the ELP items.
Outputs a CSV with the orthographic and phonological form on each
line. The phonological form is stripped of syllabification and stress
markers.
"""
# Copyright 2013 Constantine Lignos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from lingtools.corpus.elp import ELP, NULL
# " is primary stress, % is secondary, . is syllable boundary
DELETION_CHARS = '"%.'
# These represent a reasonable attempt to map the phonemes to
# one-character versions. The distinction between @` and 3` is
# removed; it is not present in most standard phone sets. Flap (4) is
# left alone as it cannot be mapped back to its underlying form.
PHON_REPLACEMENTS = (
# R-colored schwa
("@`", "R"),
("3`", "R"),
# In the ELP it is always `, but some hand output uses '
("3'", "R"),
("@'", "R"),
# Syllabic l
("l=", "L"),
# Move engma to G to leave N for syllabic n.
("N", "G"),
# Syllabic n. Note that N is engma in the original.
("n=", "N"),
# Syllabic m
("m=", "M"),
# dZ to J (like JH in Arpabet)
("dZ", "J"),
# tS to C (like CH in Arpabet)
("tS", "C"),
# aI to Y (like AY in Arpabet)
("aI", "Y"),
# aU to W (like AW in Arpabet)
("aU", "W"),
# OI to 8 (cannot use O like OY in Arpabet, as O is in use)
("OI", "8"),
)
def replace_phons(pron):
"""Replace phonemes using the PHON_REPLACEMENTS table."""
for replacement in PHON_REPLACEMENTS:
pron = pron.replace(*replacement)
return pron
def extract(input_path, output_path, mono_only, cmudict_format, target_sylls):
"""Extract words from the input path and write them to the output."""
with open(output_path, 'wb') as output_file:
elp = ELP(input_path)
# Sort by lowercase version of entry
words = sorted(elp.keys(), key=lambda s: s.lower())
count = 0
for word in words:
entry = elp[word]
# Extract orthography and pron
pron = entry.pron
nsyll = entry.nsyll
# Match syllable numbers if specified
if target_sylls is not None and nsyll != target_sylls:
continue
# Skip non-monomorphs if specified
if mono_only and not entry.monomorph:
continue
# Skip NULL prons, get the length if there is a pron.
if pron == NULL:
continue
else:
n_phon = entry.nphon
# Perform phoneme replacement on the pron
pron = replace_phons(pron)
# Remove stress/syllable markers
pron = pron.translate(None, DELETION_CHARS)
# Check that length matches
if len(pron) != n_phon:
print "Bad pronunciation for {!r}:".format(word)
print "Pron. {!r} of length {}, expected {}.".format(
pron, len(pron), n_phon)
continue
out_line = ("{},{}".format(word, pron) if not cmudict_format else
"{} {}".format(word.upper(), " ".join(pron)))
print >> output_file, out_line
count += 1
print "{} pronunciations written to {}".format(count, output_path)
def main():
"""Parse arguments and call the extractor."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('input', help='input CSV file')
parser.add_argument('output', help='output CSV file')
parser.add_argument('-m', '--mono', action='store_true',
help='output only monomorphemic items')
parser.add_argument('-s', '--sylls', nargs='?', type=int, metavar='n',
help='output only items with n syllables')
parser.add_argument('-c', '--cmudict', action='store_true',
help='output in CMUDict format')
args = parser.parse_args()
extract(args.input, args.output, args.mono, args.cmudict, args.sylls)
if __name__ == "__main__":
main()
| ankitjavalkar/algosutra | yaksh_video_replacer/replacer.py | Python | gpl-2.0 | 2,250 | 0.005333 |
from bs4 import BeautifulSoup
import os
PATH = '/home/arj/arj_projects/imp_dump/kv_advanced/course_material/Advanced_Python_Course_(KV_October_2019)'
VID_DIR = 'advanced_videos'
LESSON_VID_MAP = {
# 'AD_-_Decorators_2': '29B_Decorators.webm',
# 'AD_-_Introduction': '01_Introduction.webm',
# 'AD_-_Exercise1': '04_Exercise_1.webm',
# 'AD_-_Exercise5': '08_Exercise_5.webm',
}
def dir_walk(path):
""" Use to walk through all objects in a directory."""
for f in os.listdir(path):
yield os.path.join(path, f)
def walker(path):
flist = []
reslist = _walker(path, flist)
return reslist
def _walker(dirpath, flist):
for f in os.listdir(dirpath):
fpath = os.path.join(dirpath, f)
if os.path.isfile(fpath):
flist.append(fpath)
else:
_walker(fpath, flist)
return flist
def get_soup(filepath):
with open(filepath, 'r') as f:
html = f.read()
return BeautifulSoup(html, 'html.parser')
def write_soup(filepath, soup):
with open(filepath, 'w') as f:
f.write(str(soup))
def check_iframe(filepath):
soup = get_soup(filepath)
return soup.iframe
def replace(filepath, video_dir, video_name):
soup = get_soup(filepath)
new_vid_tag = soup.new_tag('video')
new_vid_tag['width'] = "560"
new_vid_tag['height'] = "315"
new_vid_tag['controls'] = None
src_tag = soup.new_tag('source')
src_tag['src'] = "../../../{0}/{1}".format(video_dir, video_name)
src_tag['type'] = "video/mp4"
new_vid_tag.append(src_tag)
soup.iframe.replace_with(new_vid_tag)
write_soup(filepath, soup)
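# For illustration (hypothetical values): with VID_DIR = 'advanced_videos' and
# video_name = '01_Introduction.webm', the <iframe> is replaced by roughly:
# <video width="560" height="315" controls>
#     <source src="../../../advanced_videos/01_Introduction.webm" type="video/mp4"/>
# </video>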
if __name__ == '__main__':
reslist = walker(PATH)
for f in reslist:
fpath, fname = os.path.split(f)
if 'html' in fname:
fn, fext = fname.split('.')
else:
print("NON HTML File: ", fname)
continue
if fn in LESSON_VID_MAP and check_iframe(f):
vid_name = LESSON_VID_MAP.get(fn)
if vid_name:
replace(f, VID_DIR, vid_name)
print("REPLACED: Video: ", vid_name)
else:
print("NO VIDEO FOUND: File: ", fname)
else:
print("Unknown FILE or NO IFRAME")
|
IfcOpenShell/IfcOpenShell
|
src/blenderbim/blenderbim/bim/module/style/prop.py
|
Python
|
lgpl-3.0
| 1,292
| 0
|
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
from blenderbim.bim.ifc import IfcStore
from blenderbim.bim.prop import StrProperty, Attribute
from bpy.types import PropertyGroup
from bpy.props import (
PointerProperty,
StringProperty,
EnumProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
CollectionProperty,
)
class BIMStyleProperties(PropertyGroup):
attributes: CollectionProperty(name="Attributes", type=Attribute)
is_editing: BoolProperty(name="Is Editing")
|
tbtimes/checkup-survey
|
editorial_board/dbtemplates/south_migrations/0002_auto__del_unique_template_name.py
|
Python
|
mit
| 1,636
| 0.006724
|
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Template', fields ['name']
db.delete_unique('django_template', ['name'])
def backwards(self, orm):
# Adding unique constraint on 'Template', fields ['name']
db.create_unique('django_template', ['name'])
models = {
'dbtemplates.template': {
'Meta': {'ordering': "('name',)", 'object_name': 'Template', 'db_table': "'django_template'"},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['dbtemplates']
|
dpapathanasiou/recipebook
|
sites/wsonoma.py
|
Python
|
mit
| 2,219
| 0.001803
|
#!/usr/bin/env python
"""
wsonoma.py
This module inherits from RecipeParser, and provides an implementation
for parsing recipes from the williams-sonoma.com site.
"""
from urllib.parse import urlsplit
from parser import RecipeParser
class WilliamsSonoma(RecipeParser):
def getTitle(self):
"""The title format is:
<title>Recipe | Williams Sonoma</title>
we want just 'Recipe'
"""
return self.tree.xpath('//title')[0].text.split('|')[0].strip()
def getImage(self):
"""The image format is:
<meta property="og:image" content="IMG_URL">
we want just 'IMG_URL'
"""
return self.tree.xpath('//meta[@property="og:image"]')[0].get('content')
def getIngredients(self):
"""Return a list or a map of the recipe ingredients"""
data = []
for node in self.tree.xpath('//li[@itemprop="ingredient"]'):
data.append(''.join(node.xpath('descendant-or-self::text()')).strip())
return data
def getDirections(self):
"""Return a list or a map of the preparation instructions"""
data = []
for node in self.tree.xpath('//div[@class="directions"]'):
data.append(node.xpath('descendant-or-self::text()'))
return [_f for _f in [x.strip() for x in data[0]] if _f]
def getTags(self):
"""Return a list of tags for this recipe"""
return []
def getOtherRecipeLinks(self):
"""Return a list of other recipes found in the page: while single recipe
pages do not have links, the various categories at
http://www.williams-sonoma.com/recipe/ do.
For example,
http://www.williams-sonoma.com/search/results.html?activeTab=recipes&words=winter_weeknight_dinners
has a collection of individual recipe links, and this method will find them.
"""
data = []
for link in self.tree.xpath('//ul[@class="recipe-list"]/li/a'):
if 'href' in list(link.keys()):
href = urlsplit(link.get('href'))
if 'cm_src=RECIPESEARCH' == href.query:
data.append(href.scheme + '://' + href.netloc + href.path)
return data
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/utils/hessian.py
|
Python
|
apache-2.0
| 1,287
| 0
|
import numpy as np
def hessian(im_input, sigma):
"""
Calculates the hessian of image im_input convolved with a gaussian kernel with
covariance C = [Sigma^2 0; 0 Sigma^2].
Parameters
----------
im_input : array_like
M x N grayscale image.
sigma : double
standard deviation of gaussian kernel.
Returns
-------
im_hess : array_like
M x N x 4 hessian matrix - im_hess[:,:,0] = dxx,
im_hess[:,:,1] = im_hess[:,:,2] = dxy, im_hess[:,:,3] = dyy.
"""
from scipy.ndimage.filters import convolve
# generate kernel domain
h, k = round(3 * sigma), round(3 * sigma + 1)
x, y = np.mgrid[-h:k, -h:k]
# generate kernels
gxx = 1./(2 * np.pi * sigma ** 4) * ((x / sigma) ** 2 - 1) * \
np.exp(-(x**2+y**2) / (2 * sigma ** 2))
gxy = 1./(2 * np.pi * sigma ** 6) * np.multiply(x, y) * \
np.exp(-(x**2+y**2) / (2 * sigma ** 2))
gyy = np.transpose(gxx)
# convolve
dxx = convolve(im_input, gxx, mode='constant')
dxy = convolve(im_input, gxy, mode='constant')
dyy = convolve(im_input, gyy, mode='constant')
# format output
im_hess = np.concatenate(
(dxx[:, :, None], dxy[:, :, None], dxy[:, :, None], dyy[:, :, None]),
axis=2
)
return im_hess
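# Minimal usage sketch (synthetic input, not tied to any HistomicsTK dataset):
# im = np.random.rand(64, 64)        # hypothetical grayscale image
# im_hess = hessian(im, sigma=2.0)   # result has shape (64, 64, 4)
# dxx, dxy, dyy = im_hess[..., 0], im_hess[..., 1], im_hess[..., 3]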
|
foosel/OctoPrint
|
tests/settings/__init__.py
|
Python
|
agpl-3.0
| 394
| 0.007653
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Unit tests for ``octoprint.settings``.
"""
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2016 The OctoPrint Project - Released under terms of the AGPLv3 License"
|
gislab-npo/gislab-web
|
server/webgis/map/views.py
|
Python
|
gpl-2.0
| 9,021
| 0.002439
|
import os
import re
import json
import urllib.parse
import urllib.request
import contextlib
import hashlib
from lxml import etree
from django.conf import settings
from django.http import HttpResponse, JsonResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_headers
from django.core.exceptions import PermissionDenied
from webgis.map.wfsfilter import webgisfilter
from webgis.libs.utils import set_query_parameters
from webgis.mapcache import get_tile_response, get_legendgraphic_response, \
WmsLayer, TileNotFoundException
from webgis.map.project import clean_project_name, get_project, filter_user_roles, \
get_project_info, get_last_project_version, InvalidProjectException
from webgis.auth import basic_auth
from webgis.auth.decorators import login_required
def abs_project_path(project):
return os.path.join(settings.GISQUICK_PROJECT_ROOT, project)
def check_project_access(request, project, project_auth):
if project_auth == "all":
return True
elif project_auth == "authenticated":
return request.user.is_authenticated
elif project_auth == "owner":
project_owner = project.split('/', 1)[0]
return request.user.is_authenticated and (project_owner == request.user.username or request.user.is_superuser)
return False
def check_layer_access(user_roles, layer_name, permission):
for role in user_roles:
perms = role['permissions']['layers']
if perms[layer_name][permission]:
return True
return False
def map_project(request):
try:
project_data = get_project(request)
return JsonResponse(project_data, status=project_data['status'])
except InvalidProjectException:
raise Http404
project_name_pattern = re.compile('(.+)_(\d{10})')
def parse_project_name(name):
match = project_name_pattern.match(name)
if match:
return match.group(1), int(match.group(2))
return name, None
@csrf_exempt
@vary_on_headers('Authorization')
def ows(request):
params = {key.upper(): request.GET[key] for key in request.GET.keys()}
ows_project = clean_project_name(params.get('MAP'))
project, timestamp = parse_project_name(ows_project)
project_hash = hashlib.md5(project.encode('utf-8')).hexdigest()
pi = get_project_info(project_hash, timestamp, project=ows_project)
if not request.user.is_authenticated:
basic_auth.is_authenticated(request)
if not check_project_access(request, project, pi['authentication']):
if not request.user.is_authenticated:
response = HttpResponse('Authentication required', status=401)
response['WWW-Authenticate'] = 'Basic realm=OWS API'
return response
raise PermissionDenied
if params.get('SERVICE') == 'WFS' and params.get('REQUEST') != 'GetFeature':
access_control = pi.get('access_control')
if access_control and access_control['enabled']:
root = etree.fromstring(request.body.decode())
user_roles = filter_user_roles(request.user, access_control['roles'])
for elem in root.findall('.//{*}Insert'):
for child in elem.getchildren():
layer_name = etree.QName(child).localname
if not check_layer_access(user_roles, layer_name, 'insert'):
raise PermissionDenied
checks = [
('.//{*}Update', 'update'),
('.//{*}Delete', 'delete')
]
for query_path, permission in checks:
for elem in root.findall(query_path):
layer_name = elem.get('typeName').split(':')[-1]
if not check_layer_access(user_roles, layer_name, permission):
raise PermissionDenied
url = "{0}?{1}".format(
settings.GISQUICK_MAPSERVER_URL.rstrip("/"),
request.environ['QUERY_STRING']
)
abs_project = abs_project_path(params.get('MAP'))
url = set_query_parameters(url, {'MAP': abs_project})
if request.method == 'POST':
owsrequest = urllib.request.Request(url, request.body)
else:
owsrequest = urllib.request.Request(url)
owsrequest.add_header("User-Agent", "Gisquick")
resp_content = b""
try:
with contextlib.closing(urllib.request.urlopen(owsrequest)) as resp:
while True:
data = resp.read()
if not data:
break
resp_content += data
if params.get('REQUEST', '') == 'GetCapabilities':
resp_content = resp_content.replace(
settings.GISQUICK_MAPSERVER_URL.encode(),
request.build_absolute_uri(request.path).encode()
)
content_type = resp.getheader('Content-Type')
status = resp.getcode()
return HttpResponse(resp_content, content_type=content_type, status=status)
except urllib.error.HTTPError as e:
# reason = e.read().decode("utf8")
return HttpResponse(e.read(), content_type=e.headers.get_content_type(), status=e.code)
def tile(request, project_hash, publish, layers_hash=None, z=None, x=None, y=None, format=None):
params = {key.upper(): request.GET[key] for key in request.GET.keys()}
project = params['PROJECT']+'.qgs'
mapserver_url = set_query_parameters(
settings.GISQUICK_MAPSERVER_URL,
{'MAP': abs_project_path(project)}
)
project_info = get_project_info(project_hash, publish, project=project)
if not project_info:
raise Http404
if not check_project_access(request, params['PROJECT'], project_info['authentication']):
raise PermissionDenied
try:
layer = WmsLayer(
project=project_hash,
publish=publish,
name=layers_hash,
provider_layers=params['LAYERS'].encode("utf-8"),
provider_url=mapserver_url,
image_format=format,
tile_size=256,
metasize=5,
extent=project_info['extent'],
resolutions=project_info['tile_resolutions'],
projection=project_info['projection']['code']
)
return get_tile_response(layer, z=z, x=x, y=y)
except TileNotFoundException as e:
raise Http404
def legend(request, project_hash, publish, layer_hash=None, zoom=None, format=None):
params = {key.upper(): request.GET[key] for key in request.GET.keys()}
project = params['PROJECT']+'.qgs'
mapserver_url = set_query_parameters(
settings.GISQUICK_MAPSERVER_URL,
{'MAP': abs_project_path(project)}
)
project_info = get_project_info(project_hash, publish, project=project)
if not project_info:
raise Http404
if not check_project_access(request, params['PROJECT'], project_info['authentication']):
raise PermissionDenied
try:
layer = WmsLayer(
project=project_hash,
publish=publish,
name=layer_hash,
provider_layers=params['LAYER'].encode('utf-8'),
provider_url=mapserver_url,
image_format=format,
)
params.pop('PROJECT')
params.pop('LAYER')
return get_legendgraphic_response(layer, zoom, **params)
except:
raise Http404
@csrf_exempt
def filterdata(request):
"""Handle filter requrest - using OGC WFS service
The request body should look like:
{
'layer': 'Places',
'maxfeatures': 1000,
'startindex': 0,
'bbox': [0, 1, 2, 3],
'filters': [{
'attribute': 'NAME',
'value': 'Prague',
'operator': '='
}]
}
sent as HTTP POST request
"""
# TODO: use check_project_access
if request.method == 'POST':
project = request.GET['PROJECT']
project = get_last_project_version(project) + '.qgs'
url = settings.GISQUICK_MAPSERVER_URL
params = {
'MAP': abs_project_path(project)
}
mapserv = '{}?{}'.format(url, urllib.parse.urlencode(params))
|
BeyondSkyCoder/BeyondCoder
|
leetcode/python/bulls_and_cows.py
|
Python
|
apache-2.0
| 1,021
| 0.002938
|
__author__ = 'BeyondSky'
from collections import defaultdict
class Solution(object):
def getHint(self, secret, guess):
"""
:type secret: str
:type guess: str
:rtype: str
"""
bulls = cows = 0
digits = defaultdict(int)
# first pass: count bulls and non-matching digits
for index in range(len(secret)):
if secret[index] == guess[index]:
# matches, count the number of bulls
bulls += 1
else:
# not match, increase number of non-matching digits
digits[secret[index]] += 1
# second pass: count number of cows
for index in range(len(secret)):
if secret[index] != guess[index]:
# decrease number of non-matching digit by 1 if it is greater than 0
if digits[guess[index]] > 0:
cows += 1
digits[guess[index]] -= 1
return str(bulls) + 'A' + str(cows) + 'B'
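# Worked example (the classic case): Solution().getHint("1807", "7810") returns
# "1A3B"; the '8' is a bull, and three of the remaining guess digits are counted
# as cows through the digits map in the second pass.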
|
khalim19/gimp-plugin-export-layers
|
export_layers/pygimplib/gui/itembox.py
|
Python
|
gpl-3.0
| 18,637
| 0.014863
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2019 khalim19
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines a custom widget holding an array of GUI elements. The widget
is used as the default GUI for `setting.ArraySetting` instances.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import collections
import contextlib
import pygtk
pygtk.require("2.0")
import gtk
import gobject
from .. import utils as pgutils
from . import draganddropcontext as draganddropcontext_
__all__ = [
"ItemBox",
"ArrayBox",
"ItemBoxItem",
]
class ItemBox(gtk.ScrolledWindow):
"""
This base class defines a scrollable box holding a vertical list of items.
Each item is an instance of the `ItemBoxItem` class or one of its subclasses.
"""
ITEM_SPACING = 4
VBOX_SPACING = 4
def __init__(self, item_spacing=ITEM_SPACING, *args, **kwargs):
super().__init__(*args, **kwargs)
self._item_spacing = item_spacing
self._drag_and_drop_context = draganddropcontext_.DragAndDropContext()
self._items = []
self._vbox_items = gtk.VBox(homogeneous=False)
self._vbox_items.set_spacing(self._item_spacing)
self._vbox = gtk.VBox(homogeneous=False)
self._vbox.set_spacing(self.VBOX_SPACING)
self._vbox.pack_start(self._vbox_items, expand=False, fill=False)
self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.add_with_viewport(self._vbox)
self.get_child().set_shadow_type(gtk.SHADOW_NONE)
def add_item(self, item):
self._vbox_items.pack_start(item.widget, expand=False, fill=False)
item.button_remove.connect("clicked", self._on_item_button_remove_clicked, item)
item.widget.connect("key-press-event", self._on_item_widget_key_press_event, item)
self._setup_drag(item)
self._items.append(item)
return item
def reorder_item(self, item, position):
new_position = min(max(position, 0), len(self._items) - 1)
self._items.pop(self._get_item_position(item))
self._items.insert(new_position, item)
self._vbox_items.reorder_child(item.widget, new_position)
return new_position
def remove_item(self, item):
item_position = self._get_item_position(item)
if item_position < len(self._items) - 1:
next_item_position = item_position + 1
self._items[next_item_position].item_widget.grab_focus()
self._vbox_items.remove(item.widget)
item.remove_item_widget()
self._items.remove(item)
def clear(self):
for unused_ in range(len(self._items)):
self.remove_item(self._items[0])
def _setup_drag(self, item):
self._drag_and_drop_context.setup_drag(
item.item_widget,
self._get_drag_data,
self._on_drag_data_received,
[item],
[item],
self)
def _get_drag_data(self, dragged_item):
return str(self._items.index(dragged_item))
def _on_drag_data_received(self, dragged_item_index_str, destination_item):
dragged_item = self._items[int(dragged_item_index_str)]
self.reorder_item(dragged_item, self._get_item_position(destination_item))
def _on_item_widget_key_press_event(self, widget, event, item):
if event.state & gtk.gdk.MOD1_MASK: # Alt key
key_name = gtk.gdk.keyval_name(event.keyval)
if key_name in ["Up", "KP_Up"]:
self.reorder_item(
item, self._get_item_position(item) - 1)
elif key_name in ["Down", "KP_Down"]:
self.reorder_item(
item, self._get_item_position(item) + 1)
def _on_item_button_remove_clicked(self, button, item):
self.remove_item(item)
def _get_item_position(self, item):
return self._items.index(item)
class ItemBoxItem(object):
_HBOX_BUTTONS_SPACING = 3
_HBOX_SPACING = 3
def __init__(self, item_widget):
self._item_widget = item_widget
self._hbox = gtk.HBox(homogeneous=False)
self._hbox.set_spacing(self._HBOX_SPACING)
self._hbox_buttons = gtk.HBox(homogeneous=False)
self._hbox_buttons.set_spacing(self._HBOX_BUTTONS_SPACING)
self._event_box_buttons = gtk.EventBox()
self._event_box_buttons.add(self._hbox_buttons)
self._hbox.pack_start(self._item_widget, expand=True, fill=True)
self._hbox.pack_start(self._event_box_buttons, expand=False, fill=False)
self._event_box = gtk.EventBox()
self._event_box.add(self._hbox)
self._has_hbox_buttons_focus = False
self._button_remove = gtk.Button()
self._setup_item_button(self._button_remove, gtk.STOCK_CLOSE)
self._event_box.connect("enter-notify-event", self._on_event_box_enter_notify_event)
self._event_box.connect("leave-notify-event", self._on_event_box_leave_notify_event)
self._is_event_box_allocated_size = False
self._buttons_allocation = None
self._event_box.connect("size-allocate", self._on_event_box_size_allocate)
self._event_box_buttons.connect(
"size-allocate", self._on_event_box_buttons_size_allocate)
self._event_box.show_all()
self._hbox_buttons.set_no_show_all(True)
@property
def widget(self):
return self._event_box
@property
def item_widget(self):
return self._item_widget
@property
def button_remove(self):
return self._button_remove
def remove_item_widget(self):
self._hbox.remove(self._item_widget)
def _setup_item_button(self, item_button, icon, position=None):
item_button.set_relief(gtk.RELIEF_NONE)
button_icon = gtk.image_new_from_pixbuf(
item_button.render_icon(icon, gtk.ICON_SIZE_MENU))
item_button.add(button_icon)
self._hbox_buttons.pack_start(item_button, expand=False, fill=False)
if position is not None:
self._hbox_buttons.reorder_child(item_button, position)
item_button.show_all()
def _on_event_box_enter_notify_event(self, event_box, event):
if event.detail != gtk.gdk.NOTIFY_INFERIOR:
self._hbox_buttons.show()
def _on_event_box_leave_notify_event(self, event_box, event):
if event.detail != gtk.gdk.NOTIFY_INFERIOR:
self._hbox_buttons.hide()
def _on_event_box_size_allocate(self, event_box, allocation):
if self._is_event_box_allocated_size:
return
self._is_event_box_allocated_size = True
# Assign enough height to the HBox to make sure it does not resize when
# showing buttons.
if self._buttons_allocation.height >= allocation.height:
self._hbox.set_property("height-request", allocation.height)
def _on_event_box_buttons_size_allocate(self, event_box, allocation):
if self._buttons_allocation is not None:
return
self._buttons_allocation = allocation
# Make sure the width allocated to the buttons remains the same even if
# buttons are hidden. This avoids a problem with unreachable buttons when
# the horizontal scrollbar is displayed.
self._event_box_buttons.set_property(
"width-request", self._buttons_allocation.width)
self._hbox_buttons.hide()
class ArrayBox(ItemBox):
"""
This class can be used to edit `setting.ArraySetting` instances interactively.
Signals:
* `"array-box-changed"` - An item was added, reordered or removed by the user.
* `"array-box-item-changed"` - The contents of an item was modified by the
user. Currently, this signal is not invoked in this widget and can only be
invoked explicitly by calling `ArrayBox.emit("array-box-item-changed")`.
"""
__gsignals__ = {
b"array-box-changed": (gobject.SIGNAL_RUN_FIRST, None, ()),
b"array-box-item-cha
|
diegojromerolopez/djanban
|
src/djanban/apps/reports/migrations/0011_auto_20170211_1640.py
|
Python
|
mit
| 573
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-11 15:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reports', '0010_auto_20170211_0306'),
]
operations = [
migrations.RemoveField(
model_name='listreport',
name='board',
),
migrations.RemoveField(
model_name='listreport',
name='list',
),
migrations.DeleteModel(
name='ListReport',
),
]
|
jhao104/proxy_pool
|
test.py
|
Python
|
mit
| 770
| 0.003916
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test.py
Description :
Author : JHao
date: 2017/3/7
-------------------------------------------------
Change Activity:
2017/3/7:
-------------------------------------------------
"""
__author__ = 'JHao'
from test import testProxyValidator
from test import testConfigHandler
from test import testLogHandler
from test import testDbClient
if __name__ == '__main__':
print("ConfigHandler:")
testConfigHandler.testConfig()
print("LogHandler:")
testLogHandler.testLogHandler()
print("DbClient:")
testDbClient.testDbClient()
print("ProxyValidator:")
testProxyValidator.testProxyValidator()
|
niphlod/pydal
|
tests/_helpers.py
|
Python
|
bsd-3-clause
| 758
| 0.001319
|
from ._compat import unittest
from ._adapt import DEFAULT_URI
from pydal import DAL
class DALtest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(DALtest, self).__init__(*args, **kwargs)
self._connections = []
def connect(self, *args, **kwargs):
if not args:
kwargs.setdefault('uri', DEFAULT_URI)
kwargs.setdefault('check_reserved', ['all'])
ret = DAL(*args, **kwargs)
self._connections.append(ret)
return ret
def tearDown(self):
for db in self._connections:
db.commit()
tablist = list(db.tables)
for table in reversed(tablist):
db[table].drop()
db.close()
self._connections = []
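# Typical usage sketch (hypothetical test case): subclasses call self.connect()
# to obtain a DAL instance whose tables are dropped and whose connection is
# closed automatically by tearDown(), e.g.
# class DefineTableTest(DALtest):
#     def test_define(self):
#         db = self.connect()
#         db.define_table('thing')
#         self.assertTrue('thing' in db.tables)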
|
PetePriority/home-assistant
|
homeassistant/components/satel_integra/__init__.py
|
Python
|
apache-2.0
| 5,379
| 0
|
"""
Support for Satel Integra devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/satel_integra/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
REQUIREMENTS = ['satel_integra==0.2.0']
DEFAULT_ALARM_NAME = 'satel_integra'
DEFAULT_PORT = 7094
DEFAULT_CONF_ARM_HOME_MODE = 1
DEFAULT_DEVICE_PARTITION = 1
DEFAULT_ZONE_TYPE = 'motion'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'satel_integra'
DATA_SATEL = 'satel_integra'
CONF_DEVICE_HOST = 'host'
CONF_DEVICE_PORT = 'port'
CONF_DEVICE_PARTITION = 'partition'
CONF_ARM_HOME_MODE = 'arm_home_mode'
CONF_ZONE_NAME = 'name'
CONF_ZONE_TYPE = 'type'
CONF_ZONES = 'zones'
CONF_OUTPUTS = 'outputs'
ZONES = 'zones'
SIGNAL_PANEL_MESSAGE = 'satel_integra.panel_message'
SIGNAL_PANEL_ARM_AWAY = 'satel_integra.panel_arm_away'
SIGNAL_PANEL_ARM_HOME = 'satel_integra.panel_arm_home'
SIGNAL_PANEL_DISARM = 'satel_integra.panel_disarm'
SIGNAL_ZONES_UPDATED = 'satel_integra.zones_updated'
SIGNAL_OUTPUTS_UPDATED = 'satel_integra.outputs_updated'
ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_NAME): cv.string,
vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): cv.string})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_DEVICE_HOST): cv.string,
vol.Optional(CONF_DEVICE_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DEVICE_PARTITION,
default=DEFAULT_DEVICE_PARTITION): cv.positive_int,
vol.Optional(CONF_ARM_HOME_MODE,
default=DEFAULT_CONF_ARM_HOME_MODE): vol.In([1, 2, 3]),
vol.Optional(CONF_ZONES,
default={}): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_OUTPUTS,
default={}): {vol.Coerce(int): ZONE_SCHEMA},
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the Satel Integra component."""
conf = config.get(DOMAIN)
zones = conf.get(CONF_ZONES)
outputs = conf.get(CONF_OUTPUTS)
host = conf.get(CONF_DEVICE_HOST)
port = conf.get(CONF_DEVICE_PORT)
partition = conf.get(CONF_DEVICE_PARTITION)
from satel_integra.satel_integra import AsyncSatel, AlarmState
controller = AsyncSatel(host, port, hass.loop, zones, outputs, partition)
hass.data[DATA_SATEL] = controller
result = await controller.connect()
if not result:
return False
async def _close():
controller.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close())
_LOGGER.debug("Arm home config: %s, mode: %s ",
conf,
conf.get(CONF_ARM_HOME_MODE))
task_control_panel = hass.async_create_task(
async_load_platform(hass, 'alarm_control_panel', DOMAIN, conf, config))
task_zones = hass.async_create_task(
async_load_platform(hass, 'binary_sensor', DOMAIN,
{CONF_ZONES: zones, CONF_OUTPUTS: outputs}, config)
)
await asyncio.wait([task_control_panel, task_zones], loop=hass.loop)
@callback
def alarm_status_update_callback(status):
"""Send status update received from alarm to home assistant."""
_LOGGER.debug("Alarm status callback, status: %s", status)
hass_alarm_status = STATE_ALARM_DISARMED
if status == AlarmState.ARMED_MODE0:
hass_alarm_status = STATE_ALARM_ARMED_AWAY
elif status in [
AlarmState.ARMED_MODE0,
AlarmState.ARMED_MODE1,
AlarmState.ARMED_MODE2,
AlarmState.ARMED_MODE3
]:
hass_alarm_status = STATE_ALARM_ARMED_HOME
elif status in [AlarmState.TRIGGERED, AlarmState.TRIGGERED_FIRE]:
hass_alarm_status = STATE_ALARM_TRIGGERED
elif status == AlarmState.DISARMED:
hass_alarm_status = STATE_ALARM_DISARMED
_LOGGER.debug("Sending hass_alarm_status: %s...", hass_alarm_status)
async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE, hass_alarm_status)
@callback
def zones_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Zones callback, status: %s", status)
async_dispatcher_send(hass, SIGNAL_ZONES_UPDATED, status[ZONES])
@callback
def outputs_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Outputs updated callback , status: %s", status)
async_dispatcher_send(hass, SIGNAL_OUTPUTS_UPDATED, status["outputs"])
# Create a task instead of adding a tracking job, since this task will
# run until the connection to satel_integra is closed.
hass.loop.create_task(controller.keep_alive())
hass.loop.create_task(
controller.monitor_status(
alarm_status_update_callback,
zones_update_callback,
outputs_update_callback)
)
return True
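# Illustrative configuration sketch, matching CONFIG_SCHEMA above (all values
# are hypothetical):
# satel_integra:
#   host: 192.168.1.10
#   port: 7094
#   arm_home_mode: 1
#   zones:
#     1:
#       name: "Hallway"
#       type: "motion"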
|
smarkets/pyunimate
|
unimate.py
|
Python
|
mit
| 1,311
| 0.00382
|
# coding=utf-8
from __future__ import unicode_literals
"""
Utility library for interacting with unimate
"""
import socket
import types
class Client(object):
"""
Unimate client
"
|
""
def __init__(self, server, port):
if not isinstance(server, types.StringTypes):
raise TypeError("server must be a string")
if not isinstance(port, (int, long)):
raise TypeError("port must be an integer")
self._server = server
self._port = port
def send(self, message, room=None):
"""Broadcast a message through unimate"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self._server, self._port))
if isinstance(message, str):
message = message.decode('utf-8')
if room is None:
msg = "broadcast %s\r\n" % message
else:
if isinstance(room, str):
room = room.decode('utf-8')
msg = "broadcast %s %s\r\n" % (room, message)
assert isinstance(msg, unicode)
msg = msg.encode('utf-8')
sock.send(msg)
sock.close()
class DummyUnimate(object):
def send(self, message, room = None):
pass
if __name__ == '__main__':
Client("unimate.corp.smarkets.com", 12344).send(u"Tëßt")
|
supergis/git_notebook
|
other/schedule.py
|
Python
|
gpl-3.0
| 515
| 0
|
# -*- coding: utf-8 -*-
# Use this file to easily define all of your cron jobs.
#
# It's helpful to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
#
# Learn more: http://github.com/fengsp/plan
from plan import Plan
cron = Plan()
# register one command, script or module
# cron.command('command', every='1.day')
# cron.script('script.py', path='/web/yourproject/scripts', every='1.month')
# cron.module('calendar', every='feburary', at='day.3')
if __name__ == "__main
|
__":
cron.run()
|
blckshrk/Weboob
|
modules/poivy/browser.py
|
Python
|
agpl-3.0
| 2,888
| 0.001385
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Fourcot Florent
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword, BrowserBanned
from .pages import HomePage, LoginPage, HistoryPage, BillsPage, ErrorPage
__all__ = ['PoivyBrowser']
class PoivyBrowser(BaseBrowser):
DOMAIN = 'www.poivy.com'
PROTOCOL = 'https'
ENCODING = None # refer to the HTML encoding
PAGES = {'.*login': LoginPage,
'.*buy_credit.*': HomePage,
'.*/recent_calls': HistoryPage,
'.*purchases': BillsPage,
'.*warning.*': ErrorPage
}
def __init__(self, *args, **kwargs):
BaseBrowser.__init__(self, *args, **kwargs)
def home(self):
self.location('/login')
def is_logged(self):
return not self.is_on_page(LoginPage)
def login(self):
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if not self.is_on_page(LoginPage):
self.location('/login')
if not self.page.login(self.username, self.password):
raise BrowserBanned('Too many connections from your IP address: captcha enabled')
if self.is_on_page(LoginPage) or self.is_on_page(ErrorPage):
raise BrowserIncorrectPassword()
def get_subscription_list(self):
if not self.is_on_page(HomePage):
self.location('/buy_credit')
return self.page.get_list()
def get_subscription(self, id):
assert isinstance(id, basestring)
l = self.get_subscription_list()
for a in l:
if a.id == id:
return a
return None
def get_history(self):
if not self.is_on_page(HistoryPage):
self.location('/recent_calls')
return self.page.get_calls()
def iter_bills(self, parentid):
if not self.is_on_page(BillsPage):
self.location('/purchases')
return self.page.date_bills()
def get_bill(self, id):
assert isinstance(id, basestring)
l = self.iter_bills(id)
for a in l:
if a.id == id:
return a
|
talha131/pelican
|
pelican/tools/pelican_import.py
|
Python
|
agpl-3.0
| 37,631
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import re
import subprocess
import sys
import time
from collections import defaultdict
from html import unescape
from urllib.error import URLError
from urllib.parse import quote, urlparse, urlsplit, urlunsplit
from urllib.request import urlretrieve
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init
from pelican.settings import read_settings
from pelican.utils import SafeDatetime, slugify
logger = logging.getLogger(__name__)
def decode_wp_content(content, br=True):
pre_tags = {}
if content.strip() == "":
return ""
content += "\n"
if "<pre" in content:
pre_parts = content.split("</pre>")
last_pre = pre_parts.pop()
content = ""
pre_index = 0
for pre_part in pre_parts:
start = pre_part.find("<pre")
if start == -1:
content = content + pre_part
continue
name = "<pre wp-pre-tag-{0}></pre>".format(pre_index)
pre_tags[name] = pre_part[start:] + "</pre>"
content = content + pre_part[0:start] + name
pre_index += 1
content = content + last_pre
content = re.sub(r'<br />\s*<br />', "\n\n", content)
allblocks = ('(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|'
'td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|'
'map|area|blockquote|address|math|style|p|h[1-6]|hr|'
'fieldset|noscript|samp|legend|section|article|aside|'
'hgroup|header|footer|nav|figure|figcaption|details|'
'menu|summary)')
content = re.sub(r'(<' + allblocks + r'[^>]*>)', "\n\\1", content)
content = re.sub(r'(</' + allblocks + r'>)', "\\1\n\n", content)
# content = content.replace("\r\n", "\n")
if "<object" in content:
# no <p> inside object/embed
content = re.sub(r'\s*<param([^>]*)>\s*', "<param\\1>", content)
content = re.sub(r'\s*</embed>\s*', '</embed>', content)
# content = re.sub(r'/\n\n+/', '\n\n', content)
pgraphs = filter(lambda s: s != "", re.split(r'\n\s*\n', content))
content = ""
for p in pgraphs:
content = content + "<p>" + p.strip() + "</p>\n"
# under certain strange conditions it could create
# a P of entirely whitespace
content = re.sub(r'<p>\s*</p>', '', content)
content = re.sub(
r'<p>([^<]+)</(div|address|form)>',
"<p>\\1</p></\\2>",
content)
# don't wrap tags
content = re.sub(
r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>',
"\\1",
content)
# problem with nested lists
content = re.sub(r'<p>(<li.*)</p>', "\\1", content)
content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content)
content = content.replace('</blockquote></p>', '</p></blockquote>')
content = re.sub(r'<p>\s*(</?' + allblocks + '[^>]*>)', "\\1", content)
content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content)
if br:
def _preserve_newline(match):
return match.group(0).replace("\n", "<WPPreserveNewline />")
content = re.sub(
r'/<(script|style).*?<\/\\1>/s',
_preserve_newline,
content)
# optionally make line breaks
content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content)
content = content.replace("<WPPreserveNewline />", "\n")
content = re.sub(
r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1",
content)
content = re.sub(
r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)',
'\\1',
content)
content = re.sub(r'\n</p>', "</p>", content)
if pre_tags:
def _multi_replace(dic, string):
pattern = r'|'.join(map(re.escape, dic.keys()))
return re.sub(pattern, lambda m: dic[m.group()], string)
content = _multi_replace(pre_tags, content)
return content
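# Worked example (hypothetical input): decode_wp_content("one\n\ntwo") wraps the
# blank-line separated runs in paragraph tags, returning roughly
# "<p>one</p>\n<p>two</p>\n".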
def xml_to_soup(xml):
"""Opens an xml file"""
try:
from bs4 import BeautifulSoup
except ImportError:
error = ('Missing dependency "BeautifulSoup4" and "lxml" required to '
'import XML files.')
sys.exit(error)
with open(xml, encoding='utf-8') as infile:
xmlfile = infile.read()
soup = BeautifulSoup(xmlfile, "xml")
return soup
def get_filename(post_name, post_id):
if post_name is None or post_name.isspace():
return post_id
else:
return post_name
def wp2fields(xml, wp_custpost=False):
"""Opens a wordpress XML file, and yield Pelican fields"""
soup = xml_to_soup(xml)
items = soup.rss.channel.findAll('item')
for item in items:
if item.find('status').string in ["publish", "draft"]:
try:
# Use HTMLParser due to issues with BeautifulSoup 3
title = unescape(item.title.contents[0])
except IndexError:
title = 'No title [%s]' % item.find('post_name').string
logger.warning('Post "%s" is lacking a proper title', title)
post_name = item.find('post_name').string
post_id = item.find('post_id').string
filename = get_filename(post_name, post_id)
content = item.find('encoded').string
raw_date = item.find('post_date').string
if raw_date == u'0000-00-00 00:00:00':
date = None
else:
date_object = SafeDatetime.strptime(
raw_date, '%Y-%m-%d %H:%M:%S')
date = date_object.strftime('%Y-%m-%d %H:%M')
author = item.find('creator').string
categories = [cat.string for cat
in item.findAll('category', {'domain': 'category'})]
tags = [tag.string for tag
in item.findAll('category', {'domain': 'post_tag'})]
# To publish a post the status should be 'published'
status = 'published' if item.find('status').string == "publish" \
else item.find('status').string
kind = 'article'
post_type = item.find('post_type').string
if post_type == 'page':
kind = 'page'
elif wp_custpost:
if post_type == 'post':
pass
# Old behaviour was to name everything not a page as an
# article. Theoretically all attachments have status == inherit
# so no attachments should be here. But this statement is to
# maintain existing behaviour in case that doesn't hold true.
elif post_type == 'attachment':
pass
else:
kind = post_type
yield (title, content, filename, date, author, categories,
tags, status, kind, 'wp-html')
def blogger2fields(xml):
"""Opens a blogger XML file, and yield Pelican fields"""
soup = xml_to_soup(xml)
entries = soup.feed.findAll('entry')
for entry in entries:
raw_kind = entry.find(
'category', {'scheme': 'http://schemas.google.com/g/2005#kind'}
).get('term')
if raw_kind == 'http://schemas.google.com/blogger/2008/kind#post':
kind = 'article'
elif raw_kind == 'http://schemas.google.com/blogger/2008/kind#comment':
kind = 'comment'
elif raw_kind == 'http://schemas.google.com/blogger/2008/kind#page':
kind = 'page'
else:
continue
try:
assert kind != 'comment'
filename = entry.find('link', {'rel': 'alternate'})['href']
filename = os.path.splitext(os.path.basename(filename))[0]
except (AssertionError, TypeError, KeyError):
filename = entry.find('id').string.split('.')[-1]
title = entry.find('title').string or ''
content = entry.find('content').string
raw_date = entry.find('published').string
if hasattr(SafeDatetime, 'fromisoformat'):
|
adaur/SickRage
|
sickbeard/providers/thepiratebay.py
|
Python
|
gpl-3.0
| 6,703
| 0.003431
|
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import posixpath # Must use posixpath
from urllib import urlencode
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import try_int, convert_size
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class ThePirateBayProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "ThePirateBay")
self.public = True
self.ratio = None
self.confirmed = True
self.minseed = None
self.minleech = None
self.cache = ThePirateBayCache(self)
self.url = 'https://thepiratebay.se/'
self.urls = {
'search': self.url + 's/',
'rss': self.url + 'tv/latest'
}
self.custom_url = None
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
"""
205 = SD, 208 = HD, 200 = All Videos
https://pirateproxy.pl/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
"""
search_params = {
'q': '',
'type': 'search',
'orderby': 7,
'page': 0,
'category': 200
}
for mode in search_strings:
items = []
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
search_params['q'] = search_string.strip()
search_url = self.urls[('search', 'rss')[mode == 'RSS']] + '?' + urlencode(search_params)
if self.custom_url:
search_url = posixpath.join(self.custom_url, search_url.split(self.url)[1].lstrip('/')) # Must use posixpath
logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
data = self.get_url(search_url)
if not data:
logger.log(u'URL did not return data, maybe try a custom url, or a different one', logger.DEBUG)
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', id='searchResult')
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
def process_column_header(th):
result = ''
if th.a:
result = th.a.get_text(strip=True)
if not result:
result = th.get_text(strip=True)
return result
labels = [process_column_header(label) for label in torrent_rows[0].find_all('th')]
for result in torrent_rows[1:]:
try:
cells = result.find_all('td')
title = result.find(class_='detName').get_text(strip=True)
download_url = result.find(title="Download this torrent using magnet")['href']
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('SE')].get_text(strip=True))
leechers = try_int(cells[labels.index('LE')].get_text(strip=True))
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
# Accept Torrent only from Good People for every Episode Search
if self.confirmed and result.find(alt=re.compile(r'(VIP|Trusted|Helper|Moderator)')):
if mode != 'RSS':
logger.log(u"Found result %s but that doesn't seem like a trusted result so I'm ignoring it" % title, logger.DEBUG)
continue
# Convert size after all possible skip scenarios
torrent_size = cells[labels.index('Name')].find(class_='detDesc').get_text(strip=True).split(', ')[1]
torrent_size = re.sub(r'Size ([\d.]+).+([KMGT]iB)', r'\1 \2', torrent_size)
size = convert_size(torrent_size) or -1
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items.append(item)
except StandardError:
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda tup: tup[3], reverse=True)
results += items
return results
def seed_ratio(self):
return self.ratio
class ThePirateBayCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll ThePirateBay every 30 minutes max
self.minTime = 30
def _getRSSData(self):
search_strings = {'RSS': ['']}
return {'entries': self.provider.search(search_strings)}
provider = ThePirateBayProvider()
|
MiniSEC/GRR_clone
|
lib/flows/general/services_test.py
|
Python
|
apache-2.0
| 1,398
| 0.003577
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Tests for grr.lib.flows.general.services."""
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import test_lib
class ServicesTest(test_lib.FlowTestsBaseclass):
def testEnumerateRunningServices(self):
class ClientMock(object):
def EnumerateRunningServices(self, _):
service = rdfvalue.Service(label="org.openbsd.ssh-agent",
args="/usr/bin/ssh-agent -l")
service.osx_launchd.sessiontype = "Aqua"
service.osx_launchd.lastexitstatus = 0
service.osx_launchd.timeout = 30
service.osx_launchd.ondemand = 1
return [service]
# Run the flow in the emulated way.
for _ in test_lib.TestFlowHelper(
"EnumerateRunningServices", ClientMock(), client_id=self.client_id,
token=self.token):
pass
# Check the output file is created
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id)
.Add("analysis/Services"),
token=self.token)
self.assertEqual(fd.__class__.__name__, "RDFValueCollection")
jobs = list(fd)
self.assertEqual(len(fd), 1)
self.assertEqual(jobs[0].label, "org.openbsd.ssh-agent")
self.assertEqual(jobs[0].args, "/usr/bin/ssh-agent -l")
self.assertIsInstance(jobs[0], rdfvalue.Service)
|
franek/weboob
|
modules/societegenerale/backend.py
|
Python
|
agpl-3.0
| 2,388
| 0.000839
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
# python2.5 compatibility
from __future__ import with_statement
from weboob.capabilities.bank import ICapBank, AccountNotFound
from weboob.tools.backend import BaseBackend, BackendConfig
from weboob.tools.value import ValueBackendPassword
from .browser import SocieteGenerale
__all__ = ['SocieteGeneraleBackend']
class SocieteGeneraleBackend(BaseBackend, ICapBank):
NAME = 'societegenerale'
MAINTAINER = u'Jocelyn Jaubert'
EMAIL = 'jocelyn.jaubert@gmail.com'
VERSION = '0.f'
LICENSE = 'AGPLv3+'
DESCRIPTION = u'Société Générale French bank website'
CONFIG = BackendConfig(ValueBackendPassword('login', label='Account ID', masked=False),
ValueBackendPassword('password', label='Password'))
BROWSER = SocieteGenerale
def create_default_browser(self):
return self.create_browser(self.config['login'].get(),
self.config['password'].get())
def iter_accounts(self):
for account in self.browser.get_accounts_list():
yield account
def get_account(self, _id):
with self.browser:
account = self.browser.get_account(_id)
if account:
return account
else:
raise AccountNotFound()
def iter_history(self, account):
with self.browser:
for tr in self.browser.iter_history(account):
if not tr._coming:
yield tr
def iter_coming(self, account):
with self.browser:
for tr in self.browser.iter_history(account):
if tr._coming:
yield tr
|
masfaraud/volmdlr
|
scripts/faces/planar.py
|
Python
|
gpl-3.0
| 1,693
| 0.002363
|
import volmdlr
import volmdlr.edges
import volmdlr.wires
import volmdlr.faces
p1 = volmdlr.Point3D(0.15, 0.48, 0.5)
p2 = volmdlr.Point3D(0.15, 0.1, 0.5)
p1s = volmdlr.Point2D(0, 0)
p2s = volmdlr.Point2D(0.1, 0)
p3s = volmdlr.Point2D(0.2, 0.1)
p4s = volmdlr.Point2D(-0.01, 0.05)
surface2d = volmdlr.faces.Surface2D(volmdlr.wires.ClosedPolygon2D([p1s, p2s, p3s, p4s]), [])
u = volmdlr.Vector3D(0.1, 0.7, -0.5)
u.normalize()
v = u.deterministic_unit_normal_vector()
w = u.cross(v)
plane = volmdlr.faces.Plane3D(frame=volmdlr.Frame3D(0.1*volmdlr.X3D, u, v, w))
face = volmdlr.faces.PlaneFace3D(plane, surface2d)
ax = face.plot()
p1.plot(ax=ax, color='b')
p2.plot(ax=ax, color='g')
l1 = volmdlr.edges.LineSegment3D(p1, p1+w)
l2 = volmdlr.edges.LineSegment3D(p2, p2+w)
l1.plot(ax=ax, color='b')
l2.plot(ax=ax, color='g')
i1 = face.linesegment_intersections(l1)
if i1:
i1[0].plot(ax=ax, color='r')
i2 = face.linesegment_intersections(l2)
if i2:
i2[0].plot(ax=ax, color='r')
plane_inter_1 = plane.linesegment_intersections(l1)
if plane_inter_1:
plane_inter_1[0].plot(ax=ax, color='b')
plane_inter_2 = plane.linesegment_intersections(l2)
if plane_inter_2:
plane_inter_2[0].plot(ax=ax, color='g')
plane_inter_1_2d = plane.point3d_to_2d(plane_inter_1[0])
plane_inter_2_2d = plane.point3d_to_2d(plane_inter_2[0])
ax2 = face.surface2d.plot()
plane_inter_1_2d.plot(ax=ax2, color='b')
plane_inter_2_2d.plot(ax=ax2, color='g')
assert surface2d.point_belongs(plane_inter_1_2d) == True
assert surface2d.point_belongs(plane_inter_2_2d) == False
p1_2dto3d = plane.point2d_to_3d(plane_inter_1_2d)
p1_2dto3d.plot(ax=ax, color='b')
assert p1_2dto3d == plane_inter_1[0]
face.babylonjs()
|
yannrouillard/weboob
|
modules/bp/pages/accountlist.py
|
Python
|
agpl-3.0
| 3,140
| 0.000637
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
from weboob.capabilities.bank import Account, AccountNotFound
from weboob.tools.browser import BasePage
from weboob.tools.misc import to_unicode
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
from weboob.tools.ordereddict import OrderedDict
__all__ = ['AccountList']
class AccountList(BasePage):
def on_loaded(self):
self.accounts = OrderedDict()
self.parse_table('comptes', Account.TYPE_CHECKING)
self.parse_table('comptesEpargne', Account.TYPE_SAVINGS)
self.parse_table('comptesTitres', Account.TYPE_MARKET)
self.parse_table('comptesVie', Account.TYPE_DEPOSIT)
self.parse_table('comptesRetraireEuros')
def get_accounts_list(self):
return self.accounts.itervalues()
def parse_table(self, what, actype=Account.TYPE_UNKNOWN):
tables = self.document.xpath("//table[@id='%s']" % what, smart_strings=False)
if len(tables) < 1:
return
lines = tables[0].xpath(".//tbody/tr")
for line in lines:
account = Account()
tmp = line.xpath("./td//a")[0]
account.label = to_unicode(tmp.text)
account.type = actype
account._link_id = tmp.get("href")
if 'BourseEnLigne' in account._link_id:
account.type = Account.TYPE_MARKET
tmp = line.xpath("./td/span/strong")
if len(tmp) >= 2:
tmp_id = tmp[0].text
tmp_balance = tmp[1].text
else:
tmp_id = line.xpath("./td//span")[1].text
tmp_balance = tmp[0].text
account.id = tmp_id
account.currency = account.get_currency(tmp_balance)
account.balance = Decimal(FrenchTransaction.clean_amount(tmp_balance))
if account.id in self.accounts:
a = self.accounts[account.id]
a._card_links.append(account._link_id)
if not a.coming:
a.coming = Decimal('0.0')
a.coming += account.balance
else:
account._card_links = []
self.accounts[account.id] = account
def get_account(self, id):
try:
return self.accounts[id]
except KeyError:
raise AccountNotFound('Unable to find account: %s' % id)
|
Endika/manufacture
|
mrp_production_real_cost/__init__.py
|
Python
|
agpl-3.0
| 165
| 0
|
# -*- coding: utf-8 -*-
# © 2014-2015 Avanzosc
# © 2014-2015 Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import models
|
insomnia-lab/calibre
|
src/calibre/devices/kobo/bookmark.py
|
Python
|
gpl-3.0
| 4,557
| 0.007022
|
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from contextlib import closing
class Bookmark(): # {{{
'''
A simple class for fetching Kobo-specific bookmark data
'''
def __init__(self, db_path, contentid, path, id, book_format, bookmark_extension):
self.book_format = book_format
self.bookmark_extension = bookmark_extension
self.book_length = 0 # Not Used
self.id = id
self.last_read = 0
self.last_read_location = 0 # Not Used
self.path = path
self.timestamp = 0
self.user_notes = None
self.db_path = db_path
self.contentid = contentid
self.percent_read = 0
self.get_bookmark_data()
self.get_book_length() # Not Used
def get_bookmark_data(self):
''' Return the timestamp and last_read_location '''
import sqlite3 as sqlite
user_notes = {}
self.timestamp = os.path.getmtime(self.path)
with closing(sqlite.connect(self.db_path)) as connection:
# return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (self.contentid,)
cursor.execute('select bm.bookmarkid, bm.contentid, bm.volumeid, '
'bm.text, bm.annotation, bm.ChapterProgress, '
'bm.StartContainerChildIndex, bm.StartOffset, c.BookTitle, '
'c.TITLE, c.volumeIndex, c.___NumPages '
'from Bookmark bm inner join Content c on '
'bm.contentid = c.contentid and '
'bm.volumeid = ? order by bm.volumeid, bm.chapterprogress', t)
previous_chapter = 0
bm_count = 0
for row in cursor:
current_chapter = row[10]
if previous_chapter == current_chapter:
bm_count = bm_count + 1
else:
bm_count = 0
text = row[3]
annotation = row[4]
# A dog ear (bent upper right corner) is a bookmark
if row[6] == row[7] == 0: # StartContainerChildIndex = StartOffset = 0
e_type = 'Bookmark'
text = row[9]
# highlight is text with no annotation
elif text is not None and (annotation is None or annotation == ""):
e_type = 'Highlight'
elif text and annotation:
e_type = 'Annotation'
else:
e_type = 'Unknown annotation type'
note_id = row[10] + bm_count
chapter_title = row[9]
# book_title = row[8]
chapter_progress = min(round(float(100*row[5]),2),100)
user_notes[note_id] = dict(id=self.id,
displayed_location=note_id,
type=e_type,
text=text,
annotation=annotation,
chapter=row[10],
chapter_title=chapter_title,
chapter_progress=chapter_progress)
previous_chapter = row[10]
# debug_print("e_type:" , e_type, '\t', 'loc: ', note_id, 'text: ', text,
# 'annotation: ', annotation, 'chapter_title: ', chapter_title,
# 'chapter_progress: ', chapter_progress, 'date: ')
cursor.execute('select datelastread, ___PercentRead from content '
'where bookid is Null and '
'contentid = ?', t)
for row in cursor:
self.last_read = row[0]
self.percent_read = row[1]
# print row[1]
cursor.close()
# self.last_read_location = self.last_read - self.pdf_page_offset
self.user_notes = user_notes
def get_book_length(self):
#TL self.book_length = 0
#TL self.book_length = int(unpack('>I', record0[0x04:0x08])[0])
pass
# }}}
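# Hedged usage sketch (assumption, not part of the original driver): the
# database path, content id and extension below are hypothetical; a real Kobo
# exposes its own KoboReader.sqlite and content ids.
# bm = Bookmark('/mnt/kobo/.kobo/KoboReader.sqlite',
#               'file:///mnt/onboard/book.epub', '/mnt/onboard/book.epub',
#               1, 'epub', 'annot')
# print(bm.percent_read, bm.user_notes)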
|
thiagopena/djangoSIGE
|
djangosige/apps/estoque/forms/local.py
|
Python
|
mit
| 461
| 0
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from djangosige.apps.estoque.models import LocalEstoque
class LocalEstoqueForm(forms.ModelForm):
    class Meta:
model = LocalEstoque
fields = ('descricao',)
widgets = {
'descricao': forms.TextInput(attrs={'class': 'form-control'}),
}
labels = {
'descricao': _('Descrição'),
}
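# Hedged usage sketch (assumption, not part of the original module; requires a
# configured Django project): a plain validate-and-save round trip.
# form = LocalEstoqueForm(data={'descricao': 'Depósito central'})
# if form.is_valid():
#     local = form.save()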
|
ueshin/apache-spark
|
python/pyspark/pandas/tests/test_window.py
|
Python
|
apache-2.0
| 13,671
| 0.003292
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from pyspark import pandas as ps
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.missing.window import (
MissingPandasLikeExpanding,
MissingPandasLikeRolling,
MissingPandasLikeExpandingGroupby,
MissingPandasLikeRollingGroupby,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class ExpandingRollingTest(PandasOnSparkTestCase, TestUtils):
def test_missing(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# Expanding functions
missing_functions = inspect.getmembers(MissingPandasLikeExpanding, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.expanding(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name)() # Series
# Rolling functions
missing_functions = inspect.getmembers(MissingPandasLikeRolling, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.rolling(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
# Expanding properties
missing_properties = inspect.getmembers(
MissingPandasLikeExpanding, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
            ):
getattr(psdf.a.expanding(1), name) # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
            with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name) # Series
# Rolling properties
missing_properties = inspect.getmembers(
MissingPandasLikeRolling, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.rolling(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
def test_missing_groupby(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# Expanding functions
missing_functions = inspect.getmembers(
MissingPandasLikeExpandingGroupby, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).expanding(1), name)() # Series
deprecated_functions = [
name for
|
CongBao/mrsys.online
|
sub_mrsys/schedule/apps.py
|
Python
|
apache-2.0
| 156
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ScheduleConfig(AppConfig):
    name = 'schedule'
|
rickiepark/openbidder
|
protobuf/protobuf-2.6.1/python/stubout.py
|
Python
|
mit
| 4,934
| 0.004662
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
import inspect
class StubOutForTesting:
"""Sample Usage:
You want os.path.exists() to always return true during testing.
stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda x: 1)
...
stubs.UnsetAll()
The above changes os.path.exists into a lambda that returns 1. Once
the ... part of the code finishes, the UnsetAll() looks up the old value
of os.path.exists and restores it.
"""
def __init__(self):
self.cache = []
self.stubs = []
def __del__(self):
self.SmartUnsetAll()
self.UnsetAll()
def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr. This m
|
ethod is smart and works
at the module, class, and instance level while preserving proper
inheritance. It will not stub out C types however unless that has been
explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
      - If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Raises AttributeError if the attribute cannot be found.
"""
if (inspect.ismodule(obj) or
(not inspect.isclass(obj) and attr_name in obj.__dict__)):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not inspect.isclass(obj):
mro = list(inspect.getmro(obj.__class__))
else:
mro = list(inspect.getmro(obj))
mro.reverse()
orig_attr = None
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
except AttributeError:
continue
if orig_attr is None:
raise AttributeError("Attribute not found.")
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr)
def SmartUnsetAll(self):
"""Reverses all the SmartSet() calls, restoring things to their original
definition. Its okay to call SmartUnsetAll() repeatedly, as later calls
have no effect if no SmartSet() calls have been made.
"""
self.stubs.reverse()
for args in self.stubs:
setattr(*args)
self.stubs = []
def Set(self, parent, child_name, new_child):
"""Replace child_name's old definition with new_child, in the context
of the given parent. The parent could be a module when the child is a
function at module scope. Or the parent could be a class when a class'
method is being replaced. The named child is set to new_child, while
the prior definition is saved away for later, when UnsetAll() is called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child)
def UnsetAll(self):
"""Reverses all the Set() calls, restoring things to their original
definition. Its okay to call UnsetAll() repeatedly, as later calls have
no effect if no Set() calls have been made.
"""
# Undo calls to Set() in reverse order, in case Set() was called on the
# same arguments repeatedly (want the original call to be last one undone)
self.cache.reverse()
for (parent, old_child, child_name) in self.cache:
setattr(parent, child_name, old_child)
self.cache = []
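# Hedged usage sketch (assumption, not part of the original file): stub out
# os.path.exists for a block of code, then restore the real implementation.
if __name__ == '__main__':
  import os.path
  stubs = StubOutForTesting()
  stubs.Set(os.path, 'exists', lambda path: True)
  assert os.path.exists('/definitely/not/a/real/path')
  stubs.UnsetAll()
  assert not os.path.exists('/definitely/not/a/real/path')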
|
Maelstroms38/ecommerce
|
src/ecommerce/settings/local.py
|
Python
|
mit
| 7,411
| 0.003778
|
"""
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#root of project
#BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'csqwlmc8s55o($rt6ozh7u+ui9zb-et00w$d90j8$^!nvj41_r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'stroms38@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
If using gmail, you will need to
unlock Captcha to enable Django
to send for you:
https://accounts.google.com/displayunlockcaptcha
'''
# Application definition
INSTALLED_APPS = (
#django app
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#third party apps
'crispy_forms',
'registration',
#my apps
'answers',
'newsletter',
"products",
"carts",
"billing",
"django_filters",
"storages",
'gunicorn',
"djstripe",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
'''Image storage Amazon S3'''
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'examplefy'
S3_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = S3_URL
AWS_QUERYSTRING_AUTH = False
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
'''Static storage'''
# # Static files (CSS, JavaScript, Images)
# # https://docs.djangoproject.com/en/1.8/howto/static-files/
# STATIC_ROOT = 'staticfiles'
# STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "static_root")
# MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "media_root")
# PROTECTED_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "protected_root")
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, "static", "static_root"),
# #os.path.join(BASE_DIR, "static_in_env"),
# #'/var/www/static/',
# )
#Production Code
#Parse database configuration from $DATABASE_URL
#import dj_database_url
#DATABASES['default'] = dj_database_url.config()
# #BOTO S3 Storage for Production ONLY
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', "static_root"),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "static_root")
MEDIA_URL = S3_URL
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "media_root")
PROTECTED_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "protected_root")
# TEMPLATE_DIRS = (
# os.path.join(BASE_DIR, "templates"),
# )
# here() gives us file paths from the root of the system to the directory
# holding the current file.
here = lambda * x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
PROJECT_ROOT = here("..")
# root() gives us file paths from the root of the system to whatever
# folder(s) we pass it starting at the parent directory of the current file.
root = lambda * x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)
TEMPLATE_DIRS = (
root('templates'),
)
#Crispy FORM TAGs SETTINGS
CRISPY_TEMPLATE_PACK = 'bootstrap3'
#DJANGO REGISTRATION REDUX SETTINGS
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
#Braintree
BRAINTREE_PUBLIC = "hsjhmqhy73rvpqbv"
BRAINTREE_PRIVATE = "37b06da7e2cdb493bf0e0ddb1c47cbcd"
BRAINTREE_MERCHANT = "bgd7scxjbcrz6dd2"
BRAINTREE_ENVIRONMENT = "Sandbox"
#Stripe
STRIPE_PUBLIC_KEY = os.environ.get("STRIPE_PUBLIC_KEY", "pk_test_lLFAbBOc7bHtpxq5QnIp94xh")
STRIPE_SECRET_KEY = os.environ.get("STRIPE_SECRET_KEY", "sk_test_hWkIxMrsvR3IGJIRKLRy1Rts")
CURRENCIES = getattr(settings, "DJSTRIPE_CURRENCIES", (
('usd', 'U.S. Dollars',),
('gbp', 'Pounds (GBP)',),
('eur', 'Euros',))
)
DJSTRIPE_PLANS = {
"one-time": {
"stripe_plan_id": "one-time",
"name": "Examplefy ($0.99)",
"description": "A one-time buy to Examplefy",
"price": 99, # $0.99
"currency": "usd",
"interval": "day"
},
"monthly": {
"stripe_plan_id": "pro-monthly",
"name": "Examplefy Pro ($4.99/month)",
"description": "The monthly subscription plan to Examplefy",
"price": 499, # $4.99
"currency": "usd",
"interval": "month",
"interval_count": 1
},
"yearly": {
"stripe_plan_id": "pro-yearly",
"name": "Examplefy Prime ($49/year)",
"description": "The annual subscription plan to Examplefy",
"price": 4900, # $49.00
"currency": "usd",
"interval": "year",
"interval_count": 1
}
}
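# Hedged usage sketch (assumption, appended for illustration only): the plan
# table above is keyed by plan name, with prices stored in cents.
monthly_plan = DJSTRIPE_PLANS["monthly"]
monthly_price_dollars = monthly_plan["price"] / 100.0  # 4.99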
|
ferdas/ws-rpc
|
websocket.py
|
Python
|
gpl-3.0
| 8,711
| 0.011365
|
# versione 0.5
import socket
import threading
import hashlib
import base64
import json
class BadWSRequest(Exception):
pass
class BadWSFrame(Exception):
pass
class BadCmdCall(Exception):
pass
class BadCmdParam(Exception):
pass
class Client(threading.Thread):
_MAGIC_STRING = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
_OPCODE_TEXT = 0x1
_OPCODE_CLOSE = 0x8
def __init__(self, Manager, socket, address):
super().__init__()
self.Manager = Manager
self.socket = socket
self.ip, self.port = address
self.invokedPath = None
self.sessionStarted = False
def _parseHeader(self):
self.socket.settimeout(2.0)
rcvBuffer = ''
toRead = True
while toRead:
rcvBuffer += self.socket.recv(128).decode('utf-8')
#Check for the termination sequence
if rcvBuffer[-4:] == '\r\n\r\n': toRead = False
        # consider using splitlines
headerLines = rcvBuffer.split('\r\n')
requestLineElements = headerLines[0].split(' ')
if requestLineElements[0] == 'GET' and requestLineElements[-1] == 'HTTP/1.1':
self.invokedPath = requestLineElements[2]
else:
raise BadWSRequest
self.headerDict = {}
#Cut off rubbish (first line and termination sequence)
for header in headerLines[1:-2]:
headerKey, headerVal = header.split(':', 1)
self.headerDict.update({ headerKey: headerVal.strip() })
if (
'upgrade' not in self.headerDict['Connection'].lower().split(', ') or
self.headerDict['Upgrade'].lower() != 'websocket' or
'Sec-WebSocket-Key' not in self.headerDict
#Very weak part
):
raise BadWSRequest
#Operative mode needs more time
self.socket.settimeout(3600.0)
def _initComunication(self):
payload = 'HTTP/1.1 101 Web Socket Protocol Handshake\r\n'
payload += 'Upgrade: WebSocket\r\n'
payload += 'Connection: Upgrade\r\n'
#Generate the security key
acceptKey = self.headerDict['Sec-WebSocket-Key'] + self._MAGIC_STRING
acceptKey = hashlib.sha1( acceptKey.encode('ascii') ).digest()
acceptKey = base64.b64encode(acceptKey)
payload += 'Sec-WebSocket-Accept: ' + acceptKey.decode('utf-8') + '\r\n\r\n'
self.socket.send( payload.encode('utf-8') )
def _rcvRequest(self):
#1st byte: FIN, RUBBISH1, RUBBISH2, RUBBISH3, OPCODE (4 bit)
#2nd byte: MASKED, PAYLOAD_LENGTH (7 bit)
rcvBuffer = self.socket.recv(2)
print('FIN: ' + str( rcvBuffer[0] >> 7 ))
#0x0f is 00001111 binary sequence
opcode = rcvBuffer[0] & 0x0f
print('opcode: ' + hex( opcode ))
maskBit = rcvBuffer[1] >> 7
print('mask: ' + str( maskBit ))
if maskBit != 1:
raise BadWSFrame('Unmasked data')
#0x7f is 01111111 binary sequence
length = rcvBuffer[1] & 0x7f
if length == 126:
#A long length is stored in more space
rcvBuffer = self.socket.recv(2)
length = int.from_bytes(rcvBuffer, 'big')
elif length == 127:
            # a payload larger than 65kb per thread makes everything collapse..
            # and anyway.. why would a user upload that much data? :O
raise BadWSFrame('Too big payload')
        print('length: ' + str(length))
#Read the mask applied to data
maskKey = self.socket.recv(4)
        # consider buffering to make the thread more frugal
rcvBuffer = self.socket.recv(length)
message = b''
for i in range(length):
#Unmask the original message
message += bytes([ rcvBuffer[i] ^ maskKey[i % 4] ])
        print(message)
if opcode == self._OPCODE_TEXT:
return json.loads( message.decode('utf-8') )
elif opcode == self._OPCODE_CLOSE:
return None
else:
raise BadWSFrame('Unknown OpCode')
def _sndResponse(self, data):
data = json.dumps(data).encode('utf-8')
length = len(data)
#FIN bit and opcode 0x1 (0x81 is 10000001 binary sequence)
payload = b'\x81'
if length >= 65535:
#Over the maximum length allowed by 16bit addressing
raise BadWSFrame('Too big payload')
elif length <= 125:
payload += bytes([length])
else:
payload += bytes([126])
payload += length.to_bytes(2, 'big')
        # the send could be buffered
self.socket.send(payload + data)
    # Close by sending an error code and using the global opcode
def _sndClose(self):
#FIN bit and opcode 0x8 (0x88 is 10001000 binary sequence)
#Mask and length bits are zero
self.socket.send(b'\x88\x00')
#Empty the remote buffer
self.socket.recv(100)
def run(self):
print('[+] Connection established with ' + self.ip + ':' + str(self.port), "[%s]" % str(len(self.Manager)))
try:
self._parseHeader()
self._initComunication()
self.sessionStarted = True
            # non-blocking sockets could help drain data sooner
while True:
request = self._rcvRequest()
if not request: break
response = self.Manager.executeAction(self, request)
if response == None:
raise UnknownCommand
self._sndResponse(response)
except BadWSRequest:
print('[!] Bad-formed request from ' + self.ip + ':' + str(self.port))
except BadWSFrame as err:
print('[!] Bad-formed frame from ' + self.ip + ':' + str(self.port), str(err))
            # decide whether to keep this message or not
except BadCmdCall as err:
print('[!] Unknown command received from ' + self.ip + ':' + str(self.port), str(err))
except BadCmdParam as err:
print('[!] Invalid parameters from ' + self.ip + ':' + str(self.port), str(err))
except socket.timeout:
print('[!] Timeout occurred for ' + self.ip + ':' + str(self.port))
finally:
if self.sessionStarted:
self._sndClose()
self.socket.close()
self.Manager.rmvClient(self)
print('[-] Connection closed with ' + self.ip + ':' + str(self.port), "[%s]" % str(len(self.Manager)))
class ClientManager:
def __init__(self):
self.clientList = []
self.actionDict = {}
def __len__(self):
return len(self.clientList)
def addClient(self, clientSocket, address):
newClient = Client(self, clientSocket, address)
newClient.start()
self.clientList.append(newClient)
def rmvClient(self, clientInstance):
self.clientList.remove(clientInstance)
def registerAction(self, functionName, function):
self.actionDict.update({ functionName: function })
def executeAction(self, clientInstance, request):
#Array of two element is expected
function, parameters = request
if function in self.actionDict:
try:
return self.actionDict[function](*parameters)
except TypeError:
raise BadCmdParam(request)
else:
raise BadCmdCall(function)
def shutdown(self):
for client in self.clientList:
client.join()
class WebSocketServer:
def __init__(self, ip = '0.0.0.0', port = 8888, conns = 9999):
self.ip = ip
self.port = port
self.CM = ClientManager()
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind( (self.ip, self.port) )
self.socket.listen(conns)
print('[#] Waiting for connections on ' + self.ip + ':' + str(self.port) + '...')
except socket.error as err:
|
lukemetz/MLFun
|
DistCifar10/model.py
|
Python
|
mit
| 1,811
| 0.008283
|
import theano.tensor as T
import numpy as np
from cuboid.bricks import Flattener, FilterPool, Dropout, BatchNormalization
from cuboid.bricks import Convolutional, LeakyRectifier, BrickSequence
from blocks.bricks.conv import MaxPooling
from blocks.bricks import Linear, Softmax
from blocks.initialization import IsotropicGaussian, Constant
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
def conv3(num_filters):
return [Convolutional(filter_size=(3, 3),
num_filters=num_filters,
weights_init=IsotropicGaussian(std=0.05),
biases_init=Constant(0),
use_bias=True,
border_mode="same",
step=(1,1)),
LeakyRectifier(0.01)]
def max_pool():
return MaxPooling(pooling_size=(2, 2),
step=(2, 2))
def linear(n):
return Linear(output_dim=n,
weights_init=IsotropicGaussian(std=0.01),
biases_init=Constant(0),
                  use_bias=True)
class ModelHelper():
def __init__(self, config):
self.X = T.tensor4("features")
c = config
seq = BrickSequence(input_dim = (3, 32, 32), bricks=[
conv3(c['n_l1']),
conv3(c['n_l2']),
max_pool(),
conv3(c['n_l3']),
conv3(c['n_l4']),
max_pool(),
#conv3(10),
#conv3(10),
Flattener(),
linear(c['n_l5']),
            Softmax()
])
seq.initialize()
self.pred = seq.apply(self.X)
self.Y = T.imatrix("targets")
self.cost = CategoricalCrossEntropy().apply(self.Y.flatten(), self.pred)
self.cost.name = "cost"
self.accur = 1.0 - MisclassificationRate().apply(self.Y.flatten(), self.pred)
self.accur.name = "accur"
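# Hedged usage sketch (assumption, not part of the original file): the config
# keys n_l1..n_l5 are the only ones ModelHelper reads; the layer sizes below
# are made up for illustration.
if __name__ == "__main__":
    example_config = {'n_l1': 32, 'n_l2': 32, 'n_l3': 64, 'n_l4': 64, 'n_l5': 10}
    helper = ModelHelper(example_config)
    print(helper.cost.name, helper.accur.name)  # "cost" "accur"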
|
krintoxi/NoobSec-Toolkit
|
NoobSecToolkit /tools/inject/lib/core/optiondict.py
|
Python
|
gpl-2.0
| 13,002
| 0.000154
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
optDict = {
# Format:
# Family: { "parameter name": "parameter datatype" },
# Or:
# Family: { "parameter name": ("parameter datatype", "category name used for common outputs feature") },
"Target": {
"direct": "string",
|
"url": "string",
|
"logFile": "string",
"bulkFile": "string",
"requestFile": "string",
"sessionFile": "string",
"googleDork": "string",
"configFile": "string",
"sitemapUrl": "string",
},
"Request": {
"method": "string",
"data": "string",
"paramDel": "string",
"cookie": "string",
"cookieDel": "string",
"loadCookies": "string",
"dropSetCookie": "boolean",
"agent": "string",
"randomAgent": "boolean",
"host": "string",
"referer": "string",
"headers": "string",
"authType": "string",
"authCred": "string",
"authFile": "string",
"proxy": "string",
"proxyCred": "string",
"proxyFile": "string",
"ignoreProxy": "boolean",
"tor": "boolean",
"torPort": "integer",
"torType": "string",
"checkTor": "boolean",
"delay": "float",
"timeout": "float",
"retries": "integer",
"rParam": "string",
"safeUrl": "string",
"safePost": "string",
"safeReqFile": "string",
"safeFreq": "integer",
"skipUrlEncode": "boolean",
"csrfToken": "string",
"csrfUrl": "string",
"forceSSL": "boolean",
"hpp": "boolean",
"evalCode": "string",
},
"Optimization": {
"optimize": "boolean",
"predictOutput": "boolean",
"keepAlive": "boolean",
"nullConnection": "boolean",
"threads": "integer",
},
"Injection": {
"testParameter": "string",
"skip": "string",
"skipStatic": "boolean",
"dbms": "string",
"dbmsCred": "string",
"os": "string",
"invalidBignum": "boolean",
"invalidLogical": "boolean",
"invalidString": "boolean",
"noCast": "boolean",
"noEscape": "boolean",
"prefix": "string",
"suffix": "string",
"tamper": "string",
},
"Detection": {
"level": "integer",
"risk": "integer",
"string": "string",
"notString": "string",
"regexp": "string",
"code": "integer",
"textOnly": "boolean",
"titles": "boolean",
},
"Techniques": {
"tech": "string",
"timeSec": "integer",
"uCols": "string",
"uChar": "string",
"uFrom": "string",
"dnsName": "string",
"secondOrder": "string",
},
"Fingerprint": {
"extensiveFp": "boolean",
},
"Enumeration": {
"getAll": "boolean",
"getBanner": ("boolean", "Banners"),
"getCurrentUser": ("boolean", "Users"),
"getCurrentDb": ("boolean", "Databases"),
"getHostname": "boolean",
"isDba": "boolean",
"getUsers": ("boolean", "Users"),
"getPasswordHashes": ("boolean", "Passwords"),
"getPrivileges": ("boolean", "Privileges"),
"getRoles": ("boolean", "Roles"),
"getDbs": ("boolean", "Databases"),
"getTables": ("boolean", "Tables"),
"getColumns": ("boolean", "Columns"),
"getSchema": "boolean",
"getCount": "boolean",
"dumpTable": "boolean",
"dumpAll": "boolean",
"search": "boolean",
"getComments": "boolean",
"db": "string",
"tbl": "string",
"col": "string",
"excludeCol": "string",
"dumpWhere": "string",
"user": "string",
"excludeSysDbs": "boolean",
"limitStart": "integer",
"limitStop": "integer",
"firstChar": "integer",
"lastChar": "integer",
"query": "string",
"sqlShell": "boolean",
"sqlFile": "string",
},
"Bru
|
haypo/trollius
|
tests/test_subprocess.py
|
Python
|
apache-2.0
| 18,416
| 0.000163
|
from trollius import subprocess
from trollius import test_utils
import trollius as asyncio
import os
import signal
import sys
import warnings
from trollius import BrokenPipeError, ConnectionResetError, ProcessLookupError
from trollius import From, Return
from trollius import base_subprocess
from trollius import test_support as support
from trollius.test_utils import mock
from trollius.test_utils import unittest
if sys.platform != 'win32':
from trollius import unix_events
# Program blocking
PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']
# Program copying input to output
if sys.version_info >= (3,):
PROGRAM_CAT = ';'.join(('import sys',
'data = sys.stdin.buffer.read()',
'sys.stdout.buffer.write(data)'))
else:
PROGRAM_CAT = ';'.join(('import sys',
'data = sys.stdin.read()',
'sys.stdout.write(data)'))
PROGRAM_CAT = [sys.executable, '-c', PROGRAM_CAT]
class TestSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, *args, **kwargs):
self._proc = mock.Mock()
self._proc.stdin = None
self._proc.stdout = None
self._proc.stderr = None
class SubprocessTransportTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
self.set_event_loop(self.loop)
def create_transport(self, waiter=None):
protocol = mock.Mock()
protocol.connection_made._is_coroutine = False
protocol.process_exited._is_coroutine = False
transport = TestSubprocessTransport(
self.loop, protocol, ['test'], False,
None, None, None, 0, waiter=waiter)
return (transport, protocol)
def test_proc_exited(self):
waiter = asyncio.Future(loop=self.loop)
transport, protocol = self.create_transport(waiter)
transport._process_exited(6)
        self.loop.run_until_complete(waiter)
self.assertEqual(transport.get_returncode(), 6)
self.assertTrue(protocol.connection_made.called)
self.assertTrue(protocol.process_exited.called)
self.assertTrue(protocol.connection_lost.called)
        self.assertEqual(protocol.connection_lost.call_args[0], (None,))
self.assertFalse(transport._closed)
self.assertIsNone(transport._loop)
self.assertIsNone(transport._proc)
self.assertIsNone(transport._protocol)
# methods must raise ProcessLookupError if the process exited
self.assertRaises(ProcessLookupError,
transport.send_signal, signal.SIGTERM)
self.assertRaises(ProcessLookupError, transport.terminate)
self.assertRaises(ProcessLookupError, transport.kill)
transport.close()
class SubprocessMixin:
def test_stdin_stdout(self):
args = PROGRAM_CAT
@asyncio.coroutine
def run(data):
proc = yield From(asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
loop=self.loop))
# feed data
proc.stdin.write(data)
yield From(proc.stdin.drain())
proc.stdin.close()
# get output and exitcode
data = yield From(proc.stdout.read())
exitcode = yield From(proc.wait())
raise Return(exitcode, data)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
exitcode, stdout = self.loop.run_until_complete(task)
self.assertEqual(exitcode, 0)
self.assertEqual(stdout, b'some data')
def test_communicate(self):
args = PROGRAM_CAT
@asyncio.coroutine
def run(data):
proc = yield From(asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
loop=self.loop))
stdout, stderr = yield From(proc.communicate(data))
raise Return(proc.returncode, stdout)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
exitcode, stdout = self.loop.run_until_complete(task)
self.assertEqual(exitcode, 0)
self.assertEqual(stdout, b'some data')
def test_shell(self):
create = asyncio.create_subprocess_shell('exit 7',
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 7)
@unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
def test_start_new_session(self):
def start_new_session():
os.setsid()
# start the new process in a new session
create = asyncio.create_subprocess_shell('exit 8',
preexec_fn=start_new_session,
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 8)
def test_kill(self):
args = PROGRAM_BLOCKED
create = asyncio.create_subprocess_exec(*args, loop=self.loop)
proc = self.loop.run_until_complete(create)
proc.kill()
returncode = self.loop.run_until_complete(proc.wait())
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_terminate(self):
args = PROGRAM_BLOCKED
create = asyncio.create_subprocess_exec(*args, loop=self.loop)
proc = self.loop.run_until_complete(create)
proc.terminate()
returncode = self.loop.run_until_complete(proc.wait())
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_send_signal(self):
code = '; '.join((
'import sys, time',
'print("sleeping")',
'sys.stdout.flush()',
'time.sleep(3600)'))
args = [sys.executable, '-c', code]
create = asyncio.create_subprocess_exec(*args,
stdout=subprocess.PIPE,
loop=self.loop)
proc = self.loop.run_until_complete(create)
@asyncio.coroutine
def send_signal(proc):
# basic synchronization to wait until the program is sleeping
line = yield From(proc.stdout.readline())
self.assertEqual(line, b'sleeping\n')
proc.send_signal(signal.SIGHUP)
returncode = yield From(proc.wait())
raise Return(returncode)
returncode = self.loop.run_until_complete(send_signal(proc))
self.assertEqual(-signal.SIGHUP, returncode)
def prepare_broken_pipe_test(self):
# buffer large enough to feed the whole pipe buffer
large_data = b'x' * support.PIPE_MAX_SIZE
# the program ends before the stdin can be feeded
create = asyncio.create_subprocess_exec(
sys.executable, '-c', 'pass',
stdin=subprocess.PIPE,
loop=self.loop)
proc = self.loop.run_until_complete(create)
return (proc, large_data)
def test_stdin_broken_pipe(self):
proc, large_data = self.prepare_broken_pipe_test()
@asyncio.coroutine
def write_stdin(proc, data):
proc.stdin.write(data)
yield From(proc.stdin.drain())
c
|
ghchinoy/tensorflow
|
tensorflow/python/distribute/multi_worker_util_test.py
|
Python
|
apache-2.0
| 8,226
| 0.004012
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi_worker_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import test
from tensorflow.python.training import server_lib
class NormalizeClusterSpecTest(test.TestCase):
def assert_same_cluster(self, lhs, rhs):
self.assertEqual(
server_lib.ClusterSpec(lhs).as_dict(),
server_lib.ClusterSpec(rhs).as_dict())
def testDictAsInput(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testClusterDefAsInput(self):
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = "chief"
job.tasks[0] = "127.0.0.1:1234"
job = cluster_def.job.add()
job.name = "worker"
job.tasks[0] = "127.0.0.1:8964"
job.tasks[1] = "127.0.0.1:2333"
job = cluster_def.job.add()
job.name = "ps"
job.tasks[0] = "127.0.0.1:1926"
job.tasks[1] = "127.0.0.1:3141"
self.assert_same_cluster(
cluster_def, multi_worker_util.normalize_cluster_spec(cluster_def))
def testClusterSpecAsInput(self):
cluster_spec = server_lib.ClusterSpec({
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
})
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testUnexpectedInput(self):
cluster_spec = ["127.0.0.1:8964", "127.0.0.1:2333"]
with self.assertRaisesRegexp(
ValueError,
"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
"`tf.train.ClusterDef` object"):
multi_worker_util.normalize_cluster_spec(cluster_spec)
class IsChiefTest(test.TestCase):
def testClusterWithChief(self):
    cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "chief", 0))
    self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 0))
def testClusterWithoutChief(self):
cluster_spec = {"worker": ["127.0.0.1:8964", "127.0.0.1:2333"]}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "worker", 0))
self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 1))
with self.assertRaisesRegexp(
ValueError, "`task_type` 'chief' not found in cluster_spec."):
multi_worker_util.is_chief(cluster_spec, "chief", 0)
with self.assertRaisesRegexp(
ValueError, "The `task_id` 2 exceeds the maximum id of worker."):
multi_worker_util.is_chief(cluster_spec, "worker", 2)
def testEvaluatorIsChief(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "evaluator", 0))
class NumWorkersTest(test.TestCase):
def testCountWorker(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="chief"), 3)
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="worker"), 3)
def testCountEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="evaluator"), 1)
def testTaskTypeNotFound(self):
cluster_spec = {}
with self.assertRaisesRegexp(
ValueError, "`task_type` 'worker' not found in cluster_spec."):
multi_worker_util.worker_count(cluster_spec, task_type="worker")
def testCountPs(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
# A "ps" job shouldn't call this method.
with self.assertRaisesRegexp(ValueError, "Unexpected `task_type` 'ps'"):
multi_worker_util.worker_count(cluster_spec, task_type="ps")
class IdInClusterTest(test.TestCase):
def testChiefId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0), 0)
def testWorkerId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 2)
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 1)
def testEvaluatorId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "evaluator", 0), 0)
def testPsId(self):
cluster_spec = {"chief": ["127.0.0.1:1234"], "ps": ["127.0.0.1:7566"]}
with self.assertRaisesRegexp(ValueError,
"There is no id for task_type 'ps'"):
multi_worker_util.id_in_cluster(cluster_spec, "ps", 0)
def testMultipleChiefs(self):
cluster_spec = {
"chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
}
with self.assertRaisesRegexp(ValueError,
"There must be at most one 'chief' job."):
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
class CollectiveLeaderTest(test.TestCase):
def testChiefAsLeader(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 0),
"/job:chief/replica:0/task:0")
def testWorkerAsLeader(self):
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 1),
"/job:worker/replica:0/task:0")
def testLeaderForEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "evaluator", 0), "")
def testLocalLeader(self):
cluster_spec = {}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, None, 0), "
|
tocisz/oraschemadoc
|
oraschemadoc/dot.py
|
Python
|
gpl-2.0
| 6,714
| 0.003724
|
""" Oriented graph aka ERD painter """
# Copyright (C) Petr Vanek <petr@yarpen.cz> , 2005
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
__author__ = 'Petr Vanek <petr@yarpen.cz>'
import types
import os
import sys
# local file subprocess24 is imported only for <2.4
if sys.version_info[:3] < (2, 4, 2):
import subprocess24 as subprocess
else:
import subprocess
class Dot:
"""! \brief Oriented graph aka ERD painter.
This class requires GraphViz installed because it calls 'dot'
externally. If it does not find that programm, no images are included
in html docs.
Format for parent - children: parent and [child1, child2, ..., childN]
Format for all - {
parent1: [child1, child2, ..., childN],
parent2: [child1, child2, ..., childN],
...
parentN: [child1, child2, ..., childN]
}
"""
def __init__(self, outPath):
## Path to write temp files and final images
self.outPath = outPath
## A flag for 'dot' availability
self.haveDot = self.haveDot()
## A text template for DOT source files.
self.graphTemplate = """
/* This is a DOT file created by Oraschemadoc (OSD).
When you see this file in your filesystem and OSD
is not running, there is propably a bug in this file.
Visit http://www.yarpen.cz/oraschemadoc and send me
this file to fix the bug, please. */
digraph G
{
label="%s";fontname="Helvetica";labelfontsize="12";
labelloc="t";labeljust="l";labeldistance="5.0";
edge [fontname="Helvetica",fontsize=10,labelfontname="Helvetica",labelfontsize=10];
node [fontname="Helvetica",fontsize=10,shape=record];
rankdir=LR;
%s
}
"""
def uniq(self, aList):
"""! \brief Create a list with unique values.
It's used for a dummy lists to be reset during diagrams source
code creation."""
set = {}
map(set.__setitem__, aList, [])
return set.keys()
def makeKeyNode(self, node, highlighNode=None):
"""! \brief Make base node.
Base node definiton for DOT source."""
bgcolor = 'white'
if highlighNode == node:
bgcolor = 'gray88'
s = '"%s" [label="%s" height=0.2,width=0.4,color="black",fillcolor="%s",style="filled",fontcolor="black",href="table-%s.html#t-fk"];\n' % (node, node, bgcolor, node)
return s
def graphList(self, mainName, children=[], inverseChildren=[]):
"""! \brief Make relations between the nodes.
Link base nodes (makeKeyNode()) together.
\param children leafs pointing to mainName
\param inverseChildren mainName is pointing to these leafs"""
s = []
for i in children:
s.append('''"%s" -> "%s" [color="black",fontsize=10,style="solid",arrowhead="crow"];\n''' % (i, mainName))
for i in inverseChildren:
s.append('''"%s" -> "%s" [color="black",fontsize=10,style="solid",arrowhead="crow"];\n''' % (mainName, i))
return ''.join(s)
def haveDot(self):
"""! \brief Check if there is a dot installed in PATH """
try:
"""
if os.spawnlp(os.P_WAIT, 'dot', 'dot', '-V') == 0:
return True
"""
print '\nChecking for dot binary...'
if self.runDot(['-V']) == 0:
return True
except OSError, e:
print '\nUnknown error in Dot.haveDot() method. ERD disabled.'
print '%s\n' % e
print ' Error'
return False
def runDot(self, params=[]):
"""! \brief Call the 'dot' binary. Searchnig in PATH variable"""
#return subprocess.call(["dot"] + params, env={"PATH": os.environ['PATH']}, stdout=None)
return subprocess.call(['dot'] + params)
def callDot(self, fname):
"""! \brief Create the PNGs and image maps from DOT files """
f = fname + '.dot'
retval = 1
self.runDot(params=['-Tcmap', '-o', fname + '.map', f])
retval = self.runDot(params=['-Tpng', '-o', fname + '.png', f])
if retval == 0:
try:
os.remove(f)
except IOError:
print 'cannot delete %s' % f
return retval
def fileGraphList(self, mainName, children=[], inverseChildren=[]):
"""! \brief Make a graph of the mainName's children """
allNodes = self.uniq(children + [mainName] + inverseChildren)
s = ''
for i in allNodes:
s += self.makeKeyNode(i, mainName)
s += self.graphList(mainName, children, inverseChildren)
s = self.graphTemplate % ('ERD related to the table', s)
fname = os.path.join(self.outPath, mainName)
f = file(fname+'.dot', 'w')
f.write(s)
f.close()
        if self.callDot(fname) == 0:
return mainName+'.png'
return None
def fileGraphDict(self, all={}):
"""! \brief Make wide graph for the whole schema.
It's used at the index page."""
allNodes = all.keys()
for i in all.keys():
if type(i) != types.ListType:
continue
for j in i:
allNodes.append(j)
allNodes = self.uniq(allNodes)
s = ''
        for i in allNodes:
s += self.makeKeyNode(i)
for i in all.keys():
s += self.graphList(i, all[i])
s = self.graphTemplate % ('ERD of the schema', s)
fname = os.path.join(self.outPath, 'main')
f = file(fname + '.dot', 'w')
f.write(s)
f.close()
if self.callDot(fname) == 0:
return 'main.png'
return None
if __name__ == '__main__':
d = Dot()
d.fileGraphList('rodic', ['ch1', 'ch2', 'ch3'])
d.fileGraphDict({'rodic1': ['ch1', 'ch2', 'ch3', 'rodic2'], 'rodic2': ['x1', 'rodic1']})
|
OpenSoccerManager/opensoccermanager
|
uigtk/continuedialog.py
|
Python
|
gpl-3.0
| 1,783
| 0
|
#!/usr/bin/env python3
# This file is part of OpenSoccerManager.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
from gi.repository import GObject
import data
class ContinueDialog(Gtk.Dialog):
'''
Dialog displayed when moving between dates in the game.
'''
def __init__(self):
Gtk.Dialog.__init__(self)
self.set_transient_for(data.window)
self.set_modal(True)
self.set_title("Continue Game")
self.set_default_size(200, -1)
self.set_resizable(False)
self.vbox.set_border_width(5)
self.progressbar = Gtk.ProgressBar()
self.progressbar.set_text("")
self.vbox.add(self.progressbar)
self.count = 0
def on_timeout_event(self, *args):
if self.count < 10:
self.count += 1
self.progressbar.set_fraction(self.count * 0.1)
            state = True
else:
self.destroy()
data.window.mainscreen.information.update_date()
            state = False
return state
def show(self):
self.show_all()
GObject.timeout_add(10, self.on_timeout_event)
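# Hedged usage sketch (assumption): the main screen would normally drive this
# dialog roughly as below; data.window must already point at the main window.
# dialog = ContinueDialog()
# dialog.show()  # runs the GObject timeout until the progress bar completes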
|
pseudonym117/Riot-Watcher
|
tests/integration/riot/test_AccountApi.py
|
Python
|
mit
| 1,465
| 0.001365
|
import pytest
@pytest.fixture(params=["EUROPE", "ASIA", "AMERICAS"])
def region(request):
return request.param
@pytest.fixture(params=["pseudonym", "Tuxedo"])
def game_name(request):
return request.param
@pytest.fixture(params=["sudo", "riot"])
def tag_line(request):
return request.param
@pytest.fixture(params=["val", "lor"])
def game(request):
return request.param
@pytest.mark.riot
@pytest.mark.integration
class TestAccountApi:
def test_by_puuid(self, riot_context, region, puuid):
        actual_response = riot_context.watcher.account.by_puuid(region, puuid)
riot_context.verify_api_call(
region, f"/riot/account/v1/accounts/by-puuid/{puuid}", {}, actual_response
)
def test_by_riot_id(self, riot_context, region, game_name, tag_line):
actual_response = riot_context.watcher.account.by_riot_id(
region, game_name, tag_line
)
riot_context.verify_api_call(
region,
f"/riot/account/v1/accounts/by-riot-id/{game_name}/{tag_line}",
{},
actual_response,
)
def test_active_shard(self, riot_context, region, game, puuid):
actual_response = riot_context.watcher.account.active_shard(region, game, puuid)
riot_context.verify_api_call(
region,
f"/riot/account/v1/active-shards/by-game/{game}/by-puuid/{puuid}",
{},
actual_response,
)
|
cheenwe/cheenwe.github.io
|
study/python/8_get_web_page.py
|
Python
|
mit
| 220
| 0.022727
|
import socket
s = socket.socket()
host = socket.gethostname()
port = 1234
s.bind((host, port))
s.listen(5)
while True:
    c, addr = s.accept()
print "Get coonect from", addr
c.send('Thanks your coonecting')
c.close()
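# Hedged companion sketch (assumption, not part of the original snippet): a
# matching client, to be run from a second Python 2 process while the server
# loop above is accepting connections.
# client = socket.socket()
# client.connect((socket.gethostname(), 1234))
# print client.recv(1024)
# client.close()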
|
caiobrentano/swift_undelete
|
setup.py
|
Python
|
apache-2.0
| 1,663
| 0.000601
|
#!/usr/bin/python
# Copyright (c) 2014 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
version = "__VERSION__"
setup(
name="swift_undelete",
version=version,
description='Undelete middleware for OpenStack Swift',
license='Apache License (2.0)',
author='Samuel N. Merritt',
author_email='sam@swiftstack.com',
url='https://github.com/swiftstack/swift_undelete',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Environment :: No Input/Output (Daemon)'],
# Ubuntu packaging incorrectly detects this as a dependency on the
# "python-swift" package, which SwiftStack doesn't use. So commenting this
# out so SwiftStack can still use ${python:Depends}
#install_requires=["swift"],
test_suite='nose.collector',
tests_require=["nose"],
scripts=[],
entry_points={
'paste.filter_factory': ['undelete=swift_undelete:filter_factory']})
|
hanuprateek/django-jsonfield
|
setup.py
|
Python
|
mit
| 1,596
| 0.001253
|
from distutils.core import Command
from setuptools import setup
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
settings.configure(
DATABASES={'default': {'NAME': ':memory:', 'ENGINE': 'django.db.backends.sqlite3'}},
INSTALLED_APPS=('jsonfield',)
)
from django.core.management import call_command
import django
if django.VERSION[:2] >= (1, 7):
django.setup()
call_command('test', 'jsonfield')
setup(
name='jsonfield',
version='1.0.3',
packages=['jsonfield'],
license='MIT',
author='Brad Jasper',
author_email='bjasper@gmail.com',
url='https://github.com/bradjasper/django-jsonfield/',
description='A reusable Django field that allows you to store validated JSON in your model.',
    long_description=open("README.rst").read(),
install_requires=['Django >= 1.4.3'],
tests_require=['Django >= 1.4.3'],
cmdclass={'test': TestCommand},
classifiers=[
'Environment :: Web Environment',
        'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
)
|
ecreall/deform_treepy
|
deform_treepy/utilities/tree_utility.py
|
Python
|
agpl-3.0
| 11,176
| 0.000358
|
# -*- coding: utf-8 -*-
"""Tree utilities
"""
class TranslationKind(object):
in_ = 'in'
out_ = 'out'
def _normalize_branche(branche, node_mapping):
new_branches = []
if node_mapping:
node_id = node_mapping.get('node_id')
parts = node_id.split('/')
aliases = node_mapping.get('aliases')
for aliase in aliases:
new_branche = parts[:-1]
new_branche.append(aliase)
new_branche = '/'.join(new_branche)
new_branche = branche.replace(node_id, new_branche)
new_branches.append(new_branche)
return new_branches
def _get_mapping_for_branche(branche, mapping):
nor_branche = branche + '/'
nodes = [node for node in mapping
if nor_branche.find(node.get('node_id') + '/') >= 0]
ordered_nodes = sorted(
nodes, key=lambda e: len(e.get('node_id').split('/')),
reverse=True)
return ordered_nodes
def normalize_branche(branche, mapping):
map_ = _get_mapping_for_branche(branche, mapping)
branches = []
for node in map_:
branches.extend(_normalize_branche(branche, node))
return list(set(branches))
def normalize_branches_in(branches, mapping):
    result = list(branches)
for branche in branches:
branch_result = normalize_branche(branche, mapping)
if branch_result:
result.extend(branch_result)
            result.extend(normalize_branches_out(branch_result, mapping))
return list(set(result))
def normalize_branches_out(branches, mapping):
new_branches = list(branches)
for node_mapping in mapping:
node_id = node_mapping.get('node_id')
parts = node_id.split('/')
aliases = node_mapping.get('aliases')
for aliase in aliases:
branche_to_replace = parts[:-1]
branche_to_replace.append(aliase)
branche_to_replace = '/'.join(branche_to_replace)
_new_branches = []
for branche in new_branches:
if (branche + '/').find(branche_to_replace + '/') >= 0:
_new_branches.append(
branche.replace(branche_to_replace, node_id))
else:
_new_branches.append(branche)
new_branches = _new_branches
return list(set(new_branches))
def normalize_tree(tree, mapping, type_=TranslationKind.out_):
branches = get_branches(tree)
if type_ == TranslationKind.out_:
branches = normalize_branches_out(branches, mapping)
else:
branches = normalize_branches_in(branches, mapping)
return branches_to_tree(branches)
def normalize_keywords_in(keywords, mapping):
new_keywords = []
mapping_nodes = {
node_mapping.get('node_id').split('/')[-1].lower(): node_mapping
for node_mapping in mapping}
for keyword in list(keywords):
new_keyword = [keyword]
if keyword.lower() in mapping_nodes:
node_mapping = mapping_nodes[keyword.lower()]
new_keyword = list(node_mapping.get('aliases'))
new_keywords.extend(new_keyword)
return new_keywords
def normalize_keywords_out(keywords, mapping):
new_keywords = []
mapping_nodes = {}
for node_mapping in mapping:
for alias in node_mapping.get('aliases'):
mapping_nodes[alias] = node_mapping
for keyword in list(keywords):
new_keyword = [keyword]
if keyword.lower() in mapping_nodes:
node_mapping = mapping_nodes[keyword.lower()]
new_keyword = [node_mapping.get('node_id').split('/')[-1]]
new_keywords.extend(new_keyword)
return new_keywords
def normalize_keywords(keywords, mapping, type_=TranslationKind.out_):
if type_ == TranslationKind.in_:
return normalize_keywords_in(keywords, mapping)
else:
return normalize_keywords_out(keywords, mapping)
return keywords
def node_to_keywords(node_name, children, source):
new_source = node_name
if source is not None:
new_source = source + '/' + node_name
result = [node_name, new_source]
path = []
for child in children:
child_keywords, child_path = node_to_keywords(
child, children[child], new_source)
result.extend(child_keywords)
result.extend(child_path)
path.extend([node_name + '/' + k for k in child_path])
if not children:
path = [node_name]
return result, path
def tree_to_keywords(tree, include_path=True):
result = []
for node in tree:
node_keywords, node_path = node_to_keywords(node, tree[node], None)
result.extend(node_keywords)
result.extend(node_path)
if not include_path:
flattened_result = []
for node in result:
flattened_result.extend(node.split('/'))
result = flattened_result
return list(set(result))
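# Illustrative example (not from the original module): for the tree
# {'fruits': {'apple': {}}}, tree_to_keywords returns (in some order)
# ['fruits', 'apple', 'fruits/apple']; with include_path=False the
# 'fruits/apple' path entry is split, so only ['fruits', 'apple'] remain.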
def get_keywords_by_level(tree, root, iskeywords=False):
keywords = []
if iskeywords:
keywords = tree
else:
keywords = tree_to_keywords(tree)
branches = sorted([k.split('/') for k in keywords
if k.startswith(root.lower()) or
k.startswith(root)],
key=lambda e: len(e), reverse=True)
len_tree = len(branches[0])
result = {}
for index in range(len_tree):
result[index] = []
for branche in branches:
if len(branche) > index:
result[index].append(branche[index])
result[index] = list(set(result[index]))
return list(result.values())
def get_tree_nodes_by_level(tree):
all_nodes = []
nodes = [(n[0], list(n[1].keys())) for n in tree.items()]
all_nodes.append(nodes)
nodes_values = [[(key, value) for value in n.items()]
for key, n in list(tree.items())]
sub_nodes = [item for sublist in nodes_values for item in sublist]
while sub_nodes:
nodes = list([(n[0]+'-'+n[1][0], list(n[1][1].keys()))
for n in sub_nodes])
all_nodes.append(nodes)
nodes_values = [[(origine+'-'+key, value) for value in n.items()]
for origine, (key, n) in list([n for n in sub_nodes])]
sub_nodes = [item for sublist in nodes_values for item in sublist]
return all_nodes
def merge_nodes(node1_name, children1, node2_name, children2):
if node1_name != node2_name:
return {node1_name: children1.copy(),
node2_name: children2.copy()}
node = {node1_name: merge_tree(children1, children2)}
return node
def merge_tree(tree1, tree2, mapping=[]):
if tree2 and mapping:
tree2 = normalize_tree(tree2, mapping)
if not tree1:
return tree2
if not tree2:
return tree1
result_tree = {}
merged_nodes = []
for node in tree1:
nodes_to_merge = [n for n in tree2
if node == n]
if not nodes_to_merge:
result_tree.update({node: tree1[node].copy()})
else:
node_to_merge = nodes_to_merge[0]
result_tree.update(merge_nodes(node, tree1[node],
node_to_merge, tree2[node_to_merge]))
merged_nodes.append(node_to_merge)
nodes_to_merge = {n: tree2[n] for n in tree2 if n not in merged_nodes}
result_tree.update(nodes_to_merge)
return result_tree
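# Illustrative example (not from the original module):
# merge_tree({'fruits': {'apple': {}}}, {'fruits': {'pear': {}}})
# returns {'fruits': {'apple': {}, 'pear': {}}}; shared nodes are merged
# recursively and distinct children are kept side by side.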
def get_branches_node(node_name, children):
result = []
for child in children:
result.extend([node_name + '/' + k for k
in get_branches_node(child, children[child])])
if not children:
result = [node_name]
return result
def get_branches(tree):
result = []
for node in tree:
result.extend(get_branches_node(node, tree[node]))
return result
def get_all_branches(tree):
branches = get_branches(tree)
result = []
for branche in branches:
result.append(branche)
parts = branche.split('/')
while parts:
parts.pop()
result.append('/'.join(parts))
return list(set(result))
def bran
|
ratschlab/ASP
|
examples/undocumented/python_modular/classifier_knn_modular.py
|
Python
|
gpl-2.0
| 1,033
| 0.03969
|
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_multiclass.dat')
parameter_list = [[traindat,testdat,label_traindat,3],[traindat,testdat,label_traindat,3]]
def classifier_knn_modular(fm_train_real=traindat,fm_test_real=testd
|
at,label_train_multiclass=label_traindat, k=3 ):
from shogun.Features import RealFeatures, MulticlassLabels
from shogun.Classifier import KNN
from shogun.Distance import EuclidianDistance
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
dista
|
nce=EuclidianDistance(feats_train, feats_train)
labels=MulticlassLabels(label_train_multiclass)
knn=KNN(k, distance, labels)
knn_train = knn.train()
output=knn.apply(feats_test).get_labels()
multiple_k=knn.classify_for_multiple_k()
return knn,knn_train,output,multiple_k
if __name__=='__main__':
print('KNN')
classifier_knn_modular(*parameter_list[0])
|
lord63/getname
|
getname/__init__.py
|
Python
|
mit
| 385
| 0
|
#!
|
/usr/bin/env python
# -*- coding: utf-8 -*-
"""
getname
~~~~~~~
Get popular cat/dog/superhero/supervillain names.
:copyright: (c) 2015 by lord63.
:license: MIT, see LICENSE for more details.
"""
from getname.main import random_name
__title__ = "getname"
__version__ = '0.1.1'
__author__ = "lord63"
__license__ = "MIT"
__copyright__ = "Copyright 20
|
15 lord63"
|
eScatter/cstool
|
cstool/parse_input.py
|
Python
|
apache-2.0
| 7,511
| 0.001333
|
"""
Parses input files of the Cross-section tool, and generates valid input files
from (modified) settings in Python.
"""
from collections import OrderedDict
from ruamel import yaml
from cslib import (
units)
from cslib.settings import (
Type, Model, ModelType, Settings, each_value_conforms,
check_settings, generate_settings, parse_to_model)
from cslib.predicates import (
predicate,
is_string, is_integer, file_exists, has_units, is_none, is_)
from .phonon_loss import phonon_loss
from .elf import ELF
def pprint_settings(model, settings):
dumper = yaml.RoundTripDumper
dumper.add_representer(ELF, lambda dumper, data : dumper.represent_data(data.filename))
return yaml.dump(
generate_settings(settings),
indent=4, allow_unicode=True, Dumper=dumper)
def
|
quantity(description, uni
|
t_str, default=None):
return Type(description, default=default,
check=has_units(unit_str),
generator=lambda v: '{:~P}'.format(v),
parser=units.parse_expression)
def maybe_quantity(description, unit_str, default=None):
return Type(description, default=default,
check=is_none | has_units(unit_str),
generator=lambda v: v if v is None else '{:~P}'.format(v),
parser=lambda s: s if s is None else units.parse_expression(s))
element_model = Model([
('count', Type("Integer abundance", default=None,
check=is_integer)),
('Z', Type("Atomic number", default=None,
check=is_integer)),
('M', quantity("Molar mass", 'g/mol'))
])
phonon_branch_model = Model([
('alpha', maybe_quantity(
"Bending in dispersion relation. (TV Eq. 3.112)",
'm²/s', default=units('0 m²/s'))),
    ('eps_ac', quantity("Acoustic deformation potential", 'eV')),
('c_s', quantity("Speed of sound", 'km/s'))])
phonon_model = Model([
('model', Type(
"Whether the model is the `single` or `dual` mode.",
check=is_('single') | is_('dual'),
default="single")),
('m_eff', maybe_quantity(
"Effective mass.", 'g', default=units('1 m_e'))),
('m_dos', maybe_quantity(
"Density of state mass.", 'g', default=units('1 m_e'))),
('lattice', quantity("Lattice spacing", 'Å')),
('single', ModelType(
phonon_branch_model, "branch",
"Only given for single mode, parameters of model.")),
('longitudinal', ModelType(
phonon_branch_model, "branch",
"Only given for dual mode, parameters of model.")),
('transversal', ModelType(
phonon_branch_model, "branch",
"Only given for dual mode, parameters of model.")),
('energy_loss', maybe_quantity(
"Phonon loss.", 'eV',
default=phonon_loss)),
('E_BZ', maybe_quantity(
"Brioullon zone energy.", 'eV',
default=lambda s: (units.h**2 / (2*units.m_e * s.lattice**2))
.to('eV')))])
@predicate("Consistent branch model")
def phonon_check(s: Settings):
if s.model == 'single' and 'single' in s:
return True
if s.model == 'dual' and 'longitudinal' in s and 'transversal' in s:
return True
return False
@predicate("Consistent energy diagram")
def energy_check(s: Settings):
if s.model == 'insulator' or s.model == 'semiconductor':
if 'band_gap' in s and 'affinity' in s and 'work_func' not in s:
return True
if s.model == 'metal':
if 'band_gap' not in s and 'affinity' not in s and 'work_func' in s:
return True
return False
def get_barrier(s: Settings):
if s.model == 'insulator' or s.model == 'semiconductor':
if s.fermi > 0*units.eV:
return s.fermi + s.band_gap/2 + s.affinity
else:
return s.band_gap + s.affinity
if s.model == 'metal':
return s.fermi + s.work_func
# It should be impossible to get here, s.model is checked to be insul/semic/metal
return 0*units.eV
band_structure_model = Model([
('model', Type(
"Whether the material is of `insulator`, `semiconductor` or `metal` type."
" Insulators and semiconductors are treated in the same manner",
check=is_('insulator') | is_('semiconductor') | is_('metal'))),
('fermi', quantity("Fermi energy", 'eV')),
('barrier', quantity("Barrier energy", 'eV', default=get_barrier)),
# Metals
('work_func', maybe_quantity("Work function", 'eV')),
# Insulators / semiconductors
('affinity', maybe_quantity("Electron affinity", 'eV')),
('band_gap', maybe_quantity("Band gap", 'eV'))
])
cstool_model = Model([
('name', Type("Name of material", default=None,
check=is_string)),
('rho_m', quantity("Specific density", 'g/cm³')),
('band_structure', ModelType(
band_structure_model, "band_structure",
"Band structure of the material. There are two models: metals"
" and insulators (or semiconductors). Metals need a Fermi energy"
" and work function, insulators need a Fermi energy, band gap"
" and affinity. The barrier energy is calculated as Fermi +"
" work_func in the case of metals, or as Fermi + affinity +"
" band_gap/2 for insulators.",
check=energy_check, obligatory=True)),
('phonon', ModelType(
phonon_model, "phonon",
"We have two choices for modeling phonon scattering: single and"
" dual branch. The second option is important for crystaline"
" materials; we then split the scattering in transverse and"
" longitudinal modes.",
check=phonon_check, obligatory=True)),
('elf_file', Type(
"Filename of ELF data (Energy Loss Function). Data can be harvested"
" from http://henke.lbl.gov/optical_constants/getdb2.html.",
check=lambda s : True,
parser=lambda fname : ELF(fname))),
('elements', Type(
"Dictionary of elements contained in the substance.",
check=each_value_conforms(element_model, "element"),
parser=lambda d: OrderedDict((k, parse_to_model(element_model, v))
for k, v in d.items()),
generator=lambda d: yaml.comments.CommentedMap(
(k, generate_settings(v))
for k, v in d.items()))),
('M_tot', maybe_quantity(
"Total molar mass; this is computed from the `elements` entry.",
'g/mol',
default=lambda s: sum(e.M * e.count for e in s.elements.values()))),
('rho_n', maybe_quantity(
"Number density of atoms or molecules in compound. For instance "
"in the case of silicon dioxide this is the number density of "
"groups of two oxygen and one silicon atom, even if SiO2 is not "
"a molecule per se.", 'cm⁻³',
default=lambda s: (units.N_A / s.M_tot * s.rho_m).to('cm⁻³')))
])
cstool_model_type = ModelType(
cstool_model, "cstool",
"""The settings given to cstool should follow a certain hierarchy,
and each setting is required to have a particular dimensionality.""")
def read_input(filename):
raw_data = yaml.load(open(filename, 'r', encoding='utf-8'), Loader=yaml.RoundTripLoader)
settings = parse_to_model(cstool_model, raw_data)
if not check_settings(settings, cstool_model):
raise ValueError("Parsed settings do not conform the model.")
return settings
|
csxeba/ReSkiv
|
brainforge/costs/__init__.py
|
Python
|
gpl-3.0
| 22
| 0
|
f
|
rom ._costs import *
| |
liuxue1990/python-ll1-parser-generator
|
yaml_generator.py
|
Python
|
gpl-3.0
| 1,932
| 0.036232
|
from ll1_symbols import *
YAML_OUTPUT = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s
table: %s"
|
""
YAML_OUTPUT_NO_TABLE = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s"""
class YamlGenerator(object):
"""docstring for yaml_generator"""
def __init__(self, grammar):
self.grammar = grammar
def print_yaml(self, ll1_table = None):
def convert_list_str(a_list):
return "[%s]" % (", ".join(a_list))
def convert_dict_str(a_dict):
return "{%s}" % ", ".join(["%s: %s" % (key, value)
for key, value in a_di
|
ct.items()])
def convert_dict_dict_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_str(value))
for key, value in a_dict.items()]))
def convert_dict_list_str(a_dict):
return "{%s}" % (", \n ".join(["%s: %s" % (key, convert_list_str(value))
for key, value in a_dict.items()]))
def convert_dict_dict_list_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_list_str(value))
for key, value in a_dict.items()]))
if ll1_table:
return YAML_OUTPUT % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()),
convert_dict_dict_str(ll1_table))
else:
return YAML_OUTPUT_NO_TABLE % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()))
def convert_production(self):
return {idx : {production.left_hand.lexeme : [item.lexeme for item in production.right_hand if item.lexeme is not EPSILON]} for idx, production in enumerate(self.grammar.production)}
|
lafranceinsoumise/api-django
|
agir/donations/migrations/0007_auto_20190114_1514.py
|
Python
|
agpl-3.0
| 681
| 0
|
# Generated by Django 2.1.5 on 2019-01-14 14:14
|
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("donations", "0006_document_deleted")]
operations = [
migrations.AlterField(
model_name="spendingrequest",
name="created",
field=models.DateTimeField(
default=django.utils.timezone.now, verbose_name="created"
),
),
migrations.AlterFie
|
ld(
model_name="spendingrequest",
name="modified",
field=models.DateTimeField(auto_now=True, verbose_name="modified"),
),
]
|
Adrianzatreanu/coala-decorators
|
coala_decorators/decorators.py
|
Python
|
mit
| 11,053
| 0.00009
|
import inspect
from functools import total_ordering
def yield_once(iterator):
"""
Decorator to make an iterator yield each result only once.
:param iterator: Any iterator
:return: An iterator that yields every result only once at most.
"""
def yield_once_generator(*args, **kwargs):
yielded = []
for item in iterator(*args, **kwargs):
if item in yielded:
pass
else:
yielded.append(item)
yield item
return yield_once_generator
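# Illustrative usage (not part of the original module):
# @yield_once
# def numbers():
#     for n in (1, 1, 2, 2, 3):
#         yield n
# list(numbers())  # -> [1, 2, 3]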
def _to_list(var):
"""
    Convert a variable to a list.
:param var: variable of any type
:return: list
"""
if isinstance(var, list):
return var
elif var is None:
return []
elif isinstance(var, str) or isinstance(var, dict):
        # We don't want to make a list out of those via the default constructor
return [var]
else:
try:
return list(var)
except TypeError:
return [var]
def arguments_to_lists(function):
"""
Decorator for a function that converts all arguments to lists.
:param function: target function
:return: target function with only lists as parameters
"""
def l_function(*args, **kwargs):
l_args = [_to_list(arg) for arg in args]
l_kwargs = {}
for key, value in kwargs.items():
l_kwargs[key] = _to_list(value)
return function(*l_args, **l_kwargs)
return l_function
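# Illustrative usage (not part of the original module):
# @arguments_to_lists
# def count(items):
#     return len(items)
# count("abc")    # -> 1, the string is wrapped as ["abc"]
# count((1, 2))   # -> 2, the tuple is converted to [1, 2]
# count(None)     # -> 0, None becomes []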
def _get_member(obj, member):
# If not found, pass AttributeError to invoking function.
attribute = getattr(obj, member)
if callable(attribute) and hasattr(attribute, "__self__"):
# If the value is a bound method, invoke it like a getter and return
# its value.
try:
return attribute()
except TypeError:
# Don't use repr() to display the member more accurately, because
# invoking repr() on a bound method prints in this format:
# <bound method CLASS.METHOD of **repr(instance)**>
# This invokes repr() recursively.
raise TypeError("Given bound method '" + member + "' must be "
"callable like a getter, taking no arguments.")
else:
# Otherwise it's a member variable or property (or any other attribute
# that holds a value).
return attribute
def _construct_repr_string(obj, members):
# The passed entries have format (member-name, repr-function).
values = ", ".join(member + "=" + func(_get_member(obj, member))
for member, func in members)
return ("<" + type(obj).__name__ + " object(" + values + ") at "
+ hex(id(obj)) + ">")
def get_public_members(obj):
"""
    Retrieves the member-like attributes (variables or properties) that are
    publicly exposed.
    :param obj: The object to probe.
    :return: A dict mapping member names to their values.
"""
return {attr: getattr(obj, attr) for attr in dir(obj)
if not attr.startswith("_")
and not hasattr(getattr(obj, attr), '__call__')}
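# Illustrative example (not part of the original module): for an object with
# obj.name = "x", obj._hidden = 1 and a method obj.run(), get_public_members(obj)
# returns {'name': 'x'}; callables and underscore-prefixed attributes are skipped.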
def generate_repr(*members):
"""
Decorator that binds an auto-generated ``__repr__()`` function to a class.
The generated ``__repr__()`` function prints in following format:
<ClassName object(field1=1, field2='A string', field3=[1, 2, 3]) at 0xAAAA>
Note that this decorator modifies the given class in place!
:param members: An iterable of member names to include into the
                        representation-string. Providing no members results
                        in the inclusion of all member variables and properties
in alphabetical order (except if they start with an
underscore).
To control the representation of each member, you
can also pass a tuple where the first element
contains the member to print and the second one the
representation function (which defaults to the
built-in ``repr()``). Using None as representation
function is the same as using ``repr()``.
Supported members are fields/variables, properties
and getter-like functions (functions that accept no
arguments).
:raises ValueError: Raised when the passed
(member, repr-function)-tuples have not a length of
2.
:raises AttributeError: Raised when a given member/attribute was not found
in class.
:raises TypeError: Raised when a provided member is a bound method
that is not a getter-like function (means it must
accept no parameters).
:return: The class armed with an auto-generated __repr__
function.
"""
def decorator(cls):
cls.__repr__ = __repr__
return cls
if members:
# Prepare members list.
members_to_print = list(members)
for i, member in enumerate(members_to_print):
if isinstance(member, tuple):
# Check tuple dimensions.
length = len(member)
if length == 2:
members_to_print[i] = (member[0],
member[1] if member[1] else repr)
else:
raise ValueError("Passed tuple " + repr(member) +
" needs to be 2-dimensional, but has " +
str(length) + " dimensions.")
else:
members_to_print[i] = (member, repr)
def __repr__(self):
return _construct_repr_string(self, members_to_print)
else:
def __repr__(self):
# Need to fetch member variables every time since they are unknown
            # until class instantiation.
members_to_print = get_public_members(self)
member_repr_list = ((member, repr) for member in
sorted(members_to_print, key=str.lower))
return _construct_repr_string(self, member_repr_list)
return decorator
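# Illustrative usage (not part of the original module):
# @generate_repr("x", "y")
# class Point(object):
#     def __init__(self, x, y):
#         self.x = x
#         self.y = y
# repr(Point(1, 2))  # -> "<Point object(x=1, y=2) at 0x...>"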
def generate_eq(*members):
"""
Decorator that generates equality and inequality operators for the
decorated class. The given members as well as the type of self and other
will be taken into account.
Note that this decorator modifies the given class in place!
:param members: A list of members to compare for equality.
"""
def decorator(cls):
def eq(self, other):
if type(other) is not type(self):
return False
return all(getattr(self, member) == getattr(other, member)
for member in members)
def ne(self, other):
return not eq(self, other)
cls.__eq__ = eq
cls.__ne__ = ne
return cls
return decorator
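# Illustrative usage (not part of the original module):
# @generate_eq("a", "b")
# class Pair(object):
#     def __init__(self, a, b):
#         self.a = a
#         self.b = b
# Pair(1, 2) == Pair(1, 2)  # -> True
# Pair(1, 2) == Pair(1, 3)  # -> False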
def generate_ordering(*members):
"""
Decorator that generates ordering operators for the decorated class based
on the
|
given member names. All ordering except equality functions will
raise a TypeError when a comparison with an unrelated class is attempted.
(Comparisons with child classes will thus work fine with the capabilities
    of the base class, as Python will choose the base class's comparison
operator in that case.)
Note that this decorator modifies the given class in place!
:param members: A list of members to compare, ordered from high priority to
|
low. I.e. if the first member is equal the second will be
taken for comparison and so on. If a member is None it is
considered smaller than any other value except None.
"""
def decorator(cls):
def lt(self, other):
if not isinstance(other, cls):
raise TypeError("Comparison wi
|
imthomasking/MATLAB-files
|
Python/Project.Euler/Answers.Python/48.py
|
Python
|
mit
| 391
| 0.048593
|
# problem 48
# Project Euler
__author__ = 'Libao Jin'
__date__ = 'July 18, 2015'
def lastTenDigits(number):
string = str(number)
lastTen = int
|
(string[-10:])
return lastTen
def amazingSum(n):
s = 0
while n >= 1:
s += n ** n
n -= 1
return s
def selfPowers(n):
s = amazingSum(n)
l = lastTenDigits(s)
return (l, s)
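# Small worked example (illustrative, not from the original file):
# amazingSum(3) = 1**1 + 2**2 + 3**3 = 32
# lastTenDigits(12345678901) = 2345678901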
def solution():
|
ls = selfPowers(1000)
print(ls)
solution()
|
andreimuntean/LinearRegression
|
LinearRegression/datahelpers.py
|
Python
|
mit
| 1,252
| 0.0088
|
#!/usr/bin/python3
"""datahelpers.py: Provides functions for handling data."""
__author__ = 'Andrei Muntean'
__license__ = 'MIT License'
import numpy as np
def shuffle_data(x, y):
random_indexes = np.random.permutation(x.shape[0])
shuffled_x = np.empty_like(x)
shuffled_y = np.empty_like(y)
for index in range(0, shuffled_x.shape[0]):
random_index = random_indexes[index]
shuffled_x[index] = x[random_index]
shuffled_y[inde
|
x] = y[random_index]
    return shuffled_x, shuffled_y
def split_data(x, y, threshold = 0.7, shuffle = True):
"""Generates training and tests sets from the specified data."""
if shuffle:
x, y = shuffle_data(x, y)
pivot_index = round(threshold * x.shape[0])
training_data = {
'x': x[0 : pivot_index],
'y': y[0 : pivot_index]
}
test_data = {
'x': x[pivot_index:],
'y': y[pivot_index:
|
]
}
return training_data, test_data
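# Illustrative usage (assumed data, not from the original file):
# x = np.arange(20).reshape(10, 2)
# y = np.arange(10)
# training_data, test_data = split_data(x, y, threshold=0.7)
# training_data['x'].shape  # -> (7, 2)
# test_data['x'].shape      # -> (3, 2)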
def read_data(path):
"""Reads csv-formatted data from the specified path."""
data = np.loadtxt(path, delimiter = ',')
# Gets the dependent variables. They're stored in the first column.
y = data[:, 0]
# Gets the independent variables.
x = data[:, 1:]
return x, y
|
anushbmx/kitsune
|
kitsune/users/api.py
|
Python
|
bsd-3-clause
| 16,254
| 0.000984
|
import random
import re
from datetime import datetime, timedelta
from string import letters
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.db.models import Q, Count
from django.utils.encoding import force_text
from django.utils.http import int_to_base36
from django.views.decorators.http import require_GET
import waffle
from django_statsd.clients import statsd
import pytz
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, serializers, mixins, filters, permissions, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes, action
from rest_framework.authtoken.models import Token
from kitsune.access.decorators import login_required
from kitsune.questions.models import Answer
from kitsune.questions.ut
|
ils import num_answers, num_solutions, num_questions
from kitsune.sumo import email_utils
from kitsune.sumo.api_utils import DateTimeUTCField, GenericAPIException, PermissionMod
from kitsune.sumo.decorators import json_view
from kitsune.users.templatetags.jinja_helpers import profile_avatar
from kitsune.users.model
|
s import Profile, RegistrationProfile, Setting
def display_name_or_none(user):
try:
return user.profile.name
except (Profile.DoesNotExist, AttributeError):
return None
class TimezoneField(serializers.Field):
def to_representation(self, obj):
return force_text(obj)
def to_internal_value(self, data):
try:
return pytz.timezone(str(data))
except pytz.exceptions.UnknownTimeZoneError:
raise ValidationError('Unknown timezone')
@login_required
@require_GET
@json_view
def usernames(request):
"""An API to provide auto-complete data for user names."""
term = request.GET.get('term', '')
query = request.GET.get('query', '')
pre = term or query
if not pre:
return []
if not request.user.is_authenticated():
return []
with statsd.timer('users.api.usernames.search'):
profiles = (
Profile.objects.filter(Q(name__istartswith=pre))
.values_list('user_id', flat=True))
users = (
User.objects.filter(
Q(username__istartswith=pre) | Q(id__in=profiles))
.extra(select={'length': 'Length(username)'})
.order_by('length').select_related('profile'))
if not waffle.switch_is_active('users-dont-limit-by-login'):
last_login = datetime.now() - timedelta(weeks=12)
users = users.filter(last_login__gte=last_login)
return [{'username': u.username,
'display_name': display_name_or_none(u),
'avatar': profile_avatar(u, 24)}
for u in users[:10]]
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def test_auth(request):
return Response({
'username': request.user.username,
'authorized': True,
})
class OnlySelf(permissions.BasePermission):
"""
Only allows operations when the current user is the object in question.
Intended for use with PermissionsFields.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
def has_object_permission(self, request, view, obj):
request_user = getattr(request, 'user', None)
user = getattr(obj, 'user', None)
return request_user == user
class OnlySelfEdits(OnlySelf):
"""
Only allow users/profiles to be edited and deleted by themselves.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
def has_object_permission(self, request, view, obj):
# SAFE_METHODS is a list containing all the read-only methods.
if request.method in permissions.SAFE_METHODS:
return True
else:
return super(OnlySelfEdits, self).has_object_permission(request, view, obj)
class UserSettingSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(
required=False,
write_only=True,
queryset=User.objects.all())
class Meta:
model = Setting
fields = ('name', 'value', 'user')
def get_identity(self, obj):
return obj['name']
def create(self, data):
user = data['user'] or self.context['view'].object
obj, created = self.Meta.model.objects.get_or_create(
user=user, name=data['name'], defaults={'value': data['value']})
if not created:
obj.value = data['value']
obj.save()
return obj
def update(self, instance, data):
for key in self.Meta.fields:
setattr(instance, key, data.get(key, getattr(instance, key)))
instance.save()
return instance
class ProfileSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(source='user.id', read_only=True)
username = serializers.CharField(source='user.username')
display_name = serializers.CharField(source='name', required=False)
date_joined = DateTimeUTCField(source='user.date_joined', read_only=True)
avatar = serializers.SerializerMethodField('get_avatar_url')
email = (PermissionMod(serializers.EmailField, permissions=[OnlySelf])
(source='user.email', required=True))
settings = (PermissionMod(UserSettingSerializer, permissions=[OnlySelf])
(many=True, read_only=True))
helpfulness = serializers.ReadOnlyField(source='answer_helpfulness')
answer_count = serializers.SerializerMethodField()
question_count = serializers.SerializerMethodField()
solution_count = serializers.SerializerMethodField()
last_answer_date = serializers.SerializerMethodField()
is_active = serializers.BooleanField(source='user.is_active', read_only=True)
# This is a write only field. It is very important it stays that way!
password = serializers.CharField(source='user.password', write_only=True)
timezone = TimezoneField(required=False)
class Meta:
model = Profile
fields = [
'answer_count',
'avatar',
'bio',
'city',
'country',
'date_joined',
'display_name',
'email',
'facebook',
'helpfulness',
'id',
'irc_handle',
'is_active',
'last_answer_date',
'locale',
'mozillians',
# Password is here so it can be involved in write operations. It is
# marked as write-only above, so will not be visible.
'password',
'question_count',
'settings',
'solution_count',
'timezone',
'twitter',
'username',
'website',
]
def get_avatar_url(self, profile):
request = self.context.get('request')
size = request.GET.get('avatar_size', 48) if request else 48
return profile_avatar(profile.user, size=size)
def get_question_count(self, profile):
return num_questions(profile.user)
def get_answer_count(self, profile):
return num_answers(profile.user)
def get_solution_count(self, profile):
return num_solutions(profile.user)
def get_last_answer_date(self, profile):
last_answer = profile.user.answers.order_by('-created').first()
return last_answer.created if last_answer else None
def validate(self, data):
if data.get('name') is None:
username = data.get('user', {}).get('username')
data['name'] = username
return data
def create(self, validated_data):
user_data = validated_data.pop('user')
u = RegistrationProfile.objects.create_inactive_user(
user_data['username'],
user_data['password'],
user_data['
|
rbeardow/boki
|
boki/user.py
|
Python
|
mit
| 1,427
| 0.004905
|
from pushyou import db
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), unique=True)
status = db.Column(db.SmallInteger)
account_type = db.Column(db.SmallInteger) # Staff,Client,
email = db.Column(db.String(255), unique=True)
password_hash = db.Column(db.String(255))
password_salt = db.Column(db.String(255))
bus
|
iness_name = db.Column(db.String(255))
business_abn = db.Column(db.String(255))
contact_name = db.Column(db.String(255))
contact_phone = db.Column(db.String(255))
address_line1 = db.Column(db.String(255))
address_line2 = db.Column(db.String(255))
address_suburb = db.Column(db.String(255))
address_state = db.Column(db.String(255))
address_postcode = db.Column(db.String(255))
plan = db.Column(db.SmallInteger) # Basic
|
? Gold?
max_sites = db.Column(db.SmallInteger)
max_active_promo = db.Column(db.SmallInteger)
max_promo_per_site = db.Column(db.SmallInteger)
create_date = db.Column(db.Date)
update_date = db.Column(db.Date)
last_login_date = db.Column(db.Date)
last_login_ip = db.Column(db.Date)
locations = db.relationship('Location', backref='user', lazy='dynamic')
promotions = db.relationship('Promotion', backref='user', lazy='dynamic')
def __repr__(self):
return '<User %r>' % (self.username)
|
defivelo/db
|
apps/challenge/views/settings.py
|
Python
|
agpl-3.0
| 3,051
| 0.000657
|
# defivelo-intranet -- Business tool for managing the Défi Vélo
# Copyright (C) 2020 Didier 'OdyX' Raboud <didier.raboud@liip.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView
from django.views.generic.edit import CreateView, UpdateView
from rolepermissions.mixins import HasPermissionsMixin
from apps.common import DV_STATES
from defivelo.roles import has_permission
from defivelo.views import MenuView
from ..forms import AnnualStateSettingForm
from ..models import AnnualStateSetting
class SettingsMixin(HasPermissionsMixin):
model = AnnualStateSetting
required_permission = "settings_crud"
def dispatch(self, request, *args, **kwargs):
self.year = kwargs.pop("year")
self.cantons = (
DV_STATES
if has_permission(request.user, "cantons_all")
else self.request.user.managedstates.all().values_list("canton", flat=True)
)
return super().dispatch(request, *args, **kwargs)
def get_queryset(self):
return super().get_que
|
ryset().filter(year=self.year, canton__in=self.cantons)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Add our menu_category context
context["menu_category"] = "settings"
context["year"] = self.year
return context
def get_form_kwargs(self):
|
form_kwargs = super().get_form_kwargs()
form_kwargs["year"] = self.year
form_kwargs["cantons"] = self.cantons
return form_kwargs
def get_success_url(self):
return reverse_lazy(
"annualstatesettings-list", kwargs={"year": self.object.year}
)
class AnnualStateSettingsListView(SettingsMixin, MenuView, ListView):
context_object_name = "settings"
ordering = ["canton"]
class AnnualStateSettingMixin(SettingsMixin, SuccessMessageMixin, MenuView):
context_object_name = "setting"
form_class = AnnualStateSettingForm
class AnnualStateSettingCreateView(AnnualStateSettingMixin, CreateView):
success_message = _("Configuration cantonale par année créée")
class AnnualStateSettingUpdateView(AnnualStateSettingMixin, UpdateView):
success_message = _("Configuration cantonale par année mise à jour")
|
democrats/new-channel-bot
|
tests/test_new_channel_bot.py
|
Python
|
mit
| 4,261
| 0
|
""" Tests for new channel bot """
import datetime
import time
import unittest
import mock
import new_channel_bot
def _make_fake_api(channels, posts):
"""
Construct a fake slack API
    Args:
        channels (dict): List of channels to mock in.
        posts (array): Collection of all calls to the postMessage api.
"""
def api(method, **kwargs):
"""
Simple fake API for the methods we care about
Args:
method (string) Slack API method.
**kwargs: Arbitrary keyword arguments.
"""
if method == 'channels.list':
return channels
elif method == 'chat.postMessage':
posts.append(kwargs)
return
else:
raise Exception('Unexpected method: {}'.format(method))
return api
class NewChannelBotTests(unittest.TestCase):
""" Tests for new channel bot """
@mock.patch.object(new_channel_bot.slackclient.SlackClient, 'api_call')
def test_skips_old_channels(self, api):
""" Verify we only post new channels """
posts = []
old_channel_time = (
time.time() - datetime.timedelta(days=2).total_seconds()
)
new_channel_time = (
time.time() - datetime.timedelta(hours=23).total_seconds()
)
channels = {
'channels': [
{
'name': 'old-channel',
'purpose': {'value': 'not recently made!'},
'id': '1',
'created': old_channel_time
},
{
'name': 'new-channel',
'purpose': {'value': 'recently made!'},
'id': '2',
'created': new_channel_time
}
]
}
api.side_effect = _make_fake_api(channels, posts)
new_channel_bot.post_new_channels(channels, '#__TEST__')
self.assertEqual(len(posts), 1)
self.assertEqual('#__TEST__', posts[0].get('channel'))
self.assertIn('new-channel', posts[0].get('text'))
@mock.patch.object(new_channel_bot.slackclient.SlackClient, 'api_call')
def test_message_formatting(self, api):
""" Verify that we properly format messages """
posts = []
channels = {
'channels': [
{
'name': 'really-purposeless',
'id': '1',
'created': time.time()
},
{
'name': 'purposeless',
'purpose': {'value': ''},
'id': '2',
'created': time.time()
},
{
'name': 'purposeful',
'purpose': {'value': 'recently made!'},
'id': '3',
'created': time.time()
}
]
}
api.side_effect = _make_fake_api(channels, posts)
new_channel_bot.post_new_channels(channels, '#__TEST__')
self.assertEqual(len(posts), 3)
self.assertEqual(
'New channel <#1|really-purposeless>',
posts[0].get('text')
)
self.assertEqual(
'New channel <#2|purposeless>',
posts[1].get('text')
)
self.assertEqual(
"New channel <#3|purposeful>. Purpose: 'recently made!'",
posts[2].get('text')
)
@mock.patch.object(new_channel_bot.slackclient.SlackClient, 'api_call')
def test_unicode(self, api):
""" Tests that we can handle unicode names ""
|
"
posts = [
|
]
channels = {
'channels': [
{
'name': u'\U0001f604',
'id': '1',
'created': time.time(),
'purpose': {'value': u'something\U0001f604'},
}
]
}
api.side_effect = _make_fake_api(channels, posts)
new_channel_bot.post_new_channels(channels, '#__TEST__')
self.assertEqual(len(posts), 1)
self.assertEqual(
u"New channel <#1|\U0001f604>. Purpose: 'something\U0001f604'",
posts[0].get('text')
)
|
Azure/azure-sdk-for-python
|
sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_sync_groups_operations.py
|
Python
|
mit
| 47,878
| 0.004511
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._sync_groups_operations import build_cancel_sync_request, build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_database_request, build_list_hub_schemas_request, build_list_logs_request, build_list_sync_database_ids_request, build_refresh_hub_schema_request_initial, build_trigger_sync_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SyncGroupsOperations:
"""SyncGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_sync_database_ids(
self,
location_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SyncDatabaseIdListResult"]:
"""Gets a collection of sync database ids.
:param location_name: The name of the region where the resource is located.
:type location_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SyncDatabaseIdListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.SyncDatabaseIdListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncDatabaseIdListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sync_database_ids_request(
location_name=location_name,
subscription_id=self._config.subscription_id,
template_url=self.list_sync_database_ids.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sync_database_ids_request(
lo
|
cation_name=location_name,
subscription_id=self._config.subscription_id,
template
|
_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SyncDatabaseIdListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sync_database_ids.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationName}/syncDatabaseIds'} # type: ignore
async def _refresh_hub_schema_initial(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_refresh_hub_schema_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=self._refresh_hub_schema_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_refresh_hub_schema_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/refreshHubSchema'} # type: ignore
@distributed_trace_async
async def begin_refresh_hub_schema(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Refreshes a hub database schema.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type s
|
bptripp/grasp-convnet
|
py/cninit.py
|
Python
|
mit
| 7,083
| 0.009318
|
__author__ = 'bptripp'
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
"""
Initialization of CNNs via clustering of inputs and convex optimization
of outputs.
"""
def sigmoid(x, centre, gain):
y = 1 / (1 + np.exp(-gain*(x-centre)))
return y
def gaussian(x, mu, sigma):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sigma, 2.)))
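# Note (added comment): this is the unnormalised Gaussian bump
# exp(-(x - mu)^2 / (2 * sigma^2)); the 1/(sigma*sqrt(2*pi)) factor is omitted.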
def get_sigmoid_params(false_samples, true_samples, do_plot=False):
"""
Find gain and bias for sigmoid function that approximates probability
of class memberships. Probability based on Bayes' rule & gaussian
model of samples from each class.
"""
false_mu = np.mean(false_samples)
false_sigma = np.std(false_samples)
true_mu = np.mean(true_samples)
true_sigma = np.std(true_samples)
lowest = np.minimum(np.min(false_samples), np.min(true_samples))
highest = np.maximum(np.max(false_samples), np.max(true_samples))
a = np.arange(lowest, highest, (highest-lowest)/25)
p_x_false = gaussian(a, false_mu, false_sigma)
p_x_true = gaussian(a, true_mu, true_sigma)
p_x = p_x_true + p_x_false
p_true = p_x_true / p_x
popt, _ = curve_fit(sigmoid, a, p_true)
centre, gain = popt[0], popt[1]
if do_plot:
plt.hist(false_samples, a)
plt.hist(true_samples, a)
plt.plot(a, 100*sigmoid(a, centre, gain))
plt.plot(a, 100*p_true)
plt.title('centre: ' + str(centre) + ' gain: ' + str(gain))
plt.show()
return centre, gain
def check_sigmoid():
n = 1000
false_samples = 1 + .3*np.random.randn(n)
true_samples = -1 + 1*np.random.randn(n)
centre, gain = get_sigmoid_params(false_samples, true_samples, do_plot=True)
def get_convolutional_prototypes(samples, shape, patches_per_sample=5):
assert len(samples.shape) == 4
assert len(shape) == 4
wiggle = (samples.shape[2]-shape[2], samples.shape[3]-shape[3])
patches = []
for sample in samples:
for i in range(patches_per_sample):
corner = (np.random.randint(0, wiggle[0]), np.random.randint(0, wiggle[1]))
patches.append(sample[:,corner[0]:corner[0]+shape[2],corner[1]:corner[1]+shape[3]])
patches = np.array(patches)
flat = np.reshape(patches, (patches.shape[0], -1))
km = KMeans(shape[0])
km.fit(flat)
kernels = km.cluster_centers_
    # normalize each cluster centre (kernel) to unit L2 norm
for i in range(kernels.shape[0]):
kernels[i,:] = kernels[i,:] / np.linalg.norm(kernels[i,:])
return np.reshape(kernels, shape)
def get_dense_prototypes(samples, n):
km = KMeans(n)
km.fit(samples)
return km.cluster_centers_
def check_get_prototypes():
samples = np.random.rand(1000, 2, 28, 28)
prototypes = get_convolutional_prototypes(samples, (20,2,5,5))
print(prototypes.shape)
samples = np.random.rand(900, 2592)
prototypes = get_dense_prototypes(samples, 64)
print(prototypes.shape)
def get_discriminant(samples, labels):
lda = LinearDiscriminantAnalysis(solver='eigen', shrinkage='auto')
lda.fit(samples, labels)
return lda.coef_[0]
def check_discriminant():
n = 1000
labels = np.random.rand(n) < 0.5
samples = np.zeros((n,2))
for i in range(len(labels)):
if labels[i] > 0.5:
samples[i,:] = np.array([0,1]) + 1*np.random.randn(1,2)
else:
samples[i,:] = np.array([-2,-1]) + .5*np.random.randn(1,2)
coeff = get_discriminant(samples, labels)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(samples[labels>.5,0], samples[labels>.5,1], color='g')
plt.scatter(samples[labels<.5,0], samples[labels<.5,1], color='r')
plt.plot([-coeff[0], coeff[0]], [-coeff[1], coeff[1]], color='k')
plt.subplot(1,2,2)
get_sigmoid_params(np.dot(samples[labels<.5], coeff),
np.dot(samples[labels>.5], coeff),
do_plot=True)
plt.show()
def init_model(model, X_train, Y_train):
if not (isinstance(model.layers[-1], Activation) \
and model.layers[-1].activation.__name__ == 'sigmoid'\
and isinstance(model.layers[-2], Dense)):
raise Exception('This does not look like an LDA-compatible network, which is all we support')
for i in range(len(model.layers)-2):
if isinstance(model.layers[i], Convolution2D):
inputs = get_inputs(model, X_train, i)
w, b = model.layers[i].get_weights()
w = get_convolutional_prototypes(inputs, w.shape)
b = .1 * np.ones_like(b)
model.layers[i].set_weights([w,b])
if isinstance(model.layers[i], Dense):
inputs = get_inputs(model, X_train, i)
w, b = model.layers[i].get_weights()
w = get_dense_prototypes(inputs, w.shape[1]).T
b = .1 * np.ones_like(b)
model.layers[i].set_weights([w,b])
inputs = get_inputs(model, X_train, len(model.layers)-3)
coeff = get_discriminant(inputs, Y_train)
centre, gain = get_sigmoid_params(np.dot(inputs[Y_train<.5], coeff),
np.dot(inputs[Y_train>.5], coeff))
w = coeff*gain
w = w[:,np.newaxis]
b = np.array([-centre])
model.layers[-2].set_weights([w,b])
sigmoid_inputs = get_inputs(model, X_train, len(model.layers)-1)
plt.figure()
plt.subplot(2,1,1)
bins = np.arange(np.min(Y_train), np.max(Y_train))
plt.hist(sigmoid_inputs[Y_train<.5])
plt.subplot(2,1
|
,2)
plt.hist(sigmoid_inputs[Y_train>.5])
plt.show()
def get_inputs(model, X_train, layer):
if layer == 0:
return X_train
else:
partial_model = Sequential(layers=model.layers[:layer])
partial_model.compile('sgd', 'mse')
return partial_model.p
|
redict(X_train)
if __name__ == '__main__':
# check_sigmoid()
# check_get_prototypes()
# check_discriminant()
import cPickle
f = file('../data/bowl-test.pkl', 'rb')
# f = file('../data/depths/24_bowl-29-Feb-2016-15-01-53.pkl', 'rb')
d, bd, l = cPickle.load(f)
f.close()
d = d - np.mean(d.flatten())
d = d / np.std(d.flatten())
# n = 900
n = 90
X_train = np.zeros((n,1,80,80))
X_train[:,0,:,:] = d[:n,:,:]
Y_train = l[:n]
model = Sequential()
model.add(Convolution2D(64,9,9,input_shape=(1,80,80)))
model.add(Activation('relu'))
model.add(MaxPooling2D())
# model.add(Convolution2D(64,3,3))
# model.add(Activation('relu'))
# model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
init_model(model, X_train, Y_train)
# from visualize import plot_kernels
# plot_kernels(model.layers[0].get_weights()[0])
|
guorendong/iridium-browser-ubuntu
|
tools/telemetry/telemetry/util/find_dependencies.py
|
Python
|
bsd-3-clause
| 9,310
| 0.009774
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import benchmark
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.util import bootstrap
from telemetry.util import cloud_storage
from telemetry.util import path
from telemetry.util import path_set
DEPS_FILE = 'bootstrap_deps'
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(os.path.realpath(os.path.join(
path.GetChromiumSrcDir(), os.pardir, deps_path))
for deps_path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s' % module_path)
# Load the module to inherit its sys.path modifications.
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
finder = modulefinder.ModuleFinder()
finder.run_script(module_path)
# Filter for only imports in Chromium.
for module in finder.modules.itervalues():
# If it's an __init__.py, module.__path__ gives the package's folder.
module_path = module.__path__[0] if module.__path__ else module.__file__
if not module_path:
continue
module_path = os.path.realpath(module_path)
if not path.IsSubpath(module_
|
path, path.GetChromiumSrcDir()):
continue
yield module_path
def FindPageSetDependencies(base_dir):
logging.info('Finding page sets in %s' % base_dir)
# Add base_dir to path so our imports relative to base_dir will work.
sys.path.append(base_dir)
tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
|
index_by_class_name=True)
for test_class in tests.itervalues():
test_obj = test_class()
# Ensure the test's default options are set if needed.
parser = optparse.OptionParser()
test_obj.AddCommandLineArgs(parser, None)
options = optparse.Values()
for k, v in parser.get_default_values().__dict__.iteritems():
options.ensure_value(k, v)
# Page set paths are relative to their runner script, not relative to us.
path.GetBaseDir = lambda: base_dir
# TODO: Loading the page set will automatically download its Cloud Storage
# deps. This is really expensive, and we don't want to do this by default.
page_set = test_obj.CreatePageSet(options)
# Add all of its serving_dirs as dependencies.
for serving_dir in page_set.serving_dirs:
yield serving_dir
def FindExcludedFiles(files, options):
# Define some filters for files.
def IsHidden(path_string):
for pathname_component in path_string.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path_string):
return os.path.splitext(path_string)[1] == '.pyc'
def IsInCloudStorage(path_string):
return os.path.exists(path_string + '.sha1')
def MatchesExcludeOptions(path_string):
for pattern in options.exclude:
if (fnmatch.fnmatch(path_string, pattern) or
fnmatch.fnmatch(os.path.basename(path_string), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for file_path in files:
if any(condition(file_path) for condition in exclude_conditions):
yield file_path
def FindDependencies(target_paths, options):
# Verify arguments.
for target_path in target_paths:
if not os.path.exists(target_path):
raise ValueError('Path does not exist: %s' % target_path)
dependencies = path_set.PathSet()
# Including Telemetry's major entry points will (hopefully) include Telemetry
# and all its dependencies. If the user doesn't pass any arguments, we just
# have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(), 'telemetry', 'benchmark_runner.py')))
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(),
'telemetry', 'unittest_util', 'run_tests.py')))
dependencies |= FindBootstrapDependencies(path.GetTelemetryDir())
# Add dependencies.
for target_path in target_paths:
base_dir = os.path.dirname(os.path.realpath(target_path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(target_path)
if options.include_page_set_data:
dependencies |= FindPageSetDependencies(base_dir)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(target_paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(path.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for dependency_path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(dependency_path, base_dir))
zip_file.write(dependency_path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for target_path in target_paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(target_path)))
link_info.create_system = 3 # Unix attributes.
# 010 is regular file, 0111 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(target_path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
# Add gsutil to the archive, if it's available. The gsutil in
# depot_tools is modified to allow authentication using prodaccess.
# TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
# will include it. Then there will be two copies of gsutil at the same
# location in the archive. This can be confusing for users.
gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
if cloud_storage.SupportsProdaccess(gsutil_path):
gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
gsutil_dependencies = path_set.PathSet()
gsutil_dependencies.add(os.path.dirname(gsutil_path))
# Also add modules from depot_tools that are needed by gsutil.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'fancy_urllib'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator'))
gsutil_dependencies -= FindExcludedFiles(
set(gsutil_dependencies), options)
# Also add upload.py to the archive from depot_tools, if it is available.
# This allows us to post patches without requiring a full depot_tools
# install. There's no real point in including upload.py if we do not
# also have gsutil, which is why this is inside the gsutil block.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))
for dependency_path in gsutil_dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(path.GetTelemetryDir(), base_dir),
'third_party', os.path.relpath(dependency_path, gsutil_base_dir))
zip_file.write(dependency_path, path_in_archive)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser, _):
parser.add_option(
|
shashi792/courtlistener
|
alert/alerts/forms.py
|
Python
|
agpl-3.0
| 1,815
| 0
|
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from django.forms.widgets import HiddenInput, TextInput, Select, CheckboxInput
from alert.userHandling.models import Alert
class CreateAlertForm(ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(CreateAlertForm, self).__init__(*args, **kwargs)
def clean_rate(self):
rate = self.cleaned_data['rate']
not_donated_enough = self.user.profile.total_donated_last_year < \
settings.MIN_DONATION['rt_alerts']
if rate == 'rt' and not_donated_enough:
# Somebody is trying to hack past the JS/HTML block on the front
# end. Don't let them create the alert until they've donated.
raise ValidationError(
u'You must donate more than $10 per year to create Real Time '
u'alerts.'
)
else:
return rate
class Meta:
model = Alert
fields = (
'name',
'query',
'rate',
'always_send_email',
)
widgets = {
'query': HiddenInput(
attrs={
'tabindex': '250'
}
),
'name': TextInput(
attrs={
'class': 'form-control',
'tabindex': '251'
}
),
'rate': Select(
attrs={
'class': 'form-control',
'tabindex': '252',
}
),
'always_send_email': CheckboxInput(
attrs={
'tabindex': '253',
}
),
}
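# A stand-alone sketch of the guard implemented by clean_rate above, outside of Django:
# real-time ('rt') alerts are rejected unless the user's yearly donation total meets a
# configured minimum. The constant and message below are illustrative, not the real settings.
_MIN_RT_DONATION = 10
def _check_rate(rate, total_donated_last_year):
    if rate == 'rt' and total_donated_last_year < _MIN_RT_DONATION:
        raise ValueError('You must donate more than $10 per year to create Real Time alerts.')
    return rate
print(_check_rate('dly', 0))  # a non-realtime rate passes regardless of donations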
|
ukanga/SickRage
|
sickbeard/providers/hdbits.py
|
Python
|
gpl-3.0
| 6,545
| 0.00275
|
# coding=utf-8
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import datetime
from requests.compat import urlencode, urljoin
from sickbeard import classes, logger, tvcache
from sickrage.helper.exceptions import AuthException
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
try:
import json
except ImportError:
import simplejson as json
class HDBitsProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, "HDBits")
self.username = None
self.passkey = None
self.cache = HDBitsCache(self, min_time=15) # only poll HDBits every 15 minutes max
self.url = 'https://hdbits.org'
self.urls = {
'search': urljoin(self.url, '/api/torrents'),
'rss': urljoin(self.url, '/api/torrents'),
'download': urljoin(self.url, '/download.php')
}
def _check_auth(self):
if not self.username or not self.passkey:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _checkAuthFromData(self, parsedJSON):
if 'status' in parsedJSON and 'message' in parsedJSON and parsedJSON.get('status') == 5:
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return True
def _get_season_search_strings(self, ep_obj):
season_search_string = [self._make_post_data_JSON(show=ep_obj.show, season=ep_obj)]
return season_search_string
def _get_episode_search_strings(self, ep_obj, add_string=''):
episode_search_string = [self._make_post_data_JSON(show=ep_obj.show, episode=ep_obj)]
return episode_search_string
def _get_title_and_url(self, item):
title = item.get('name', '').replace(' ', '.')
url = self.urls['download'] + '?' + urlencode({'id': item['id'], 'passkey': self.passkey})
return title, url
def search(self, search_params, age=0, ep_obj=None):
# FIXME
results = []
logger.log("Search string: {0}".format
(search_params.decode('utf-8')), logger.DEBUG)
self._check_auth()
parsedJSON = self.get_url(self.urls['search'], post_data=search_params, returns='json')
if not parsedJSON:
return []
if self._checkAuthFromData(parsedJSON):
if parsedJSON and 'data' in parsedJSON:
items = parsedJSON['data']
else:
logger.log("Resulting JSON from provider isn't correct, not parsing it", logger.ERROR)
items = []
for item in items:
results.append(item)
# FIXME SORTING
return results
def find_propers(self, search_date=None):
results = []
search_terms = [' proper ', ' repack ']
for term in search_terms:
for item in self.search(self._make_post_data_JSON(search_term=term)):
if item['utadded']:
try:
result_date = datetime.datetime.fromtimestamp(int(item['utadded']))
except Exception:
result_date = None
if result_date and (not search_date or result_date > search_date):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, result_date, self.show))
return results
def _make_post_data_JSON(self, show=None, episode=None, season=None, search_term=None):
post_data = {
'username': self.username,
'passkey': self.passkey,
'category': [2],
# TV Category
}
if episode:
if show.air_by_date:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': str(episode.airdate).replace('-', '|')
}
elif show.sports:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': episode.airdate.strftime('%b')
}
elif show.anime:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': "{0:d}".format(int(episode.scene_absolute_number))
}
else:
post_data['tvdb'] = {
'id': show.indexerid,
'season': episode.scene_season,
'episode': episode.scene_episode
}
if season:
if show.air_by_date or show.sports:
post_data['tvdb'] = {
'id': show.indexerid,
'season': str(season.airdate)[:7],
}
elif show.anime:
post_data['tvdb'] = {
'id': show.indexerid,
'season': "{0:d}".format(season.scene_absolute_number),
}
else:
post_data['tvdb'] = {
'id': show.indexerid,
'season': season.scene_season,
}
if search_term:
post_data['search'] = search_term
return json.dumps(post_data)
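# Illustrative only: the rough shape of the payload _make_post_data_JSON builds for an
# ordinary (non-anime, non-sports) episode search. Credentials and ids are placeholders.
import json
example_post_data = {
    'username': 'example_user',
    'passkey': 'example_passkey',
    'category': [2],
    'tvdb': {'id': 123456, 'season': 4, 'episode': 7},
}
print(json.dumps(example_post_data))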
class HDBitsCache(tvcache.TVCache):
def _get_rss_data(self):
self.search_params = None # HDBits cache does not use search_params so set it to None
results = []
try:
parsedJSON = self.provider.get_url(self.provider.urls['rss'], post_data=self.provider._make_post_data_JSON(), returns='json')
if self.provider._checkAuthFromData(parsedJSON):
results = parsedJSON['data']
except Exception:
pass
return {'entries': results}
provider = HDBitsProvider()
|
Hybrid-Cloud/cinder
|
cinder/volume/drivers/fujitsu/eternus_dx_fc.py
|
Python
|
apache-2.0
| 8,064
| 0
|
# Copyright (c) 2015 FUJITSU LIMITED
# Copyright (c) 2012 EMC Corporation.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
FibreChannel Cinder Volume driver for Fujitsu ETERNUS DX S3 series.
"""
from oslo_log import log as logging
import six
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.fujitsu import eternus_dx_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@interface.volumedriver
class FJDXFCDriver(driver.FibreChannelDriver):
"""FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series."""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Fujitsu_ETERNUS_CI"
VERSION = eternus_dx_common.FJDXCommon.VERSION
def __init__(self, *args, **kwargs):
super(FJDXFCDriver, self).__init__(*args, **kwargs)
self.common = eternus_dx_common.FJDXCommon(
'fc',
configuration=self.configuration)
self.VERSION = self.common.VERSION
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Create volume."""
LOG.debug('create_volume, '
'volume id: %s, enter method.', volume['id'])
location, metadata = self.common.create_volume(volume)
v_metadata = self._get_metadata(volume)
metadata.update(v_metadata)
LOG.debug('create_volume, info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location),
'metadata': metadata}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug('create_volume_from_snapshot, '
'volume id: %(vid)s, snap id: %(sid)s, enter method.',
{'vid': volume['id'], 'sid': snapshot['id']})
location, metadata = (
self.common.create_volume_from_snapshot(volume, snapshot))
v_metadata = self._get_metadata(volume)
metadata.update(v_metadata)
LOG.debug('create_volume_from_snapshot, '
'info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location),
'metadata': metadata}
def create_cloned_volume(self, volume, src_vref):
"""Create cloned volume."""
LOG.debug('create_cloned_volume, '
'target volume id: %(tid)s, '
'source volume id: %(sid)s, enter method.',
{'tid': volume['id'], 'sid': src_vref['id']})
location, metadata = (
self.common.create_cloned_volume(volume, src_vref))
v_metadata = self._get_metadata(volume)
metadata.update(v_metadata)
LOG.debug('create_cloned_volume, '
'info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location),
'metadata': metadata}
def delete_volume(self, volume):
"""Delete volume on ETERNUS."""
LOG.debug('delete_volume, '
'volume id: %s, enter method.', volume['id'])
vol_exist = self.common.delete_volume(volume)
LOG.debug('delete_volume, '
'delete: %s, exit method.', vol_exist)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug('create_snapshot, '
'snap id: %(sid)s, volume id: %(vid)s, enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
location, metadata = self.common.create_snapshot(snapshot)
LOG.debug('create_snapshot, info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location)}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug('delete_snapshot, '
'snap id: %(sid)s, volume id: %(vid)s, enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
vol_exist = self.common.delete_snapshot(snapshot)
LOG.debug('delete_snapshot, '
'delete: %s, exit method.', vol_exist)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
return
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
return
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
return
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
""
|
"Allow connection to connector and retu
|
rn connection info."""
LOG.debug('initialize_connection, volume id: %(vid)s, '
'wwpns: %(wwpns)s, enter method.',
{'vid': volume['id'], 'wwpns': connector['wwpns']})
info = self.common.initialize_connection(volume, connector)
data = info['data']
init_tgt_map = (
self.common.build_fc_init_tgt_map(connector, data['target_wwn']))
data['initiator_target_map'] = init_tgt_map
info['data'] = data
LOG.debug('initialize_connection, '
'info: %s, exit method.', info)
return info
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
LOG.debug('terminate_connection, volume id: %(vid)s, '
'wwpns: %(wwpns)s, enter method.',
{'vid': volume['id'], 'wwpns': connector['wwpns']})
map_exist = self.common.terminate_connection(volume, connector)
attached = self.common.check_attached_volume_in_zone(connector)
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if not attached:
# No more volumes attached to the host
init_tgt_map = self.common.build_fc_init_tgt_map(connector)
info['data'] = {'initiator_target_map': init_tgt_map}
LOG.debug('terminate_connection, unmap: %(unmap)s, '
'connection info: %(info)s, exit method',
{'unmap': map_exist, 'info': info})
return info
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
LOG.debug('get_volume_stats, refresh: %s, enter method.', refresh)
pool_name = None
if refresh is True:
data, pool_name = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'FJDXFCDriver'
data['storage_protocol'] = 'FC'
self._stats = data
LOG.debug('get_volume_stats, '
'pool name: %s, exit method.', pool_name)
return self._stats
def extend_volume(self, volume, new_size):
"""Extend volume."""
LOG.debug('extend_volume, '
'volume id: %s, enter method.', volume['id'])
used_pool_name = self.common.extend_volume(volume, new_size)
LOG.debug('extend_volume, '
'used pool name: %s, exit method.', used_pool_name)
def _get_metadata(self, volume):
v_metadata = volume.get('volume_metadata')
if v_metadata:
ret = {data['key']: data['value'] for data in v_metadata}
else:
ret = volume.get('metadata', {})
return ret
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
list/katakan.py
|
Python
|
agpl-3.0
| 984
| 0.01626
|
"""
4
2 belas
seratus 4 puluh 0
9 ribu seratus 2 puluh 1
2 puluh 1 ribu 3 puluh 0
9 ratus 5 ribu 0
8 puluh 2 juta 8 ratus 8 belas ribu seratus 8 puluh 8
3 ratus 1 juta 4 puluh 8 ribu 5 ratus 8 puluh 8
"""
def kata(n):
angka = range(11)
temp = ""
if n < 12:
temp += str(angka[n])
elif n < 20:
temp += str(n-10)+" belas"
elif n < 100:
temp += str(kata(n/10)) + " puluh "+ str(kata(n%10))
elif n < 200:
temp += "seratus "+ str(kata(n-100))
elif n < 1000:
temp += str(kata(n/100))+ " ratus " + str(kata(n%100))
elif n < 2000:
temp += "seribu "+str(kata(n-1000))
elif n < 1000000:
temp += str(kata(n/1000))+ " ribu "+ str(kata(n%1000))
elif n < 1000000000:
temp += str(kata(n/1000000)) +" juta " + str(kata(n%1000000))
return temp
print kata(4)
print kata(12)
print kata(140)
print kata(9121)
print kata(21030)
print kata(905000)
print kata(82818188)
print kata(301048588)
|
CERNDocumentServer/invenio
|
modules/bibsort/lib/bibsort_daemon.py
|
Python
|
gpl-2.0
| 15,964
| 0.003257
|
# -*- mode: python; coding: utf-8; -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Usage: bibsort [options]
BibSort tool
Options:
-h, --help show this help message and exit
-l, --load-config Loads the configuration from bibsort.cfg into the
database
-d, --dump-config Outputs a database dump in form of a config file
-p, --print-sorting-methods
Prints the available sorting methods
-R, --rebalance Runs the sorting methods given in '--methods' and
rebalances all the buckets.
If no method is specified, the rebalance will be done
for all the methods in the config file.
-S, --update-sorting Runs the sorting methods given in '--methods' for the
recids given in '--id'.
If no method is specified, the update will be done for
all the methods in the config file.
If no recids are specified, the update will be done
for all the records that have been
modified/inserted from the last run of the sorting.
If you want to run the sorting for all records, you
should use the '-R' option
-M, --methods=METHODS Specify the sorting methods for which the
update_sorting or rebalancing will run
(ex: --methods=method1,method2,method3).
-i, --id=RECIDS Specify the records for which the update_sorting will
run (ex: --id=1,2-56,72)
"""
__revision__ = "$Id$"
import sys
import optparse
import time
import ConfigParser
from invenio.dateutils import strftime
from invenio.dbquery import run_sql, Error
from invenio.config import CFG_ETCDIR
from invenio.bibsort_engine import run_bibsort_update, \
run_bibsort_rebalance
from invenio.bibtask import task_init, write_message, \
task_set_option, task_get_option
def load_configuration():
"""Loads the configuration for the bibsort.cfg file into the database"""
config_file = CFG_ETCDIR + "/bibsort/bibsort.cfg"
write_message('Reading config data from: %s' %config_file)
config = ConfigParser.ConfigParser()
try:
config.readfp(open(config_file))
except StandardError, err:
write_message("Cannot find configuration file: %s" \
%config_file, stream=sys.stderr)
return False
to_insert = []
for section in config.sections():
try:
name = config.get(section, "name")
definition = config.get(section, "definition")
washer = config.get(section, "washer")
except (ConfigParser.NoOptionError, StandardError), err:
write_message("For each sort_field you need to define at least \
the name, the washer and the definition. \
[error: %s]" %err, stream=sys.stderr)
return False
to_insert.append((name, definition, washer))
# all the values were correctly read from the config file
run_sql("TRUNCATE TABLE bsrMETHOD")
write_message('Old data has been deleted from bsrMETHOD table', verbose=5)
for row in to_insert:
run_sql("INSERT INTO bsrMETHOD(name, definition, washer) \
VALUES (%s, %s, %s)", (row[0], row[1], row[2]))
write_message('Method %s has been inserted into bsrMETHOD table' \
%row[0], verbose=5)
return True
def dump_configuration():
"""Creates a dump of the data existing in the bibsort tables"""
try:
results = run_sql("SELECT id, name, definition, washer FROM bsrMETHOD")
except Error, err:
write_message("The error: [%s] occured while trying to get \
the bibsort data from the database." %err, sys.stderr)
return False
write_message('The bibsort data has been read from the database.', verbose=5)
if results:
config = ConfigParser.ConfigParser()
for item in results:
section = "sort_field_%s" % item[0]
config.add_section(section)
config.set(section, "name", item[1])
config.set(section, "definition", item[2])
config.set(section, "washer", item[3])
output_file_name = CFG_ETCDIR + '/bibsort/bibsort_db_dump_%s.cfg' % \
strftime("%d%m%Y%H%M%S", time.localtime())
write_message('Opening the output file %s' %output_file_name)
try:
output_file = open(output_file_name, 'w')
config.write(output_file)
output_file.close()
except Error, err:
write_message('Can not operate on the configuration file %s [%s].' \
%(output_file_name, err), stream=sys.stderr)
return False
write_message('Configuration data dumped to file.')
else:
write_message("The bsrMETHOD table does not contain any data.")
return True
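# A self-contained sketch (Python 2, matching the module above) of the ConfigParser
# round trip that dump_configuration relies on: one section per sort method, written
# out with write(). The file name and field values here are illustrative.
import ConfigParser
_config = ConfigParser.ConfigParser()
_config.add_section('sort_field_1')
_config.set('sort_field_1', 'name', 'title')
_config.set('sort_field_1', 'definition', 'FIELD: 245__a')
_config.set('sort_field_1', 'washer', 'sort_alphanumerically_generic')
with open('bibsort_example_dump.cfg', 'w') as output_file:
    _config.write(output_file)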
def update_sorting(methods, recids):
"""Runs the updating of the sorting tables for methods and recids
Recids is a list of integer numbers(record ids)
but can also contain intervals"""
method_list = []
if methods:
method_list = methods.strip().split(',')
recid_list = []
if recids:
cli_recid_list = recids.strip().split(',')
for recid in cli_recid_list:
if recid.find('-') > 0:
rec_range = recid.split('-')
try:
recid_min = int(rec_range[0])
recid_max = int(rec_range[1])
for rec in range(recid_min, recid_max + 1):
recid_list.append(rec)
except Error, err:
write_message("Error: [%s] occured while trying \
to parse the recids argument." %err, sys.stderr)
return False
else:
recid_list.append(int(recid))
return run_bibsort_update(recid_list, method_list)
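# A stand-alone sketch of the recid parsing done in update_sorting above: a comma-separated
# string where each entry is either a single id or an inclusive 'low-high' range.
def _parse_recids(recids):
    recid_list = []
    for token in recids.strip().split(','):
        if '-' in token:
            low, high = token.split('-')
            recid_list.extend(range(int(low), int(high) + 1))
        else:
            recid_list.append(int(token))
    return recid_list
print(_parse_recids('1,5-8,72'))  # [1, 5, 6, 7, 8, 72]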
def rebalance(methods):
"""Runs the complete sorting and rebalancing of buckets for
the methods specified in 'methods' argument"""
method_list = []
if methods:
method_list = methods.strip().split(',')
return run_bibsort_rebalance(method_list)
def print_sorting_methods():
"""Outputs the available sorting methods from the DB"""
try:
results = run_sql("SELECT name FROM bsrMETHOD")
except Error, err:
write_message("The error: [%s] occured while trying to \
get the bibsort data from the database." %err)
return False
if results:
methods = []
for result in results:
methods.append(result[0])
if len(methods) > 0:
write_message('Methods: %s' %methods)
else:
write_message("There are no sorting methods configured.")
return True
# main with option parser
# to be used in case the connection with bibsched is not wanted
def main_op():
"""Runs program and handles command line options"""
option_parser = optparse.OptionParser(description="""BibSort tool""")
option_parser.add_option('-l', '--load-config', action='store_true', \
help='Loads the configuration from bibsort.conf into the database')
option_parser.add_option('-d', '--dump-con
|
gileno/djangoecommerce
|
checkout/migrations/0003_order_orderitem.py
|
Python
|
cc0-1.0
| 2,362
| 0.004671
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-31 17:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('checkout', '0002_auto_20160724_1533'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.IntegerField(blank=True, choices=[(0, 'Aguardando Pagamento'), (1, 'Concluída'), (2, 'Cancelada')], default=0, verbose_name='Situação')),
('payment_option', models.CharField(choices=[('pagseguro', 'PagSeguro'), ('paypal', 'Paypal')], max_length=20, verbose_name='Opção de Pagamento')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário')),
],
options={
'verbose_name_plural': 'Pedidos',
'verbose_name': 'Pedido',
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1, verbose_name='Quantidade')),
('price', models.DecimalField(decimal_places=2, max_digits=8, verbose_name='Preço')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='checkout.Order', verbose_name='Pedido')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalog.Product', verbose_name='Produto')),
],
options={
'verbose_name_plural': 'Itens dos pedidos',
'verbose_name': 'Item do pedido',
},
),
]
|
idkwim/pysmt
|
pysmt/solvers/pico.py
|
Python
|
apache-2.0
| 4,835
| 0.001861
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import picosat
import pysmt.logics
from pysmt import typing as types
from pysmt.solvers.solver import Solver
from pysmt.solvers.eager import EagerModel
from pysmt.rewritings import CNFizer
from pysmt.decorators import clear_pending_pop, catch_conversion_error
from six.moves import xrange
from six import iteritems
class PicosatSolver(Solver):
"""PicoSAT solver"""
LOGICS = [ pysmt.logics.QF_BOOL ]
def __init__(self, environment, logic, user_options):
Solver.__init__(self,
environment=environment,
logic=logic,
user_options=user_options)
self.mgr = environment.formula_manager
self.pico = picosat.picosat_init()
self.converter = None
self.cnfizer = CNFizer(environment=environment)
self.latest_model = None
self._var_ids = {}
def _get_var_id(self, symbol):
if not symbol.is_symbol(types.BOOL):
raise NotImplementedError("No theory terms are supported in PicoSAT")
if symbol in self._var_ids:
return self._var_ids[symbol]
else:
vid = picosat.picosat_inc_max_var(self.pico)
self._var_ids[symbol] = vid
return vid
@clear_pending_pop
def reset_assertions(self):
picosat.picosat_reset(self.pico)
self.pico = picosat.picosat_init()
@clear_pending_pop
def declare_variable(self, var):
# no need to declare variables
pass
def _get_pico_lit(self, lit):
mult = 1
var = lit
if lit.is_not():
mult = -1
var = lit.arg(0)
vid = self._get_var_id(var)
return vid * mult
@clear_pending_pop
@catch_conversion_error
def add_assertion(self, formula, named=None):
# First, we get rid of True/False constants
formula = formula.simplify()
if formula.is_false():
picosat.picosat_add(self.pico, 0)
elif not formula.is_true():
cnf = self.cnfizer.convert(formula)
self._add_cnf_assertion(cnf)
def _add_cnf_assertion(self, cnf):
for clause in cnf:
for lit in clause:
v = self._get_pico_lit(lit)
picosat.picosat_add(self.pico, v)
picosat.picosat_add(self.pico, 0)
@clear_pending_pop
@catch_conversion_error
def solve(self, assumptions=None):
if assumptions is not None:
cnf = []
for a in assumptions:
cnf += self.cnfizer.convert(a)
missing = []
for clause in cnf:
if len(clause) == 1:
v = self._get_pico_lit(next(iter(clause)))
picosat.picosat_assume(self.pico, v)
else:
missing.append(clause)
if len(missing) > 0:
self.push()
self._add_cnf_assertion(missing)
self.pending_pop = True
res = picosat.picosat_sat(self.pico, -1)
if res == picosat.PICOSAT_SATISFIABLE:
self.latest_model = self.get_model()
return True
else:
self.latest_model = None
return False
def get_value(self, item):
if self.latest_model is None:
self.get_model()
return self.latest_model.get_value(item)
def get_model(self):
assignment = {}
for var, vid in iteritems(self._var_ids):
v = picosat.picosat_deref(self.pico, vid)
if v == 0:
assert False
value = self.mgr.Bool(v == 1)
assignment[var] = value
return EagerModel(assignment=assignment,
environment=self.environment)
@clear_pending_pop
def push(self, levels=1):
for _ in xrange(levels):
picosat.picosat_push(self.pico)
@clear_pending_pop
def pop(self, levels=1):
for _ in xrange(levels):
picosat.picosat_pop(self.pico)
def exit(self):
if not self._destroyed:
self._destroyed = True
picosat.picosat_reset(self.pico)
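# A toy sketch (without the real picosat bindings) of the clause encoding used by
# _add_cnf_assertion above: clauses are streamed as signed integer literals with a
# trailing 0 terminating each clause, DIMACS-style. The formula below is (a or not b) and (b).
def _encode_cnf(clauses):
    stream = []
    for clause in clauses:
        stream.extend(clause)
        stream.append(0)
    return stream
print(_encode_cnf([[1, -2], [2]]))  # [1, -2, 0, 2, 0]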
|
Azure/WALinuxAgent
|
azurelinuxagent/pa/deprovision/default.py
|
Python
|
apache-2.0
| 10,146
| 0.001774
|
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import glob
import os.path
import re
import signal
import sys
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common import version
from azurelinuxagent.common.exception import ProtocolError
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.persist_firewall_rules import PersistFirewallRulesHandler
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.ga.exthandlers import HANDLER_COMPLETE_NAME_PATTERN
def read_input(message):
if sys.version_info[0] >= 3:
return input(message)
else:
# This is not defined in python3, and the linter will thus
# throw an undefined-variable<E0602> error on this line.
# Suppress it here.
return raw_input(message) # pylint: disable=E0602
class DeprovisionAction(object):
def __init__(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
self.func = func
self.args = args
self.kwargs = kwargs
def invoke(self):
self.func(*self.args, **self.kwargs)
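# A small, self-contained illustration of the deferred-call pattern DeprovisionAction
# implements: actions are recorded first, with their arguments, and only executed later.
# The messages are placeholders; nothing is actually deleted here.
class _Action(object):
    def __init__(self, func, args=None, kwargs=None):
        self.func, self.args, self.kwargs = func, args or [], kwargs or {}
    def invoke(self):
        self.func(*self.args, **self.kwargs)
def _log(msg):
    print(msg)
_pending = [_Action(_log, ['would delete /etc/resolv.conf']),
            _Action(_log, ['would delete cached DHCP leases'])]
for _action in _pending:
    _action.invoke()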
class DeprovisionHandler(object):
def __init__(self):
self.osutil = get_osutil()
self.protocol_util = get_protocol_util()
self.actions_running = False
signal.signal(signal.SIGINT, self.handle_interrupt_signal)
def del_root_password(self, warnings, actions):
warnings.append("WARNING! root password will be disabled. "
"You will not be able to login as root.")
actions.append(DeprovisionAction(self.osutil.del_root_password))
def del_user(self, warnings, actions):
try:
ovfenv = self.protocol_util.get_ovf_env()
except ProtocolError:
warnings.append("WARNING! ovf-env.xml is not found.")
warnings.append("WARNING! Skip delete user.")
return
username = ovfenv.username
warnings.append(("WARNING! {0} account and entire home directory "
"will be deleted.").format(username))
actions.append(DeprovisionAction(self.osutil.del_account,
[username]))
def regen_ssh_host_key(self, warnings, actions):
warnings.append("WARNING! All SSH host key pairs will be deleted.")
actions.append(DeprovisionAction(fileutil.rm_files,
[conf.get_ssh_key_glob()]))
def stop_agent_service(self, warnings, actions):
warnings.append("WARNING! The waagent service will be stopped.")
actions.append(DeprovisionAction(self.osutil.stop_agent_service))
def del_dirs(self, warnings, actions): # pylint: disable=W0613
dirs = [conf.get_lib_dir(), conf.get_ext_log_dir()]
actions.append(DeprovisionAction(fileutil.rm_dirs, dirs))
def del_files(self, warnings, actions): # pylint: disable=W0613
files = ['/root/.bash_history', conf.get_agent_log_file()]
actions.append(DeprovisionAction(fileutil.rm_files, files))
# For OpenBSD
actions.append(DeprovisionAction(fileutil.rm_files,
["/etc/random.seed",
"/var/db/host.random",
"/etc/isakmpd/local.pub",
"/etc/isakmpd/private/local.key",
"/etc/iked/private/local.key",
"/etc/iked/local.pub"]))
def del_resolv(self, warnings, actions):
warnings.append("WARNING! /etc/resolv.conf will be deleted.")
files_to_del = ["/etc/resolv.conf"]
actions.append(DeprovisionAction(fileutil.rm_files, files_to_del))
def del_dhcp_lease(self, warnings, actions):
warnings.append("WARNING! Cached DHCP leases will be deleted.")
dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"]
actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del))
# For FreeBSD and OpenBSD
actions.append(DeprovisionAction(fileutil.rm_files,
["/var/db/dhclient.leases.*"]))
# For FreeBSD, NM controlled
actions.append(DeprovisionAction(fileutil.rm_files,
["/var/lib/NetworkManager/dhclient-*.lease"]))
def del_ext_handler_files(self, warnings, actions): # pylint: disable=W0613
ext_dirs = [d for d in os.listdir(conf.get_lib_dir())
if os.path.isdir(os.path.join(conf.get_lib_dir(), d))
and re.match(HANDLER_COMPLETE_NAME_PATTERN, d) is not None
and not version.is_agent_path(d)]
for ext_dir in ext_dirs:
ext_base = os.path.join(conf.get_lib_dir(), ext_dir)
files = glob.glob(os.path.join(ext_base, 'status', '*.status'))
files += glob.glob(os.path.join(ext_base, 'config', '*.settings'))
files += glob.glob(os.path.join(ext_base, 'config', 'HandlerStatus'))
files += glob.glob(os.path.join(ext_base, 'mrseq'))
if len(files) > 0:
actions.append(DeprovisionAction(fileutil.rm_files, files))
def del_lib_dir_files(self, warnings, actions): # pylint: disable=W0613
known_files = [
'HostingEnvironmentConfig.xml',
'Incarnation',
'partition',
'Protocol',
'SharedConfig.xml',
'WireServerEndpoint'
]
known_files_glob = [
'Extensions.*.xml',
'ExtensionsConfig.*.xml',
'GoalState.*.xml'
]
lib_dir = conf.get_lib_dir()
files = [f for f in \
[os.path.join(lib_dir, kf) for kf in known_files] \
if os.path.isfile(f)]
for p in known_files_glob:
files += glob.glob(os.path.join(lib_dir, p))
if len(files) > 0:
actions.append(DeprovisionAction(fileutil.rm_files, files))
def reset_hostname(self, warnings, actions): # pylint: disable=W0613
localhost = ["localhost.localdomain"]
actions.append(DeprovisionAction(self.osutil.set_hostname,
localhost))
actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname,
localhost))
def setup(self, deluser):
warnings = []
actions = []
self.stop_agent_service(warnings, actions)
if conf.get_regenerate_ssh_host_key():
self.regen_ssh_host_key(warnings, actions)
self.del_dhcp_lease(warnings, actions)
self.reset_hostname(warnings, actions)
if conf.get_delete_root_password():
self.del_root_password(warnings, actions)
self.del_dirs(warnings, actions)
self.del_files(warnings, actions)
self.del_resolv(warnings, actions)
if deluser:
self.del_user(warnings, actions)
self.del_persist_firewall_rules(actions)
return warnings, actions
def setup_changed_unique_id(self):
warnings = []
actions = []
self.del_dhcp_lease(warnings, actions)
self.del_lib_dir_files(warnings, actions)
self.del_ext_handler_files(warnings, actions)
self.del_persist_firewall_rules(actions)
|
jrosebr1/imutils
|
demos/sorting_contours.py
|
Python
|
mit
| 1,343
| 0.008935
|
# author: Adrian Rosebrock
# website: http://www.pyimagesearch.com
# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python sorting_contours.py
# import the necessary packages
from imutils import contours
import imutils
import cv2
# load the shapes image, clone it, convert it to grayscale, and
# detect edges in the image
image = cv2.imread("../demo_images/shapes.png")
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edged = imutils.auto_canny(gray)
# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the (unsorted) contours and label them
for (i, c) in enumerate(cnts):
orig = contours.label_contour(orig, c, i, color=(240, 0, 159))
# show the original image
cv2.imshow("Original", orig)
# loop over the sorting methods
for method in ("left-to-right", "right-to-left", "top-to-bottom", "bottom-to-top"):
# sort the contours
(cnts, boundingBoxes) = contours.sort_contours(cnts, method=method)
clone = image.copy()
# loop over the sorted contours and label them
for (i, c) in enumerate(cnts):
sortedImage = contours.label_contour(clone, c, i, color=(240, 0, 159))
# show the sorted contour image
cv2.imshow(method, sortedImage)
# wait for a keypress
cv2.waitKey(0)
|
uogbuji/Library.Link
|
pylib/util.py
|
Python
|
apache-2.0
| 17,711
| 0.005533
|
'''
'''
import re
import http
import logging
import urllib
import urllib.request
from itertools import *
import collections.abc
from versa.driver import memory
from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET, ATTRIBUTES
from versa.reader import rdfalite
from versa.reader.rdfalite import RDF_NS, SCHEMAORG_NS
from versa import util as versautil
from bibframe import BFZ, BL
from bibframe.zextra import LL
from rdflib import URIRef, Literal
from rdflib import BNode
from amara3 import iri
from amara3.uxml import tree
from amara3.uxml import xmliter
from amara3.uxml.treeutil import *
from amara3.uxml import html5
RDFTYPE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'
SCHEMAORG = 'http://schema.org/'
def load_rdfa_page(site, max_retries=1):
'''
Helper to load RDFa page as text, plus load a Versa model with the metadata
Returns a versa memory model and the raw site text, except in error case where it returns None and the error
'''
retry_count = 0
while True:
model = memory.connection()
try:
with urllib.request.urlopen(site) as resourcefp:
sitetext = resourcefp.read()
rdfalite.toversa(sitetext, model, site)
break #Success, so break out of retry loop
except (urllib.error.HTTPError, urllib.error.URLError, http.client.RemoteDisconnected) as e:
retry_count += 1
if retry_count >= max_retries:
return None, e
return model, sitetext
async def rdfa_from_page(url, session=None, max_retries=1):
'''
Async helper to load RDFa page as text, plus load a Versa model with the metadata
Yields a versa memory model, the raw site text and HTTP response info, except in error case where it returns None and the exception
>>> from amara3.asynctools import go_async
>>> from librarylink.util import rdfa_from_page
>>> from versa import util as versautil
>>> url = "http://link.crlibrary.org/portal/Estamos-en-un-libro-por-Mo-Willems--traducido/ZAxkTVTDCxE/"
>>> model, sitetext, response = go_async(rdfa_from_page(url))
>>> next(versautil.lookup(model, 'http://link.crlibrary.org/resource/zXft1yv0T9k/', 'http://schema.org/name'))
'Libros y lectura -- Novela juvenil'
'''
retry_count = 0
while True:
model = memory.connection()
try:
if session == None:
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
body = await response.read()
rdfalite.toversa(body, model, url)
return model, body, response
else:
async with session.get(url) as response:
body = await response.read()
rdfalite.toversa(body, model, url)
return model, body, response
except Exception as e:
#print(url, f'[EXCEPTION {e}], context: {context}')
retry_count += 1
if retry_count >= max_retries:
return None, e, None
#Legacy name
prep_site_model = load_rdfa_page
def rdf_from_site(site, rules=None):
'''
>>> from librarylink.util import rdf_from_site
>>> g = rdf_from_site('http://link.denverlibrary.org')
>>> s = g.serialize(format='json-ld', indent=2)
>>> with open('denverlibrary.ld.json', 'wb') as fp: fp.write(s)
>>> rules = {'ignore-predicates': ['http://bibfra.me/', 'http://library.link/'], 'rename-predicates': {'http://library.link/vocab/branchOf': 'http://schema.org/branch'}}
>>> g = rdf_from_site('http://link.denverlibrary.org', rules=rules)
>>> s = g.serialize(format='json-ld', indent=2)
>>> with open('denverlibrary.ld.json', 'wb') as fp: fp.write(s)
'''
from rdflib import ConjunctiveGraph, URIRef, Literal, RDF, RDFS
from versa.writer.rdf import mock_bnode, prep, RDF_TYPE
#Also requires: pip install rdflib-jsonld
rules = rules or {}
ignore_pred = rules.get('ignore-predicates', set())
rename_pred = rules.get('rename-predicates', {})
model, sitetext = load_rdfa_page(site)
if not model:
return None
g = ConjunctiveGraph()
#Hoover up everything with a type
for o, r, t, a in model.match():
for oldp, newp in rename_pred.items():
if r == oldp: r = newp
for igp in ignore_pred:
if r.startswith(igp):
break
else:
g.add(prep(o, r, t))
return g
def jsonize_site(site, rules=None):
'''
>>> from librarylink.util import jsonize_site
>>> obj = jsonize_site('http://link.denverlibrary.org')
>>> with open('denverlibrary.ld.json', 'w') as fp: json.dump(obj, fp, indent=2)
>>> rules = {'ignore-predicates': ['http://bibfra.me/', 'http://library.link/'], 'rename-predicates': {'http://library.link/vocab/branchOf': 'http://schema.org/branch'}}
>>> obj = jsonize_site('http://link.denverlibrary.org', rules=rules)
>>> with open('denverlibrary.ld.json', 'w') as fp: json.dump(obj, fp, indent=2)
'''
from versa.util import uniquify
from versa.writer import jsonld
rules = rules or {}
ignore_pred = rules.get('ignore-predicates', set())
rename_pred = rules.get('rename-predicates', {})
ignore_oftypes = rules.get('ignore-oftypes', [])
invert = rules.get('invert', {})
context = rules.get('context', {})
pre_model, _ = load_rdfa_page(site)
if not pre_model:
return None
uniquify(pre_model)
post_model = memory.connection()
for o, r, t, a in pre_model.match():
#print(o, r, t)
for oldp, newp in rename_pred.items():
if r == oldp: r = newp
for rpre, rpost in invert:
if r == rpre:
assert isinstance(t, I)
o, r, t = t, rpost, o
for igp in ignore_pred:
if r.startswith(igp):
break
else:
post_model.add(o, r, t, a)
obj = jsonld.bind(post_model, context=context, ignore_oftypes=ignore_oftypes)
return obj
def get_orgname(site, reuse=None):
'''
Given a site URL return the org's name
>>> from librarylink.util import all_sites, get_orgname
>>> org = next(s for s in all_sites() if 'denverlibrary' in s.host )
>>> get_orgname(org)
'Denver Public Library'
>>> get_orgname('http://link.denverlibrary.org/')
'Denver Public Library'
'''
if reuse:
model, sitetext = reuse
else:
model, sitetext = load_rdfa_page(site)
if not model:
return None
for o, r, t, a in model.match(None, RDF_NS + 'type', SCHEMAORG_NS + 'Organization'):
name = versautil.simple_lookup(model, o, SCHEMAORG_NS + 'name')
if name is not None: return name
#schema:Organization not reliable the way it's used in LLN
#orgentity = versautil.simple_lookup_byvalue(model, RDF_NS + 'type', SCHEMAORG_NS + 'LibrarySystem')
#orgentity = versautil.simple_lookup_byvalue(model, SCHEMAORG_NS + 'url', baseurl)
#print(orgentity)
#name = versautil.simple_lookup(model, orgentity, SCHEMAORG_NS + 'name')
#name = versautil.simple_lookup(model, baseurl + '#_default', BL + 'name')
#return name
NETWORK_HINTS = {
#e.g. from http://augusta.library.link/
#<link href="/static/liblink_ebsco/css/network.css" rel="stylesheet">
b'liblink_ebsco/css/network.css': 'ebsco',
#e.g. from http://msu.library.link/
#<link href="/static/liblink_iii/css/network.css" rel="stylesheet"/>
b'liblink_iii/css/network.css': 'iii',
#e.g. from http://link.houstonlibrary.org/
#<link href="/static/liblink_bcv/css/network.css" rel="stylesheet"/>
b'liblink_bcv/css/network.css': 'bcv',
#e.g. from http://link.library.gmu.edu/
#<link href="/static/liblink_atlas/css/network.css" rel="stylesheet"/>
b'liblink_atlas/css/network.css': 'atlas',
}
PIPELINE_VERSION_PAT = re.compile(b'<dt>Transformation Pipeline</dt>\s*<dd>([^<]*)</dd>', re.MULTILINE)
TEMPLATE_VERSION_PAT = re.compile(b'<dt>Template Version</dt>\s*<dd>([^<]*)</dd>', re
|
Taapat/enigma2-openpli-fulan
|
lib/python/Plugins/SystemPlugins/SoftwareManager/plugin.py
|
Python
|
gpl-2.0
| 81,511
| 0.028143
|
import os
import time
import cPickle
from Plugins.Plugin import PluginDescriptor
from Screens.Console import Console
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.Ipkg import Ipkg
from Screens.SoftwareUpdate import UpdatePlugin
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Input import Input
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Harddisk import harddiskmanager
from Components.config import config, getConfigListEntry, ConfigSubsection, ConfigText, ConfigLocations, ConfigYesNo, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.SelectionList import SelectionList
from Components.PluginComponent import plugins
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.AVSwitch import AVSwitch
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_PLUGIN, SCOPE_CURRENT_SKIN, SCOPE_METADIR
from Tools.LoadPixmap import LoadPixmap
from Tools.NumericalTextInput import NumericalTextInput
from enigma import RT_HALIGN_LEFT, RT_VALIGN_CENTER, eListbox, gFont, getDesktop, ePicLoad, eRCInput, getPrevAsciiCode, eEnv
from twisted.web import client
from ImageWizard import ImageWizard
from BackupRestore import BackupSelection, RestoreMenu, BackupScreen, RestoreScreen, getBackupPath, getBackupFilename
from SoftwareTools import iSoftwareTools
config.plugins.configurationbackup = ConfigSubsection()
config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname'])
config.plugins.softwaremanager = ConfigSubsection()
config.plugins.softwaremanager.overwriteConfigFiles = ConfigSelection(
[
("Y", _("Yes, always")),
("N", _("No, never")),
("ask", _("Always ask"))
], "Y")
config.plugins.softwaremanager.onSetupMenu = ConfigYesNo(default=False)
config.plugins.softwaremanager.onBlueButton = ConfigYesNo(default=False)
config.plugins.softwaremanager.epgcache = ConfigYesNo(default=False)
def write_cache(cache_file, cache_data):
try:
path = os.path.dirname(cache_file)
if not os.path.isdir(path):
os.mkdir(path)
cPickle.dump(cache_data, open(cache_file, 'w'), -1)
except Exception, ex:
print "Failed to write cache data to %s:" % cache_file, ex
def valid_cache(cache_file, cache_ttl):
#See if the cache file exists and is still living
try:
mtime = os.stat(cache_file).st_mtime
except:
return 0
curr_time = time.time()
if (curr_time - mtime) > cache_ttl:
return 0
else:
return 1
def load_cache(cache_file):
return cPickle.load(open(cache_file))
class UpdatePluginMenu(Screen):
skin = """
<screen name="UpdatePluginMenu" position="center,center" size="610,410" title="Software management" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<ePixmap pixmap="skin_default/border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 25
}
</convert>
</widget>
<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 300
}
</convert>
</widget>
<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self.skin_path = plugin_path
self.menu = args
self.list = []
self.oktext = _("\nPress OK on your remote control to continue.")
self.menutext = _("Press MENU on your remote control for additional options.")
self.infotext = _("Press INFO on your remote control for additional information.")
self.text = ""
self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.value )
if self.menu == 0:
print "building menu entries"
self.list.append(("install-extensions", _("Manage extensions"), _("\nManage extensions or plugins for your receiver" ) + self.oktext, None))
self.list.append(("software-update", _("Software update"), _("\nOnline update of your receiver software." ) + self.oktext, None))
self.list.append(("software-restore", _("Software restore"), _("\nRestore your receiver with a new firmware." ) + self.oktext, None))
self.list.append(("system-backup", _("Backup system settings"), _("\nBackup your receiver settings." ) + self.oktext + "\n\n" + self.infotext, None))
self.list.append(("system-restore",_("Restore system settings"), _("\nRestore your receiver settings." ) + self.oktext, None))
self.list.append(("ipkg-install", _("Install local extension"), _("\nScan for local extensions and install them." ) + self.oktext, None))
for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
if "SoftwareSupported" in p.__call__:
callFnc = p.__call__["SoftwareSupported"](None)
if callFnc is not None:
if "menuEntryName" in p.__call__:
menuEntryName = p.__call__["menuEntryName"](None)
else:
menuEntryName = _('Extended Software')
if "menuEntryDescription" in p.__call__:
menuEntryDescription = p.__call__["menuEntryDescription"](None)
else:
menuEntryDescription = _('Extended Software Plugin')
self.list.append(('default-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(("advanced", _("Advanced options"), _("\nAdvanced options and settings." ) + self.oktext, None))
elif self.menu == 1:
self.list.append(("advancedrestore", _("Advanced restore"), _("\nRestore your backups by date." ) + self.oktext, None))
self.list.append(("backuplocation", _("Select backup location"), _("\nSelect your backup device.\nCurrent device: " ) + config.plugins.configurationbackup.backuplocation.value + self.oktext, None))
self.list.append(("backupfiles", _("Select backup files"), _("Select files for backup.") + self.oktext + "\n\n" + self.infotext, None))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(("ipkg-manager", _("Packet management"), _("\nView, install and remove available or installed packages." ) + self.oktext, None))
self.list.append(("ipkg-source",_("Select upgrade source"), _("\nEdit the upgrade source address." ) + self.oktext, None))
for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
if "AdvancedSoftwareSupported" in p.__call__:
callFnc = p
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwTccedwardsCom.py
|
Python
|
bsd-3-clause
| 548
| 0.034672
|
def extractWwwTccedwardsCom(item):
'''
Parser for 'www.tccedwards.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
gileno/djangoecommerce
|
catalog/apps.py
|
Python
|
cc0-1.0
| 272
| 0
|
# coding=utf-8
from django.apps import AppConfig
from watson import search as watson
class CatalogConfig(AppConfig):
name = 'catalog'
verbose_name = 'Catálogo'
def ready(self):
Product = self.get_model('Product')
watson.register(Product)
|
boh1996/LectioAPI
|
importers/importGroupMembers.py
|
Python
|
mit
| 3,867
| 0.041376
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scrapers'))
sys.path.append("..")
from datetime import datetime
from pymongo import MongoClient
from database import *
import error
import sync
import group_members as groupMembersApi
def importGroupMembers ( school_id, branch_id, team_element_id, session = False, username = False, password = False ):
try:
objectList = groupMembersApi.group_members({
"school_id" : school_id,
"branch_id" : branch_id,
"team_element_id" : team_element_id,
"username" : username,
"password" : password
}, session)
if objectList is None:
error.log(__file__, False, "Unknown Object")
return False
if not "status" in objectList:
error.log(__file__, False, "Unknown Object")
return False
if objectList["status"] == "ok":
members = []
for row in objectList["objects"]:
if row["type"] == "student":
unique = {
"student_id" : row["person_id"]
}
contextCards = []
contextCards.append(row["context_card_id"])
existsing = db.persons.find(unique).limit(1)
if existsing.count() > 0:
existsing = existsing[0]
if "context_cards" in existsing:
for card in existsing["context_cards"]:
if not card in contextCards:
contextCards.append(card)
element = {
"type" : "student",
"student_id" : row["person_id"],
"name" : unicode(str(row["full_name"]).decode("utf8")),
"class_student_id" : unicode(str(row["person_text_id"]).decode("utf8")),
"last_name" : unicode(str(row["last_name"]).decode("utf8")),
"first_name" : unicode(str(row["first_name"]).decode("utf8")),
"context
|
_cards" : contextCards,
"school_id" : str(school_id),
"branch_id" : str(branch_id)
}
if "field_of_study" in row:
# Add Field of Study Sync
element["field_of_study"] = {
"name" : row["field_of_study"]["name"],
"field_of_study_id" : row["field_of_study"]["field_of_study_id"]
}
if "picture_id" in row:
# Launch Fetch Picture Task
element["picture_id"] = row["picture_id"]
else:
unique = {
"teacher_id" : row["person_id"]
}
contextCards = []
contextCards.append(row["context_card_id"])
existsing = db.persons.find(unique).limit(1)
if existsing.count() > 0:
existsing = existsing[0]
if "context_cards" in existsing:
for card in existsing["context_cards"]:
if not card in contextCards:
contextCards.append(card)
element = {
"teacher_id" : str(row["person_id"]),
"last_name" : unicode(str(row["last_name"]).decode("utf8")),
"first_name" : unicode(str(row["first_name"]).decode("utf8")),
"type" : "teacher",
"name" : unicode(str(row["full_name"]).decode("utf8")),
"abbrevation" : unicode(str(row["person_text_id"]).decode("utf8")),
"context_cards" : contextCards,
"school_id" : str(school_id),
"branch_id" : str(branch_id)
}
# Add Team to teacher
if "picture_id" in row:
# Launch Fetch Picture Task
element["picture_id"] = row["picture_id"]
status = sync.sync(db.persons, unique, element)
members.append(status["_id"])
unique = {
"school_id" : str(school_id),
"branch_id" : str(branch_id),
"team_element_id" : str(team_element_id)
}
element = {
"school_id" : str(school_id),
"branch_id" : str(branch_id),
"team_element_id" : str(team_element_id),
"members" : members
}
status = sync.sync(db.team_elements, unique, element)
return True
else:
if "error" in objectList:
error.log(__file__, False, objectList["error"])
return False
else:
error.log(__file__, False, "Unknown error")
return False
except Exception, e:
error.log(__file__, False, str(e))
return False
|
Nexolight/wtstamp
|
src/utils.py
|
Python
|
gpl-2.0
| 17,014
| 0.020336
|
from datetime import datetime
from src import yamlsettings as settings
from models.models import Workday
from calendar import monthrange
import time
from math import ceil,floor
class Utils:
WEEKDAYS=["monday","tuesday","wednesday","thursday", "friday", "saturday", "sunday"]
MONTHS=["january","february","march","april","may","june","july","august","september","october","november","december"]
def __init__(self):
pass
@staticmethod
def getDatesFromRange(dS,dE):
'''
Returns an array with all dates within the given range
(includes start & end)
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
iterts = datetime.strptime(str(dS.day)+"."+str(dS.month)+"."+str(dS.year), "%d.%m.%Y").timestamp()
|
iterdate=datetime.fromtimestamp(iterts).date()
dates=[{"date":iterdate,"timestamp":iterts}]
while True:
iterts=iterts+86400
iterdate=datetime.fromtimestamp(iterts).date()
dates.append({"date":iterdate, "timestamp":iterts})
if(iterdate == dE):
break
return dates
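# A minimal sketch of the expected call, with illustrative dates:
#   from datetime import date
#   span = Utils.getDatesFromRange(date(2020, 1, 30), date(2020, 2, 2))
#   # -> one {"date": <date>, "timestamp": <utc_seconds>} entry per day,
#   #    including both endpoints (30.01., 31.01., 01.02., 02.02.)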
@staticmethod
def getWeekdates(ts):
'''
Returns an array with all dates within the week of the given timestamp
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
daystr = datetime.fromtimestamp(ts).strftime("%A").lower()
weekdates=[]
weekday_index = Utils.WEEKDAYS.index(daystr)
weekstartts = ts - (weekday_index*86400)
weekendts = ts + ((len(Utils.WEEKDAYS)-(weekday_index+1))*86400)
weekend = datetime.fromtimestamp(weekendts).strftime("%d.%m.%Y")
iterts=weekstartts
while True:
#add date to array
iterdate=datetime.fromtimestamp(iterts).strftime("%d.%m.%Y")
weekdates.append({"date":datetime.fromtimestamp(iterts).date(),"timestamp":iterts})
#break when the end of the week is reached
if(iterdate==weekend):
break
#Add a day
iterts+=86400
return weekdates
@staticmethod
def getMonthdates(ts):
'''
Returns an array with all dates within the month of the given timestamp
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
monthdates=[]
dateobj=datetime.fromtimestamp(ts).date()
monthdays = monthrange(dateobj.year,dateobj.month)[1]#get the days of the month
for day in range(1,monthdays+1):
datets=datetime.strptime(str(day)+"."+str(dateobj.month)+"."+str(dateobj.year), "%d.%m.%Y").timestamp()
monthdates.append({"date":datetime.fromtimestamp(datets).date(),"timestamp":datets})
return monthdates
@staticmethod
def getYearDates(ts):
'''
Returns an array with all dates within the year of the given timestamp
based on the year_swap setting
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
yeardates=[]
dateobj=datetime.fromtimestamp(ts).date()
swap=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year),"%d.%m.%Y").timestamp()-86400
if(ts<swap):
ts=swap=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year-1),"%d.%m.%Y").timestamp()-86400
dateobj=datetime.fromtimestamp(ts).date()
dStart=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year),"%d.%m.%Y").date()
lDayTs=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year),"%d.%m.%Y").timestamp()-86400
lDayDt=datetime.fromtimestamp(lDayTs).date()
dEnd=datetime.strptime(str(lDayDt.day)+"."+str(lDayDt.month)+"."+str(dateobj.year+1),"%d.%m.%Y").date()
return Utils.getDatesFromRange(dStart,dEnd)
@staticmethod
def getDoneWork(historydir, ts):
'''
Returns a dict with the work done in the current:
{
"now":<utc_seconds>
"day":<utc_seconds>
"week":<utc_seconds>
"month":<utc_seconds>
"year":<utc_seconds>
}
'''
day=Utils.getDoneWorkT("day",historydir,ts)
week=Utils.getDoneWorkT("week",historydir,ts)
month=Utils.getDoneWorkT("month",historydir,ts)
year=Utils.getDoneWorkT("year",historydir,ts)
now=year
return {
"now":now,
"day":day,
"week":week,
"month":month,
"year":year
}
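# A minimal sketch; the history directory path is illustrative:
#   done = Utils.getDoneWork("~/.wtstamp/history", time.time())
#   hours_today = done["day"] / 3600.0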
@staticmethod
def getDoneWorkT(type,historydir,ts):
'''
Returns the done work depending on <type> in seconds
starting from year_swap as long as <ts> is above year_swap
type can be:
"year"
"month"
"week"
"day"
"until"
'''
work=0
wdos=[]
if(type=="year"):
wdos=Workday.loadYear(historydir,ts)
elif(type=="month"):
wdos=Workday.loadMonth(historydir,ts)
elif(type=="week"):
wdos=Workday.loadWeek(historydir,ts)
elif(type=="day"):
wdd=Workday.loadDay(historydir,ts)
if(not wdd.get("workday") and datetime.fromtimestamp(time.time()).date() == givenDate):
wdos.append(Workday.loadLast(historydir)) #this happens when a workday is longer than 12pm
else:
wdos.append(wdd)
elif(type=="until"):
wdos=Workday.loadYear(historydir,ts)
givenDate=datetime.fromtimestamp(ts).date()
for wdo in wdos:
if(not type=="until"):
if(not wdo.get("workday") or not Utils.inCalc(wdo.get("workday").start)):
continue #Just a day without a saved workday object or out of our calc range
work+=Utils.getWDStats(wdo.get("workday")).get("worktime")
else:#type==until
if(ts<wdos[0].get("timestamp")):# year_swap workaround
return work
if(wdo.get("workday") and Utils.inCalc(wdo.get("workday").start)):
work+=Utils.getWDStats(wdo.get("workday")).get("worktime")
if(type=="until" and wdo.get("date") == givenDate):
break # for "until" we break here
return work
@staticmethod
def getRequiredWork(ts):
'''
Returns a dict with the work required for the current:
{
"now":<utc_seconds>
"day":<utc_seconds>
"week":<utc_seconds>
"month":<utc_seconds>
"year":<utc_seconds>
}
'''
now=Utils.getRequiredWorkT("until",time.time())
day=Utils.getRequiredWorkT("day",ts)
week=Utils.getRequiredWorkT("week",ts)
month=Utils.getRequiredWorkT("month",ts)
year=Utils.getRequiredWorkT("year",ts)
return{
"now":now,
"day":day,
"week":week,
"month":month,
"year":year
}
@staticmethod
def getRequiredWorkT(type,ts):
'''
Returns the required work depending on <type> in seconds
starting from year_swap as long as <ts> is above year_swap
type can be:
"year"
"month"
"week"
"day"
"until"
'''
work=0
dates=[]
if(type=="year"):
dates=Utils.getYearDates(ts)
elif(type=="month"):
dates=Utils.getMonthdates(ts)
elif(type=="week"):
dates=Utils.getWeekdates(ts)
elif(type=="day"):
return Utils.getMinutesPerDay(ts)
elif(type=="until"):
dates=Utils.getYearDates(ts)
if(ts<dates[0].get("timestamp")):# year_swap workaround
return work
givenDate=datetime.fromtimestamp(ts
|
imcleod/anaconda-ec2
|
ozutil.py
|
Python
|
lgpl-2.1
| 22,263
| 0.001887
|
# Copyright (C) 2010,2011,2012 Chris Lalancette <clalance@redhat.com>
# Copyright (C) 2012,2013 Chris Lalancette <clalancette@gmail.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Originally borrowed from Oz 29-Mar-2013
"""
Miscellaneous utility functions.
"""
import os
import random
import subprocess
import tempfile
import errno
import stat
import shutil
import pycurl
import collections
def generate_full_auto_path(relative):
"""
Function to find the absolute path to an unattended installation file.
"""
# all of the automated installation paths are installed to $pkg_path/auto,
# so we just need to find it and generate the right path here
if relative is None:
raise Exception("The relative path cannot be None")
pkg_path = os.path.dirname(__file__)
return os.path.abspath(os.path.join(pkg_path, "auto", relative))
def executable_exists(program):
"""
Function to find out whether an executable exists in the PATH
of the user. If so, the absolute path to the executable is returned.
If not, an exception is raised.
"""
def is_exe(fpath):
"""
Helper method to check if a file exists and is executable
"""
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
if program is None:
raise Exception("Invalid program name passed")
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
raise Exception("Could not find %s" % (program))
def copyfile_sparse(src, dest):
"""
Function to copy a file sparsely if possible. The logic here is
all taken from coreutils cp, specifically the 'sparse_copy' function.
"""
if src is None:
raise Exception("Source of copy cannot be None")
if dest is None:
raise Exception("Destination of copy cannot be None")
src_fd = os.open(src, os.O_RDONLY)
dest_fd = os.open(dest, os.O_WRONLY|os.O_CREAT|os.O_TRUNC)
sb = os.fstat(src_fd)
# See io_blksize() in coreutils for an explanation of why 32*1024
buf_size = max(32*1024, sb.st_blksize)
size = sb.st_size
destlen = 0
while size != 0:
buf = os.read(src_fd, min(buf_size, size))
if len(buf) == 0:
break
buflen = len(buf)
if buf == '\0'*buflen:
os.lseek(dest_fd, buflen, os.SEEK_CUR)
else:
# FIXME: check out the python implementation of write, we might have
# to handle EINTR here
os.write(dest_fd, buf)
destlen += len(buf)
size -= len(buf)
os.ftruncate(dest_fd, destlen)
os.close(src_fd)
os.close(dest_fd)
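# A minimal sketch of the intended use; the paths are illustrative:
#   copyfile_sparse("/var/lib/images/base.img", "/var/lib/images/clone.img")
# Runs of NUL bytes in the source are skipped via os.lseek(), so the copy
# stays sparse on filesystems that support holes.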
def bsd_split(line, digest_type):
"""
Function to split a BSD-style checksum line into a filename and
checksum.
"""
current = len(digest_type)
if line[current] == ' ':
current += 1
if line[current] != '(':
return None, None
current += 1
# find end of filename. The BSD 'md5' and 'sha1' commands do not escape
# filenames, so search backwards for the last ')'
file_end = line.rfind(')')
if file_end == -1:
# could not find the ending ), fail
return None, None
filename = line[current:file_end]
line = line[(file_end + 1):]
line = line.lstrip()
if line[0] != '=':
return None, None
line = line[1:]
line = line.lstrip()
if line[-1] == '\n':
line = line[:-1]
return line, filename
def sum_split(line, digest_bits):
"""
Function to split a normal Linux checksum line into a filename and
checksum.
"""
digest_hex_bytes = digest_bits / 4
min_digest_line_length = digest_hex_bytes + 2 + 1 # length of hex message digest + blank and binary indicator (2 bytes) + minimum file length (1 byte)
min_length = min_digest_line_length
if line[0] == '\\':
min_length = min_length + 1
if len(line) < min_length:
# if the line is too short, skip it
return None, None
if line[0] == '\\':
current = digest_hex_bytes + 1
hex_digest = line[1:current]
escaped_filename = True
else:
current = digest_hex_bytes
hex_digest = line[0:current]
escaped_filename = False
# if the digest is not immediately followed by a white space, it is an
# error
if line[current] != ' ' and line[current] != '\t':
return None, None
current += 1
# if the whitespace is not immediately followed by another space or a *,
# it is an error
if line[current] != ' ' and line[current] != '*':
return None, None
if line[current] == '*':
binary = True
current += 1
if line[-1] == '\n':
|
filename = line[current:-1]
else:
filename = line[current:]
if escaped_filename:
# FIXME: a \0 is not allowed in the sum file format, but
# string_escape allows it. We'd probably have to implement our
# own codec to fix this
filename = filename.decode('string_escape')
return hex_digest, filename
def get_sum_from_file(sumfile, file_to_find, digest_bits, digest_type):
"""
Function to get a checksum digest out of a checksum file given a
filename.
"""
retval = None
f = open(sumfile, 'r')
for line in f:
binary = False
# remove any leading whitespace
line = line.lstrip()
# ignore blank lines
if len(line) == 0:
continue
# ignore comment lines
if line[0] == '#':
continue
if line.startswith(digest_type):
# OK, if it starts with a string of ["MD5", "SHA1", "SHA256"], then
# this is a BSD-style sumfile
hex_digest, filename = bsd_split(line, digest_type)
else:
# regular sumfile
hex_digest, filename = sum_split(line, digest_bits)
if hex_digest is None or filename is None:
continue
if filename == file_to_find:
retval = hex_digest
break
f.close()
return retval
def get_md5sum_from_file(sumfile, file_to_find):
"""
Function to get an MD5 checksum out of a checksum file given a filename.
"""
return get_sum_from_file(sumfile, file_to_find, 128, "MD5")
def get_sha1sum_from_file(sumfile, file_to_find):
"""
Function to get a SHA1 checksum out of a checksum file given a filename.
"""
return get_sum_from_file(sumfile, file_to_find, 160, "SHA1")
def get_sha256sum_from_file(sumfile, file_to_find):
"""
Function to get a SHA256 checksum out of a checksum file given a
filename.
"""
return get_sum_from_file(sumfile, file_to_find, 256, "SHA256")
def string_to_bool(instr):
"""
Function to take a string and determine whether it is True, Yes, False,
or No. It takes a single argument, which is the string to examine.
Returns True if instr is "Yes" or "True", False if instr is "No"
or "False", and None otherwise.
"""
if instr is None:
raise Exception("Input string was None!")
lower = instr.lower()
if lower == 'no' or lower == 'false':
return False
if lower == 'yes' or lower == 'true':
return True
return None
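# A minimal sketch of the expected behaviour:
#   string_to_bool("Yes")   -> True
#   string_to_bool("false") -> False
#   string_to_bool("maybe") -> None
#   string_to_bool(None)    -> raises Exception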
def generate_macaddress():
"""
Function to generate a random
|
rohitranjan1991/home-assistant
|
tests/components/tibber/test_statistics.py
|
Python
|
mit
| 2,649
| 0.00151
|
"""Test adding external statistics from Tibber."""
from unittest.mock import AsyncMock
from homeassistant.components.recorder.statistics import statistics_during_period
from homeassistant.components.tibber.sensor import TibberDataCoordinator
from homeassistant.util import dt as dt_util
from .test_common import CONSUMPTION_DATA_1, mock_get_homes
from tests.common import async_init_recorder_component
from tests.components.recorder.common import async_wait_recording_done_without_instance
async def test_async_setup_entry(hass):
"""Test setup Tibber."""
await async_init_recorder_component(hass)
tibber_connection = AsyncMock()
tibber_connection.name = "tibber"
tibber_connection.fetch_consumption_data_active_homes.return_value = None
tibber_connection.get_homes = mock_get_homes
coordinator = TibberDataCoordinator(hass, tibber_connection)
await coordinator._async_update_data()
await async_wait_recording_done_without_instance(hass)
# Validate consumption
statistic_id = "tibber:energy_consumption_home_id"
stats = await hass.async_add_executor_job(
statistics_during_period,
hass,
|
dt_util.parse_datetime(CONSUMPTION_DATA_1[0]["from"]),
None,
[statistic_id],
"hour",
True,
)
assert len(stats) == 1
assert len(stats[statistic_id]) == 3
_sum = 0
for k, stat in enumerate(stats[statistic_id]):
assert stat["start"] == dt_util.parse_datetime(CONSUMPTION_DATA_1[k]["from"])
assert stat["state"] == CONSUMPTION_DATA_1[k]["consumption"]
assert stat["mean"] is None
assert stat["min"] is None
assert stat["max"] is None
assert stat["last_reset"] is None
_sum += CONSUMPTION_DATA_1[k]["consumption"]
assert stat["sum"] == _sum
# Validate cost
statistic_id = "tibber:energy_totalcost_home_id"
stats = await hass.async_add_executor_job(
statistics_during_period,
hass,
dt_util.parse_datetime(CONSUMPTION_DATA_1[0]["from"]),
None,
[statistic_id],
"hour",
True,
)
assert len(stats) == 1
assert len(stats[statistic_id]) == 3
_sum = 0
for k, stat in enumerate(stats[statistic_id]):
assert stat["start"] == dt_util.parse_datetime(CONSUMPTION_DATA_1[k]["from"])
assert stat["state"] == CONSUMPTION_DATA_1[k]["totalCost"]
assert stat["mean"] is None
assert stat["min"] is None
assert stat["max"] is None
assert stat["last_reset"] is None
_sum += CONSUMPTION_DATA_1[k]["totalCost"]
assert stat["sum"] == _sum
|
sciyoshi/gini
|
frontend/src/gbuilder/Core/globals.py
|
Python
|
mit
| 1,542
| 0.029183
|
""" Various global variables """
import os
PROG_NAME = "gBuilder"
PROG_VERSION = "2.0.0"
environ = {"os":"Windows",
"path":os.environ["GINI_HOME"]+"/",
"remotepath":"./",
"images":os.environ["GINI_HOME"]+"/share/gbuilder/images/",
"config":os.environ["GINI_HOME"]+"/etc/",
"sav":os.environ["GINI_HOME"]+"/sav/",
"tmp":os.environ["GINI_HOME"]+"/tmp/",
"doc":os.environ["GINI_HOME"]+"/doc/"}
options = {"names":True,
"systray":False,
"elasticMode":False, "keepElasticMode":False,
"smoothing":True, "glowingLights":True, "style":"Plastique",
"grid":True, "gridColor":"(240,240,240)",
"background":environ["images"] + "background.jpg",
"windowTheme":environ["images"] + "background2.jpg",
"baseTheme":environ["images"] + "background3.jpg",
"autorouting":True, "autogen":True, "autocompile":True,
"graphing":True, "username":"",
"server":"localhost", "session":"GINI", "autoconnect":True,
"localPort":"10001", "remotePort":"10000",
"restore":True}
mainWidgets = {"app":None,
"main":None,
"canvas":None,
"tab":None,
"popup":None,
"log":None,
"tm":None,
"properties":None,
"interfaces":None,
"routes":None,
"drop":None,
"client":None}
defaultOptions = {"palette":None}
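# A minimal sketch of how other gBuilder modules might read these globals
# (the import path below is an assumption based on this file's location):
#   from Core.globals import environ, options, mainWidgets
#   icons = environ["images"]          # $GINI_HOME/share/gbuilder/images/
#   if options["grid"]:
#       pass  # draw the canvas grid using options["gridColor"]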
|
izapolsk/integration_tests
|
cfme/tests/automate/test_vmware_methods.py
|
Python
|
gpl-2.0
| 4,838
| 0.00186
|
"""This module contains tests that exercise the canned VMware Automate stuff."""
from textwrap import dedent
import fauxfactory
import pytest
from widgetastic.widget import View
from widgetastic_patternfly import Dropdown
from cfme import test_requirements
from cfme.common import BaseLoggedInPage
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.automate,
pytest.mark.meta(server_roles="+automate"),
pytest.mark.long_running,
pytest.mark.ignore_stream("upstream"),
pytest.mark.tier(3),
pytest.mark.provider(
[VMwareProvider], required_fields=[['provisioning', 'template']],
scope="module")
]
@pytest.fixture(scope="module")
def cls(domain):
original_class = domain.parent\
.instantiate(name='ManageIQ')\
.namespaces.instantiate(name='System')\
.classes.instantiate(name='Request')
original_class.copy_to(domain=domain)
return domain.namespaces.instantiate(name='System').classes.instantiate(name='Request')
@pytest.fixture(scope="module")
def testing_group(appliance):
group_desc = fauxfactory.gen_alphanumeric()
group = appliance.collections.button_groups.create(
text=group_desc,
hover=group_desc,
type=appliance.collections.button_groups.VM_INSTANCE
)
yield group
group.delete_if_exists()
@pytest.fixture(scope="function")
def testing_vm(setup_provider, provider):
collection = provider.appliance.provider_based_collection(provider)
try:
template_name = provider.data['templates']['full_template']['name']
except KeyError:
pytest.skip('Unable to identify full_template for provider: {}'.format(provider))
vm = collection.instantiate(
random_vm_name("ae-hd"),
provider,
template_name=template_name
)
try:
vm.create_on_provider(find_in_cfme=True, allow_skip="default")
yield vm
finally:
vm.cleanup_on_provider()
def test_vmware_vimapi_hotadd_disk(
appliance, request, testing_group, testing_vm, domain, cls):
"""Tests hot adding a disk to vmware vm. This test exercises the `VMware_HotAdd_Disk` method,
located in `/Integration/VMware/VimApi`
Polarion:
assignee: ghubale
initialEstimate: 1/8h
casecomponent: Automate
caseimportance: critical
tags: automate
testSteps:
1. It creates an instance in ``System/Request`` that can be accessible from eg. button
2. Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``.
The button shall belong in the VM and instance button group.
3. After the button is created, it goes to a VM's summary page, clicks the button.
4. The test waits until the capacity of disks is raised.
Bugzilla:
1211627
1311221
"""
meth = cls.methods.create(
name=fauxfactory.gen_alpha(15, start="load_value_"),
script=dedent('''\
# Sets the capacity of the new disk.
$evm.root['size'] = 1 # GB
exit MIQ_OK
'''))
request.addfinalizer(meth.delete_if_exists)
# Instance that calls the method and is accessible from the button
instance = cls.instances.create(
name=fauxfactory.gen_alpha(23, start="VMware_HotAdd_Disk_"),
fields={
"meth4": {'value': meth.name}, # To get the value
"rel5": {'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"},
},
)
request.addfinalizer(instance.delete_if_exists)
# Button that will invoke the dialog and action
button_name = fauxfactory.gen_alphanumeric()
button = testing_group.buttons.create(
text=button_name,
hover=button_name,
system="Request",
request=instance.name)
request.addfinalizer(button.delete_if_exists)
def _get_disk_capacity():
view = testing_vm.load_details(refresh=True)
return view.entities.summary('Datastore Allocation Summary').get_text_of('Total Allocation')
original_disk_capacity = _get_disk_capacity()
logger.info('Initial disk allocation: %s', original_disk_capacity)
class CustomButtonView(View):
custom_button = Dropdown(testing_group.text)
view = appliance.browser.create_view(CustomButtonView)
view.custom_button.item_select(button.text)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_no_error()
try:
wait_for(
lambda: _get_disk_capacity() > original_disk_capacity, num_sec=180, delay=5)
finally:
logger.info('End disk capacity: %s', _get_disk_capacity())
|
kevinlee9/cnn-text-classification-tf
|
load.py
|
Python
|
apache-2.0
| 1,773
| 0.003384
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
# Parameters
# ==================================================
# Data Parameters
# tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
# tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the positive data.")
# Eval Parameters
# tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "./runs/1495355705/checkpoints", "Checkpoint directory from training run")
# tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
embeddings_ts = graph.get_operation_by_name("embedding/W").outputs[0]
embeddings = sess.run(embeddings_ts)[1:]
np.save("embedding", embeddings)
print("hello")
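# A minimal sketch of consuming the saved array; np.save above writes "embedding.npy":
#   embeddings = np.load("embedding.npy")
#   print(embeddings.shape)  # (vocab_size - 1, embedding_dim)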
|
nickyc4/coolq-telegram-bot
|
plugins/qq_namelist.py
|
Python
|
gpl-3.0
| 1,401
| 0.012274
|
from bot_constant import FORWARD_LIST
import global_vars
from utils import send_both_side
from command import command_listener
import telegram
import logging
logger = logging.getLogger("CTBPlugin." + __name__)
logger.debug(__name__ + " loading")
global_vars.create_variable('group_members', [[]] * len(FORWARD_LIST))
def reload_all_qq_namelist():
for i in range(len(FORWARD_LIST)):
global_vars.group_members[i] = global_vars.qq_bot.get_group_member_list(group_id=FORWARD_LIST[i]['QQ'])
@command_listener('update namelist', 'name', description='update namelist for current group')
def update_namelist(forward_index: int,
tg_group_id: int=None,
tg_user: telegram.User=None,
tg_message_id: int=None,
tg_reply_to: telegram.Message=None,
qq_group_id: int=None,
qq_discuss_id: int=None,
qq_user: int=None):
global_vars.group_members[forward_index] = global_vars.qq_bot.get_group_member_list(group_id=FORWARD_LIST[forward_index]['QQ'])
message = 'QQ群名片已重新加载'  # i.e. "QQ group name cards reloaded"
return send_both_side(forward_index,
message,
qq_group_id,
qq_discuss_id,
tg_group_id,
tg_message_id)
reload_all_qq_namelist()
|
its-dirg/Flask-pyoidc
|
setup.py
|
Python
|
apache-2.0
| 704
| 0
|
from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(
name='Flask-pyoidc',
version='3.9.0',
packages=['flask_pyoidc'],
package_dir={'': 'src'},
url='https://github.com/zamzterz/flask-pyoidc',
license='Apache 2.0',
author='Samuel Gulliksson',
author_email='samuel.gulliksson@gmail.com',
description='Flask extension for OpenID Connect authentication.',
install_requires=[
'oic>=1.2.1',
'Flask',
'requests',
'importlib_resources'
],
|
package_data={'flask_pyoidc': ['parse_fragment.html']},
long_description=long_description,
long_description_content_type='text/markdown',
)
|
google/grr
|
grr/core/grr_response_core/lib/rdfvalues/chipsec_types.py
|
Python
|
apache-2.0
| 1,216
| 0.010691
|
#!/usr/bin/env python
"""RDFValues used to communicate with Chipsec."""
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import chipsec_pb2
class DumpFlashImageRequest(rdf_structs.RDFProtoStruct):
"""A request to Chipsec to dump the flash image (BIOS)."""
protobuf = chipsec_pb2.DumpFlashImageRequest
class DumpFlashImageResponse(rdf_structs.RDFProtoStruct):
"""A response from Chipsec to dump the flash image (BIOS)."""
protobuf = chipsec_pb2.DumpFlashImageResponse
rdf_deps = [
rdf_paths.PathSpec,
]
class ACPITableData(rdf_structs.RDFProtoStruct):
"""Response from Chipsec for one ACPI table."""
protobuf = chipsec_pb2.ACPITableData
rdf_deps = [
rdfvalue.RDFBytes,
]
class DumpACPITableRequest(rdf_structs.RDFProtoStruct):
"""A request to Chipsec to dump an ACPI table."""
protobuf = chipsec_pb2.DumpACPITableRequest
class DumpACPITableResponse(rdf_structs.RDFProtoStruct):
"""A response from Chipsec to dump an ACPI table."""
protobuf = chipsec_pb2.DumpACPITableResponse
rdf_deps = [
ACPITableData,
]
|
appium/python-client
|
test/unit/webdriver/device/remote_fs_test.py
|
Python
|
apache-2.0
| 3,434
| 0.000874
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httpretty
import pytest
from selenium.common.exceptions import InvalidArgumentException
from appium.webdriver.webdriver import WebDriver
from test.unit.helper.test_helper import android_w3c_driver, appium_command, get_httpretty_request_body
class TestWebDriverRemoteFs(object):
@httpretty.activate
def test_push_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
data = base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8')
assert isinstance(driver.push_file(dest_path, data), WebDriver)
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
assert d['data'] == str(data)
@httpretty.activate
def test_push_file_invalid_arg_exception_without_src_path_and_base64data(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path)
@httpretty.activate
def test_push_file_invalid_arg_exception_with_src_file_not_found(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/dest_path/to/file.txt'
src_path = '/src_path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path, source_path=src_path)
@httpretty.activate
def test_pull_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_file'),
body='{"value": "SGVsbG9Xb3JsZA=="}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_file(dest_path) == str(base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8'))
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
@httpretty.activate
def test_pull_folder(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_folder'),
body='{"value": "base64EncodedZippedFolderData"}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_folder(dest_path) == 'base64EncodedZippedFolderData'
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
|
dmarx/praw
|
praw/errors.py
|
Python
|
gpl-3.0
| 14,656
| 0.000068
|
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
"""
Error classes.
Includes two main exceptions: ClientException, when something goes
wrong on our end, and APIException for when something goes wrong on the
server side. A number of classes extend these two main exceptions for more
specific exceptions.
"""
from __future__ import print_function, unicode_literals
import inspect
import six
import sys
class PRAWException(Exception):
"""The base PRAW Exception class.
Ideally, this can be caught to handle any exception from PRAW.
"""
class ClientException(PRAWException):
"""Base exception class for errors that don't involve the remote API."""
def __init__(self, message=None):
"""Construct a ClientException.
:param message: The error message to display.
"""
if not message:
message = 'Clientside error'
super(ClientException, self).__init__()
self.message = message
def __str__(self):
"""Return the message of the error."""
return self.message
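# A minimal sketch of how callers are expected to handle these errors;
# the wrapped call is illustrative:
#   try:
#       ...  # some PRAW call
#   except ClientException as exc:
#       print(exc)  # __str__ returns exc.message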
class OAuthScopeRequired(ClientException):
"""Indicates that an OAuth2 scope is required to make the function call.
The attribute `scope` will contain the name of the necessary scope.
"""
def __init__(self, function, scope, message=None):
"""Construct an OAuthScopeRequiredClientException.
:param function: The function that requires a scope.
:param scope: The scope required for the function.
:param message: A custom message to associate with the
exception. Default: `function` requires the OAuth2 scope `scope`
"""
if not message:
message = '`{0}` requires the OAuth2 scope `{1}`'.format(function,
scope)
super(OAuthScopeRequired, self).__init__(message)
self.scope = scope
class LoginRequired(ClientException):
"""Indicates that a logged in session is required.
This exception is raised on a preemptive basis, whereas NotLoggedIn occurs
in response to a lack of credentials on a privileged API call.
"""
def __init__(self, function, message=None):
"""Construct a LoginRequired exception.
:param function: The function that requires login-based authentication.
:param message: A custom message to associate with the exception.
Default: `function` requires a logged in session
"""
if not message:
message = '`{0}` requires a logged in session'.format(function)
super(LoginRequired, self).__init__(message)
class LoginOrScopeRequired(OAuthScopeRequired, LoginRequired):
"""Indicates that either a logged in session or OAuth2 scope is required.
The attribute `scope` will contain the name of the necessary scope.
"""
def __init__(self, function, scope, message=None):
"""Construct a LoginOrScopeRequired exception.
:param function: The function that requires authentication.
:param scope: The scope that is required if not logged in.
:param message: A custom message to associate with the exception.
Default: `function` requires a logged in session or the OAuth2
scope `scope`
"""
if not message:
message = ('`{0}` requires a logged in session or the '
'OAuth2 scope `{1}`').format(function, scope)
super(LoginOrScopeRequired, self).__init__(function, scope, message)
class ModeratorRequired(LoginRequired):
"""Indicates that a moderator of the subreddit is required."""
def __init__(self, function):
"""Construct a ModeratorRequired exception.
:param function: The function that requires moderator access.
"""
message = ('`{0}` requires a moderator '
'of the subreddit').format(function)
super(ModeratorRequired, self).__init__(message)
class ModeratorOrScopeRequired(LoginOrScopeRequired, ModeratorRequired):
"""Indicates that a moderator of the sub or OAuth2 scope is required.
The attribute `scope` will contain the name of the necessary scope.
"""
def __init__(self, function, scope):
"""Construct a ModeratorOrScopeRequired exception.
:param function: The function that requires moderator authentication or
a moderator scope.
:param scope: The scope that is required if not logged in with
moderator access.
"""
message = ('`{0}` requires a moderator of the subreddit or the '
'OAuth2 scope `{1}`').format(function, scope)
super(ModeratorOrScopeRequired, self).__init__(function, scope,
message)
class OAuthAppRequired(ClientException):
"""Raised when an OAuth client cannot be initialized.
This occurs when any one of the OAuth config values are not set.
"""
class HTTPException(PRAWException):
"""Base class for HTTP related exceptions."""
def __init__(self, _raw, message=None):
"""Construct a HTTPException.
:params _raw: The internal request library response object. This object
is mapped to attribute `_raw` whose format may change at any time.
"""
if not message:
message = 'HTTP error'
super(HTTPException, self).__init__()
self._raw = _raw
self.message = message
def __str__(self):
"""Return the message of the error."""
return self.message
class Forbidden(HTTPException):
"""Raised when the user does not have permission to the entity."""
class NotFound(HTTPException):
"""Raised when the requested entity is not found."""
class InvalidComment(PRAWException):
"""Indicate that the comment is no longer available on reddit."""
ERROR_TYPE = 'DELETED_COMMENT'
def __str__(self):
"""Return the message of the error."""
return self.ERROR_TYPE
class InvalidSubmission(PRAWException):
"""Indicates that the submission is no longer available on reddit."""
ERROR_TYPE = 'DELETED_LINK'
def __str__(self):
"""Return the message of the error."""
return self.ERROR_TYPE
class InvalidSubreddit(PRAWException):
"""Indicates that an invalid subreddit name was supplied."""
ERROR_TYPE = 'SUBREDDIT_NOEXIST'
def __str__(self):
"""Return the message of the error."""
return self.ERROR_TYPE
class RedirectException(PRAWException):
"""Raised when a redirect response occurs that is not expected."""
def __init__(self, request_url, response_url, message=None):
"""Construct a RedirectException.
:param request_url: The url requested.
:param response_url: The url being redirected to.
:param message: A custom message to associate with the exception.
"""
if not message:
message = ('Unexpected redirect '
'from {0} to {1}').format(request_url, response_url)
super(RedirectException, self).__init__()
self.request_url = request_url
self.response_url = response_url
self.message = message
def __str__(self):
"""Return the message of the error."""
return self.message
class OAuthException(PRAWException):
"""Base exception class for OAuth API calls.
Attribute `message` contains the error message.
Attribute `url` contains the url that resulted in the error.
"""
def __
|
AlbertoPeon/invenio
|
modules/bibupload/lib/batchuploader_engine.py
|
Python
|
gpl-2.0
| 28,252
| 0.004071
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import cgi
import re
from invenio.dbquery import run_sql, Error
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
from invenio.textmarc2xmlmarc import transform_file
from invenio.shellutils import run_shell_command
from invenio.bibupload import xml_marc_to_records, bibupload
import invenio.bibupload as bibupload_module
from invenio.bibrecord import create_records, \
record_strip_empty_volatile_subfields, \
record_strip_empty_fields
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
_CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE = re.compile(CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS)
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = '[ERROR] Sorry, the "%s" useragent cannot use the service.' % _get_useragent(req)
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None, callback_url=None, nonce=None, special_treatment=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, the %s useragent cannot use the service." % _get_useragent(req)
_log(msg)
return _write(req, msg)
arg_mode = mode
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
return _write(req, msg)
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
return _write(req, msg)
arg_file = file_content
if hasattr(arg_file, 'read'):
## We've been passed a readable file, e.g. req
arg_file = arg_file.read()
if not arg_file:
msg = "[ERROR] Please provide a body to your request."
_log(msg)
return _write(req, msg)
else:
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
return _write(req, msg)
if hasattr(arg_file, "filename"):
arg_file = arg_file.value
else:
msg = "[ERROR] 'file' parameter must be a (single) file"
_log(msg)
return _write(req, msg)
# write temporary file:
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]
if permitted_dbcollids != ['*']: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
return _write(req, msg)
args = ['bibupload', "batchupload", arg_mode, filename]
# run upload command
if callback_url:
args += ["--callback-url", callback_url]
if nonce:
args += ["--nonce", nonce]
if special_treatment:
args += ["--special-treatment", special_treatment]
task_low_level_submission(*args)
msg = "[INFO] %s" % ' '.join(args)
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1", email_logs_to=None):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1}
# write temporary file:
if filetype != 'marcxml':
metafile = _transform_input_to_marcxml(file_input=metafile)
user_info = collect_user_info(req)
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# run upload command:
task_arguments = ('bibupload', user_info['nickname'], mode, "--name=" + metafilename, "--priority=" + priority)
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
task_arguments += ("-t", date)
if email_logs_to:
task_arguments += ('--email-logs-to', email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date
|
UManPychron/pychron
|
alembic_dvc/versions/45f4b2dbc41a_update_sample_prep_s.py
|
Python
|
apache-2.0
| 607
| 0.003295
|
"""update sample prep steps
Revision ID: 45f4b2dbc41a
Revises: d1653e552ab
Create Date: 2018-07-18 10:01:26.668385
"""
# revision identifiers, used by Alembic.
revision = '45f4b2dbc41a'
down_revision = 'd1653e552ab'
import sqlalchemy as sa
from alembic import op
def upgrade():
for step in ('mount', 'gold_table', 'us_wand', 'eds', 'cl', 'bse', 'se'):
op.add_column('SamplePrepStepTbl',
sa.Column(step, sa.String(140)))
def downgrade():
for step in ('mount', 'gold_table', 'us_wand', 'eds', 'cl', 'bse', 'se'):
op.drop_column('SamplePrepStepTbl', step)
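# A minimal sketch of applying or reverting this revision with the standard
# Alembic CLI (the project's alembic.ini/env.py are assumed to be configured):
#   alembic upgrade 45f4b2dbc41a
#   alembic downgrade d1653e552ab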
|