Dataset schema (one record per row):

| column | type | notes |
|---|---|---|
| repo_name | string | lengths 5 to 100 |
| path | string | lengths 4 to 231 |
| language | string | 1 class; value: Python |
| license | string | 15 classes |
| size | int64 | 6 to 947k |
| score | float64 | 0 to 0.34 |
| prefix | string | lengths 0 to 8.16k |
| middle | string | lengths 3 to 512 |
| suffix | string | lengths 0 to 8.17k |

Each record below is shown as a `repo_name | path | language | license | size | score` line followed by its source listing, with the prefix, middle, and suffix cells joined back into a single snippet.
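Since each row stores a fill-in-the-middle split of one file, reassembling a record is just concatenation. A minimal sketch, assuming the rows are loaded with the HuggingFace `datasets` library; the parquet file name here is hypothetical:

```python
# Minimal sketch: load the rows and reassemble one record.
# "train.parquet" is a hypothetical file name for this dataset.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="train.parquet", split="train")
row = ds[0]
# prefix + middle + suffix restores the original source snippet
source = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], row["license"], len(source))
```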

amueller/ThisPlace | app.py | Python | mit | 1,992 | 0.006024

#!/usr/bin/env python
import bottle
from bottle import (
get,
run,
abort,
static_file,
template
)
import thisplace
example_locs = [("sydney", (-33.867480754852295, 151.20700120925903)),
("battery", (40.70329427719116, -74.0170168876648)),
("san_fran", (37.790114879608154, -122.4202036857605))]
example_locs = dict((name, thisplace.four_words(pos)) for name,pos in example_locs)
@get('/static/<filename:path>')
def serve_static(filename):
return static_file(filename, root='static')
@get('/')
def index():
return template('map', lat=None, lng=None)
@get('/help.html')
def help():
return template('help', err=None, **example_locs)
@get('/<fourwords>')
def showMap(fourwords):
try:
lat, lng = thisplace.decode(fourwords)
return template('map', lat=lat, lng=lng, fourwords=fourwords)
except Exception:
return template('help',
err="Could not find location {}".format(fourwords),
**example_locs)
@get('/latlng/<lat:float>,<lng:float>')
def showMapFromLatLng(lat, lng):
try:
fourwords = thisplace.four_words((lat, lng))
return template('map', lat=lat, lng=lng, fourwords=fourwords)
except Exception:
return template('help',
err="Could not find location {},{}".format(lat, lng),
**example_locs)
# API
@get('/api/<lat:float>,<lng:float>')
def latLngToHash(lat, lng):
try:
three = thisplace.three_words((lat,lng))
four = thisplace.four_words((lat,lng))
six = thisplace.six_words((lat,lng))
return {'three': three, 'four': four, 'six': six}
except Exception:
return {}
@get('/api/<fourwords>')
def hashToLatLng(fourwords):
try:
lat,lng = thisplace.decode(fourwords)
return {"lat": lat, "lng": lng}
except Exception:
abort(404)
if __name__ == '__main__':
run(host='localhost', port=8080)
app = bottle.default_app()

rg3915/orcamentos | selenium/selenium_login.py | Python | mit | 613 | 0

from decouple import config
from selenium import webdriver
HOME = config('HOME')
# page = webdriver.Firefox()
page = webdriver.Chrome(executable_path=HOME + '/chromedriver/chromedriver')
page.get('http://localhost:8000/admin/login/')
# get the search field where we can type some term
campo_busca = page.find_element_by_id('id_username')
campo_busca.send_keys('admin')
campo_busca = page.find_element_by_id('id_password')
campo_busca.send_keys('demodemo')
# button = page.findElement(By.cssSelector("input[type='submit']"))
button = page.find_element_by_xpath("//input[@type='submit']")
button.click()
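A note on API drift: the `find_element_by_*` helpers used above were removed in Selenium 4. A minimal equivalent with the current locator API, reusing the element IDs from the script (chromedriver is assumed to be resolvable automatically):

```python
# Sketch using the Selenium 4 locator API; element IDs taken from the script above.
from selenium import webdriver
from selenium.webdriver.common.by import By

page = webdriver.Chrome()  # Selenium Manager locates the driver
page.get('http://localhost:8000/admin/login/')
page.find_element(By.ID, 'id_username').send_keys('admin')
page.find_element(By.ID, 'id_password').send_keys('demodemo')
page.find_element(By.XPATH, "//input[@type='submit']").click()
```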

furritos/mercado-api | mercado/core/safeway.py | Python | mit | 6,259 | 0.003834

# -*- coding: UTF-8 -*-
import datetime
import json
import logging
import re
from mercado.core.base import Mercado
from mercado.core.common import nt_merge
log = logging.getLogger(__name__)
class Safeway(Mercado):
def __init__(self, auth, urls, headers, sleep_multiplier=1.0):
self.auth = auth
self.urls = urls
self.headers = headers
self.raw_unclipped_list = []
self.prepared_coupon_list = []
self.store_id = 0
self.sleep_multiplier = sleep_multiplier
def execute(self):
self._init_session()
self._login()
self._get_store_id()
self._get_unclipped_coupon_list()
self._get_coupon_details()
self._post_coupon()
def _login(self):
log.info("Logging in as {}".format(self.auth.get("username")))
login_data = json.dumps(
{"source": "WEB",
"rememberMe": False,
"us
|
erId": self.auth.get("username"),
"password": self.auth.get("password")})
rsp = self._run_request(self.urls.login, login_data, self.headers.extra)
rsp_data = json.loads(rsp.content.decode("UTF-8"))
if not rsp_data.get("token") or rsp_data.get("errors"):
raise Exception("Authentication failure")
self.headers.extra.update(
{"X-swyConsumerDirecto
|
ryPro": self.r_s.cookies.get_dict().get("swyConsumerDirectoryPro")})
self.headers.extra.update(
{"X-swyConsumerlbcookie": self.r_s.cookies.get_dict().get("swyConsumerlbcookie")})
def _get_store_id(self):
log.info("Determining Safeway store ID")
rsp = self._run_request(self.urls.store, data=None, headers=self.headers.extra)
r_dict = json.loads(rsp.text)
if str(r_dict.get("code")).lower() != "success":
log.warning("Unable to retrieve store ID, might not be able to pull all coupons back")
else:
definitions = r_dict.get("definitions")[0]
self.store_id = definitions.get("values")[0]
log.info("Determined store ID [{0}] as the preferred store".format(self.store_id))
def _get_unclipped_coupon_list(self):
rsp = self._run_request(self.urls.listing.format(self.store_id), headers=nt_merge(self.headers))
coupons = json.loads(rsp.text)
self.raw_unclipped_list = [offer for offer in coupons["offers"] if
offer["clipStatus"] != "C" and offer["listStatus"] != "S"]
log.info("Retrieved [{0}] unclipped coupons".format(len(self.raw_unclipped_list)))
def _get_coupon_details(self):
for coupon in self.raw_unclipped_list:
detailed_coupon = dict(oid=coupon["offerId"],
ots=coupon["offerTs"],
opgm=coupon["offerPgm"])
url = self.urls.details.format(detailed_coupon.get("oid"), detailed_coupon.get("ots"))
rsp = self._run_request(url, headers=nt_merge(self.headers))
coupon_data = json.loads(rsp.content.decode("UTF-8"))
detailed_coupon.update(dict(vbc=coupon_data.get("vndrBannerCd", "")))
extra_cpn_detail = self._extract_coupon_detail(coupon_data)
detailed_coupon.update(extra_cpn_detail)
self.prepared_coupon_list.append(detailed_coupon)
log.info("Retrieved details for {} coupons".format(len(self.prepared_coupon_list)))
def _post_coupon(self):
self.headers.extra.update({"Referer": self.urls.referer})
for index, coupon in enumerate(self.prepared_coupon_list):
items = json.dumps({
"items": [{
"clipType": "C",
"itemId": coupon["oid"],
"itemType": coupon["opgm"],
"vndrBannerCd": coupon["vbc"]
}, {
"clipType": "L",
"itemId": coupon["oid"],
"itemType": coupon["opgm"]
}]})
rsp = self._run_request(self.urls.post, data=items, headers=nt_merge(self.headers))
rsp.stream = False
if rsp.status_code in (200, 204):
rsp_dict = json.loads(rsp.text)
for response in rsp_dict.get("items"):
if "errorCd" not in response:
coupon["added"] = True
log.info("Successfully added coupon [{}] [{}]".format(coupon["title"], coupon["value"]).encode('utf-8').strip())
break
else:
log.error("Unable to add Coupon ID [{0}] || Error Code [{1}] || Error Message [{2}]".format(
coupon["oid"], response.get("errorCd"), response.get("errorMsg")).encode("utf-8").strip())
break
self._mock_delayer(index + 1)
def _extract_coupon_detail(self, coupon_data):
coupon_detail = coupon_data.get("offerDetail", {})
title = "{} {} {}".format(coupon_detail.get("titleDsc1", "").strip(),
coupon_detail.get("titleDsc2", "").strip(),
coupon_detail.get("prodDsc1", "")).strip()
savings_value = "{}".format(coupon_detail.get("savingsValue", "")).strip()
price_value = "{}".format(coupon_detail.get("priceValue1", "")).strip()
true_value = savings_value if savings_value else price_value
expiration = self._extract_coupon_expiration(coupon_data)
full_coupon = {"title": title, "value": true_value, "added": False, "expiration": expiration}
log.debug("The following coupon was processed successfully {}".format(json.dumps(full_coupon, indent=4)))
return full_coupon
def _extract_coupon_expiration(self, coupon_data):
try:
end_date_regex = "\\\/Date\(([0-9]{10})[0-9]{3}\)\\\/"
expire_ts_unix = int(re.match(end_date_regex, coupon_data["offerEndDt"]).group(1))
expires = (datetime.datetime.fromtimestamp(expire_ts_unix)
.strftime("%-m/%-d/%Y"))
except Exception as e:
log.error("Exception getting coupon details: %s", e)
expires = "Unknown"
return expires
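The expiration parser above handles the legacy Microsoft JSON date encoding. A small, self-contained illustration; the timestamp value is made up:

```python
# Standalone illustration of the "\/Date(ms)\/" format parsed above;
# the timestamp is a made-up example.
import datetime
import re

raw = "\\/Date(1609459200000)\\/"
match = re.match(r"\\/Date\(([0-9]{10})[0-9]{3}\)\\/", raw)
expires = datetime.datetime.utcfromtimestamp(int(match.group(1)))
print(expires.strftime("%m/%d/%Y"))  # 01/01/2021
```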

hawkphantomnet/leetcode | WaterAndJugProblem/Solution.py | Python | mit | 545 | 0

class Solution(object):
def myGCD(self, x, y):
if y == 0:
return x
else:
return self.myGCD(y, x % y)
def canMeasureWater(self, x, y, z):
"""
:type x: int
:type y: int
:type z: int
:rtype: bool
"""
if x == 0 and y == z:
return True
if x == z and y == 0:
return True
if x == z or y == z:
return True
if z > x + y:
return False
return (z % self.myGCD(x, y) == 0)
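The final check rests on Bézout's identity: with unlimited fills and dumps, exactly the multiples of gcd(x, y) up to x + y are measurable. A quick usage sketch of the class above:

```python
# Usage sketch for the Solution class above.
s = Solution()
print(s.canMeasureWater(3, 5, 4))  # True: the classic 3- and 5-gallon jug puzzle
print(s.canMeasureWater(2, 6, 5))  # False: 5 is not a multiple of gcd(2, 6) = 2
```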

EarToEarOak/RTLSDR-Scanner | setup.py | Python | gpl-3.0 | 2,016 | 0.001488

#
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2017 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from setuptools import setup, find_packages
from rtlsdr_scanner.version import VERSION
setup(name='rtlsdr_scanner',
version='.'.join([str(x) for x in VERSION]),
description='A simple spectrum analyser for scanning\n with a RTL-SDR compatible USB device',
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Communications :: Ham Radio',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Visualization'],
keywords='rtlsdr spectrum analyser',
url='http://eartoearoak.com/software/rtlsdr-scanner',
author='Al Brown',
author_email='al@eartoearok.com',
license='GPLv3',
packages=find_packages(),
package_data={'rtlsdr_scanner.res': ['*']},
install_requires=['numpy', 'matplotlib', 'Pillow', 'pyrtlsdr', 'pyserial', 'visvis'])

Chilledheart/chromium | tools/valgrind/drmemory/PRESUBMIT.py | Python | bsd-3-clause | 1,175 | 0.009362

# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def CheckChange(input_api, output_api):
"""Checks the DrMemory suppression files for bad suppressions."""
# TODO(timurrrr): find out how to do relative imports
# and remove this ugly hack. Also, the CheckChange function won't be needed.
tools_vg_path = input_api.os_path.join(input_api.PresubmitLocalPath(), '..')
import sys
old_path = sys.path
try:
sys.path = sys.path + [tools_vg_path]
import suppressions
return suppressions.PresubmitCheck(input_api, output_api)
finally:
sys.path = old_path
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
def GetPreferredTryMasters(project, change):
return {
'tryserver.chromium.win': {
'win_drmemory': set(['defaulttests']),
}
}

rowhit/h2o-2 | py/testdir_0xdata_only/test_hdfs_multi_copies.py | Python | apache-2.0 | 1,029 | 0.013605

import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with its hdfs namenode
h2o.init(1, use_hdfs=True, hdfs_version='cdh4', hdfs_name_node='mr-0x6')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_hdfs_multi_copies(self):
print "\nUse the new regex capabilities for selecting hdfs: try *copies* at /datasets"
# pop open a browser on the cloud
# h2b.browseTheCloud()
# defaults to /datasets
parseResult = h2i.import_parse(path='datasets/manyfiles-nflx-gz/*', schema='hdfs', hex_key='manyfiles.hex',
exclude=None, header=None, timeoutSecs=600)
print "parse result:", parseResult['destination_key']
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()

marvinpinto/charlesbot | tests/util/parse/test_message_parser.py | Python | mit | 1,817 | 0

import unittest
from charlesbot.util.parse import parse_msg_with_prefix
class TestMessageParser(unittest.TestCase):
def test_prefix_uppercase(self):
msg = "!ALL hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_mixed(self):
msg = "!AlL hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_colon(self):
msg = "!all: hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_colon_two(self):
msg = "!all:hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_space(self):
msg = "!all hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_whitespace(self):
msg = "!all hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_leading_whitespace(self):
msg = " !all hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_leading_whitespace_two(self):
msg = " !all hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual("hi, there!", retval)
def test_prefix_invalid_one(self):
msg = "s !all hi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual(None, retval)
def test_prefix_invalid_two(self):
msg = "!allhi, there!"
retval = parse_msg_with_prefix("!all", msg)
self.assertEqual(None, retval)
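The expected behavior can be read directly off these tests. A hypothetical implementation that satisfies all of them; the real charlesbot.util.parse code may differ:

```python
# Hypothetical implementation consistent with the tests above.
import re

def parse_msg_with_prefix(prefix, msg):
    # prefix must start the message (after optional whitespace), be
    # case-insensitive, and be followed by a colon or whitespace.
    pattern = r"^\s*{}[:\s]\s*(.*)$".format(re.escape(prefix))
    match = re.match(pattern, msg, re.IGNORECASE)
    return match.group(1).strip() if match else None
```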

starrify/scrapy | scrapy/settings/default_settings.py | Python | bsd-3-clause | 9,161 | 0.000982

"""
This module contains the default values for all settings used by Scrapy.
For more information about these settings you can read the settings
documentation in docs/topics/settings.rst
Scrapy developers, if you add a setting here remember to:
* add it in alphabetical order
* group similar settings without leaving blank lines
* add its documentation to the available settings documentation
(docs/topics/settings.rst)
"""
import sys
from importlib import import_module
from os.path import join, abspath, dirname
AJAXCRAWL_ENABLED = False
ASYNCIO_EVENT_LOOP = None
AUTOTHROTTLE_ENABLED = False
AUTOTHROTTLE_DEBUG = False
AUTOTHROTTLE_MAX_DELAY = 60.0
AUTOTHROTTLE_START_DELAY = 5.0
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
BOT_NAME = 'scrapybot'
CLOSESPIDER_TIMEOUT = 0
CLOSESPIDER_PAGECOUNT = 0
CLOSESPIDER_ITEMCOUNT = 0
CLOSESPIDER_ERRORCOUNT = 0
COMMANDS_MODULE = ''
COMPRESSION_ENABLED = True
CONCURRENT_ITEMS = 100
CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 8
CONCURRENT_REQUESTS_PER_IP = 0
COOKIES_ENABLED = True
COOKIES_DEBUG = False
DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
DEPTH_LIMIT = 0
DEPTH_STATS_VERBOSE = False
DEPTH_PRIORITY = 0
DNSCACHE_ENABLED = True
DNSCACHE_SIZE = 10000
DNS_RESOLVER = 'scrapy.resolver.CachingThreadedResolver'
DNS_TIMEOUT = 60
DOWNLOAD_DELAY = 0
DOWNLOAD_HANDLERS = {}
DOWNLOAD_HANDLERS_BASE = {
'data': 'scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler',
'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',
}
DOWNLOAD_TIMEOUT = 180 # 3mins
DOWNLOAD_MAXSIZE = 1024 * 1024 * 1024 # 1024m
DOWNLOAD_WARNSIZE = 32 * 1024 * 1024 # 32m
DOWNLOAD_FAIL_ON_DATALOSS = True
DOWNLOADER = 'scrapy.core.downloader.Downloader'
DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'
DOWNLOADER_CLIENT_TLS_CIPHERS = 'DEFAULT'
# Use highest TLS/SSL protocol version supported by the platform, also allowing negotiation:
DOWNLOADER_CLIENT_TLS_METHOD = 'TLS'
DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING = False
DOWNLOADER_MIDDLEWARES = {}
DOWNLOADER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,
'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,
'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,
'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,
# Downloader side
}
DOWNLOADER_STATS = True
DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'
EDITOR = 'vi'
if sys.platform == 'win32':
EDITOR = '%s -m idlelib.idle'
EXTENSIONS = {}
EXTENSIONS_BASE = {
'scrapy.extensions.corestats.CoreStats': 0,
'scrapy.extensions.telnet.TelnetConsole': 0,
'scrapy.extensions.memusage.MemoryUsage': 0,
'scrapy.extensions.memdebug.MemoryDebugger': 0,
'scrapy.extensions.closespider.CloseSpider': 0,
'scrapy.extensions.feedexport.FeedExporter': 0,
'scrapy.extensions.logstats.LogStats': 0,
'scrapy.extensions.spiderstate.SpiderState': 0,
'scrapy.extensions.throttle.AutoThrottle': 0,
}
FEED_TEMPDIR = None
FEEDS = {}
FEED_URI_PARAMS = None # a function to extend uri arguments
FEED_STORE_EMPTY = False
FEED_EXPORT_ENCODING = None
FEED_EXPORT_FIELDS = None
FEED_STORAGES = {}
FEED_STORAGES_BASE = {
'': 'scrapy.extensions.feedexport.FileFeedStorage',
'file': 'scrapy.extensions.feedexport.FileFeedStorage',
'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',
'gs': 'scrapy.extensions.feedexport.GCSFeedStorage',
's3': 'scrapy.extensions.feedexport.S3FeedStorage',
'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',
}
FEED_EXPORT_BATCH_ITEM_COUNT = 0
FEED_EXPORTERS = {}
FEED_EXPORTERS_BASE = {
'json': 'scrapy.exporters.JsonItemExporter',
'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',
'jl': 'scrapy.exporters.JsonLinesItemExporter',
'csv': 'scrapy.exporters.CsvItemExporter',
'xml': 'scrapy.exporters.XmlItemExporter',
'marshal': 'scrapy.exporters.MarshalItemExporter',
'pickle': 'scrapy.exporters.PickleItemExporter',
}
FEED_EXPORT_INDENT = 0
FEED_STORAGE_FTP_ACTIVE = False
FEED_STORAGE_GCS_ACL = ''
FEED_STORAGE_S3_ACL = ''
FILES_STORE_S3_ACL = 'private'
FILES_STORE_GCS_ACL = ''
FTP_USER = 'anonymous'
FTP_PASSWORD = 'guest'
FTP_PASSIVE_MODE = True
GCS_PROJECT_ID = None
HTTPCACHE_ENABLED = False
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_MISSING = False
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_ALWAYS_STORE = False
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_IGNORE_SCHEMES = ['file']
HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []
HTTPCACHE_DBM_MODULE = 'dbm'
HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'
HTTPCACHE_GZIP = False
HTTPPROXY_ENABLED = True
HTTPPROXY_AUTH_ENCODING = 'latin-1'
IMAGES_STORE_S3_ACL = 'private'
IMAGES_STORE_GCS_ACL = ''
ITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'
ITEM_PIPELINES = {}
ITEM_PIPELINES_BASE = {}
LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'
LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None
LOG_SHORT_NAMES = False
SCHEDULER_DEBUG = False
LOGSTATS_INTERVAL = 60.0
MAIL_HOST = 'localhost'
MAIL_PORT = 25
MAIL_FROM = 'scrapy@localhost'
MAIL_PASS = None
MAIL_USER = None
MEMDEBUG_ENABLED = False # enable memory debugging
MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
MEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0
MEMUSAGE_ENABLED = True
MEMUSAGE_LIMIT_MB = 0
MEMUSAGE_NOTIFY_MAIL = []
MEMUSAGE_WARNING_MB = 0
METAREFRESH_ENABLED = True
METAREFRESH_IGNORE_TAGS = []
METAREFRESH_MAXDELAY = 100
NEWSPIDER_MODULE = ''
RANDOMIZE_DOWNLOAD_DELAY = True
REACTOR_THREADPOOL_MAXSIZE = 10
REDIRECT_ENABLED = True
REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
REDIRECT_PRIORITY_ADJUST = +2
REFERER_ENABLED = True
REFERRER_POLICY = 'scrapy.spidermiddlewares.referer.DefaultReferrerPolicy'
RETRY_ENABLED = True
RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]
RETRY_PRIORITY_ADJUST = -1
ROBOTSTXT_OBEY = False
ROBOTSTXT_PARSER = 'scrapy.robotstxt.ProtegoRobotParser'
ROBOTSTXT_USER_AGENT = None
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'
SCHEDULER_PRIORITY_QUEUE = 'scrapy.pqueues.ScrapyPriorityQueue'
SCRAPER_SLOT_MAX_ACTIVE_SIZE = 5000000
SPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'
SPIDER_LOADER_WARN_ONLY = False
SPIDER_MIDDLEWARES = {}
SPIDER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,
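All of these defaults can be overridden per project (settings.py) or per spider. A minimal sketch using Scrapy's `custom_settings` hook, with a hypothetical spider:

```python
# Sketch: overriding a few of the defaults above in a hypothetical spider.
import scrapy

class ExampleSpider(scrapy.Spider):
    name = "example"
    custom_settings = {
        "DOWNLOAD_DELAY": 1.0,                # default is 0
        "CONCURRENT_REQUESTS_PER_DOMAIN": 4,  # default is 8
        "ROBOTSTXT_OBEY": True,               # default is False
    }
```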

prheenan/BioModel | EnergyLandscapes/InverseWeierstrass/Python/TestExamples/Testing/MainTestingWeightedHistograms.py | Python | gpl-2.0 | 747 | 0.013387

# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../../../")
from Util import Test
from Util.Test import _f_assert,HummerData,load_simulated_data
from FitUtil.EnergyLandscapes.InverseWeierstrass.Python.Code import \
InverseWeierstrass,WeierstrassUtil,WeightedHistogram
def assert_all_digitization_correct(objs):
for o in objs:
_assert_digitization_correct(o)
def run():
fwd,rev = load_simulated_data(n=2)
assert_all_digitization_correct(fwd)
assert_all_digitization_correct(rev)
if __name__ == "__main__":
run()

LLNL/spack | var/spack/repos/builtin/packages/tbl2asn/package.py | Python | lgpl-2.1 | 866 | 0.002309

# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from os import chmod
from spack import *
class Tbl2asn(Package):
"""Tbl2asn is a co
|
mmand-line program that automates the creation of
sequence records for submission to GenBank."""
homepage = "https://www.ncbi.nlm.nih.gov/genbank/tbl2asn2/"
version('2020-03-01', sha256='7cc1119d3cfcbbffdbd4ecf33cef8bbdd44fc5625c72976bee08b1157625377e')
def url_for_version(self, ver):
return "https://ftp.ncbi.nih.gov/toolbox/ncbi_tools/converters/by_program/tbl2asn/linux.tbl2asn.gz"
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('../linux.tbl2asn', prefix.bin.tbl2asn)
chmod(prefix.bin.tbl2asn, 0o775)

zfrenchee/pandas | pandas/tests/test_base.py | Python | bsd-3-clause | 43,476 | 0

# -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.compat as compat
from pandas.core.dtypes.common import (
is_object_dtype, is_datetimetz,
needs_i8_conversion)
import pandas.util.testing as tm
from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex,
PeriodIndex, Timedelta, IntervalIndex, Interval,
CategoricalIndex, Timestamp)
from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.accessor import PandasDelegate
from pandas.core.base import PandasObject, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas._libs.tslib import iNaT
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container) # noqa
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
pytest.skip('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container) # noqa
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to assert_raises_regex
# (after the Exception kind).
tm.assert_raises_regex(
TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate(object):
class Delegator(object):
_properties = ['foo']
_methods = ['bar']
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ='property'
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._methods,
typ='method'
)
delegate = self.Delegate(self.Delegator())
def f():
delegate.foo
pytest.raises(TypeError, f)
def f():
delegate.foo = 5
pytest.raises(TypeError, f)
def f():
delegate.foo()
pytest.raises(TypeError, f)
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops(object):
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and
(obj.is_boolean() or not obj._can_hold_na)):
# don't test boolean / int64 index
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name='a')
self.int_index = tm.makeIntIndex(10, name='a')
self.float_index = tm.makeFloatIndex(10, name='a')
self.dt_index = tm.makeDateIndex(10, name='a')
self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(
tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10, name='a')
self.string_index = tm.makeStringIndex(10, name='a')
self.unicode_index = tm.makeUnicodeIndex(10, name='a')
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index, name='a')
self.float_series = Series(arr, index=self.float_index, name='a')
self.dt_series = Series(arr, index=self.dt_index, name='a')
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name='a')
self.string_series = Series(arr, index=self.string_index, name='a')
types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',
'unicode']
fmts = ["{0}_{1}".format(t, f)
for t in types for f in ['index', 'series']]
self.objs = [getattr(self, f)
for f in fmts if getattr(self, f, None) is not None]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(
getattr(o.index, op), index=o.index, name='a')
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(expected,
np.ndarray):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
pytest.raises(TypeError, lambda: getattr(o, op))
else:
pytest.raises(AttributeError,
lambda: getattr(o, op))
def test_binary_ops_docs(self):

magenta/magenta | magenta/models/onsets_frames_transcription/infer_util_test.py | Python | apache-2.0 | 1,356 | 0.001475

# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.models.onsets_frames_transcription import infer_util
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class InferUtilTest(tf.test.TestCase):
def testProbsToPianorollViterbi(self):
frame_probs = np.array([[0.2, 0.1], [0.5, 0.1], [0.5, 0.1], [0.8, 0.1]])
onset_probs = np.array([[0.1, 0.1], [0.1, 0.1], [0.9, 0.1], [0.1, 0.1]])
pianoroll = infer_util.probs_to_pianoroll_viterbi(frame_probs, onset_probs)
np.testing.assert_array_equal(
[[False, False], [False, False], [True, False], [True, False]],
pianoroll)
if __name__ == '__main__':
tf.test.main()

whbruce/upm | examples/python/bmx055.py | Python | mit | 2,781 | 0.001798

#!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_bmx055 as sensorObj
def main():
# Instantiate a BMX055 instance using default i2c bus and address
sensor = sensorObj.BMX055()
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
x = sensorObj.new_floatp()
y = sensorObj.new_floatp()
z = sensorObj.new_floatp()
# now output data every 250 milliseconds
while (1):
sensor.update()
sensor.getAccelerometer(x, y, z)
print("Accelerometer x:", sensorObj.floatp_value(x), end=' ')
print(" y:", sensorObj.floatp_value(y), end=' ')
print(" z:", sensorObj.floatp_value(z), end=' ')
print(" g")
sensor.getGyroscope(x, y, z)
print("Gyroscope x:", sensorObj.floatp_value(x), end=' ')
print(" y:", sensorObj.floatp_value(y), end=' ')
print(" z:", sensorObj.floatp_value(z), end=' ')
print(" degrees/s")
sensor.getMagnetometer(x, y, z)
print("Magnetometer x:", sensorObj.floatp_value(x), end=' ')
print(" y:", sensorObj.floatp_value(y), end=' ')
print(" z:", sensorObj.floatp_value(z), end=' ')
print(" uT")
print()
time.sleep(.250)
if __name__ == '__main__':
main()

romanorac/discomll | discomll/ensemble/core/k_medoids.py | Python | apache-2.0 | 1,504 | 0.001995

"""
Special-purpose k-medoids algorithm
"""
import numpy as np
def fit(sim_mat, D_len, cidx):
"""
The algorithm maximizes the energy between clusters, which is what distinguishes this variant. The distance matrix contains mostly zeros, which are overlooked because the search looks for maximal distances. The algorithm does not try to retain k clusters.
sim_mat: dict of dicts - sparse symmetric distance matrix
D_len: int - number of samples
cidx: list - initial medoid indices
"""
min_energy = np.inf
for j in range(3):
# select indices in each sample that maximizes its dimension
inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx]) for idy in range(D_len) if idy in sim_mat]
cidx = []
energy = 0  # current energy
for i in np.unique(inds):
indsi = np.where(inds == i)[0] # find indices for every cluster
minind, min_value = 0, 0
for index, idy in enumerate(indsi):
if idy in sim_mat:
# value = sum([sim_mat[idy].get(idx,0) for idx in indsi])
value = 0
for idx in indsi:
value += sim_mat[idy].get(idx, 0)
if value < min_value:
minind, min_value = index, value
energy += min_value
cidx.append(indsi[minind]) # new centers
if energy < min_energy:
min_energy, inds_min, cidx_min = energy, inds, cidx
return inds_min, cidx_min # cluster for every instance, medoids indices
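A toy invocation of fit, with a made-up sparse distance structure matching the dict-of-dicts access pattern above:

```python
# Hypothetical toy input; sim_mat mirrors the sparse dict-of-dicts
# access pattern used by fit() above.
sim_mat = {
    0: {1: 0.1, 3: 0.9},
    1: {0: 0.1, 3: 0.8},
    2: {3: 0.2},
    3: {0: 0.9, 1: 0.8, 2: 0.2},
}
inds, medoids = fit(sim_mat, D_len=4, cidx=[0, 3])
print(inds, medoids)  # per-sample cluster labels and the chosen medoid indices
```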

andreyvit/pyjamas | pyjs/src/pyjs/sm.py | Python | apache-2.0 | 5,725 | 0.003493

import os
from pyjs import linker
from pyjs import translator
from pyjs import util
from optparse import OptionParser
import pyjs
PLATFORM='spidermonkey'
APP_TEMPLATE = """
var $wnd = new Object();
$wnd.document = new Object();
var $doc = $wnd.document;
var $moduleName = "%(app_name)s";
var $pyjs = new Object();
$pyjs.__modules__ = {};
$pyjs.modules = {};
$pyjs.modules_hash = {};
$pyjs.available_modules = %(available_modules)s;
$pyjs.loaded_modules = {};
$pyjs.options = new Object();
$pyjs.options.set_all = function (v) {
$pyjs.options.arg_ignore = v;
$pyjs.options.arg_count = v;
$pyjs.options.arg_is_instance = v;
$pyjs.options.arg_instance_type = v;
$pyjs.options.arg_kwarg_dup = v;
$pyjs.options.arg_kwarg_unexpected_keyword = v;
$pyjs.options.arg_kwarg_multiple_values = v;
}
$pyjs.options.set_all(true);
$pyjs.trackstack = [];
$pyjs.track = {module:'__main__', lineno: 1};
$pyjs.trackstack.push($pyjs.track);
$pyjs.__last_exception_stack__ = null;
$pyjs.__last_exception__ = null;
/*
* prepare app system vars
*/
$pyjs.platform = 'spidermonkey';
$pyjs.appname = '%(app_name)s';
$pyjs.loadpath = './';
load(%(module_files)s);
load(%(js_lib_files)s);
/* late static js libs */
%(late_static_js_libs)s
try {
$pyjs.loaded_modules['pyjslib']('pyjslib');
$pyjs.loaded_modules['pyjslib'].___import___('%(app_name)s', '%(app_name)s', '__main__');
} catch(exception)
{
var fullMessage = exception.name + ': ' + exception.message;
var uri = exception.fileName;
//var stack = exception.stack;
var line = exception.lineNumber;
fullMessage += "\\n at " + uri + ": " + line;
print (fullMessage );
//print (stack.toString() );
}
"""
class SpidermonkeyLinker(linker.BaseLinker):
"""Spidermonkey linker, which links together files by using the
load function of the spidermonkey shell."""
# we derive from mozilla
platform_parents = {
PLATFORM:['mozilla', 'array_extras']
}
def __init__(self, *args, **kwargs):
kwargs['platforms'] = [PLATFORM]
super(SpidermonkeyLinker, self).__init__(*args, **kwargs)
def visit_start(self):
super(SpidermonkeyLinker, self).visit_start()
self.js_libs.append('_pyjs.js')
self.merged_public = set()
def merge_resources(self, dir_name):
"""find the absolute paths of js includes"""
if not self.js_libs or dir_name in self.merged_public:
return
public_folder = os.path.join(dir_name, 'public')
if not os.path.isdir(public_folder):
return
for i, js_lib in enumerate(self.js_libs):
p = os.path.join(public_folder, js_lib)
if os.path.isfile(p):
self.js_libs[i] = p
def visit_end(self):
def static_code(libs, msg = None):
code = []
for lib in libs:
fname = lib
if not os.path.isfile(fname):
fname = os.path.join(self.output, lib)
if not os.path.isfile(fname):
raise RuntimeError('File not found %r' % lib)
if fname[len_ouput_dir:] == self.output:
name = fname[len_ouput_dir:]
else:
name = os.path.basename(lib)
if not msg is None:
code.append("/* start %s: %s */" % (msg, name))
f = file(fname)
code.append(f.read())
if not msg is None:
code.append("/* end %s */" % (name,))
self.remove_files[fname] = True
fname = fname.split('.')
if fname[-2] == '__%s__' % platform_name:
del fname[-2]
fname = '.'.join(fname)
if os.path.isfile(fname):
self.remove_files[fname] = True
return "\n".join(code)
done = self.done[PLATFORM]
# locals - go into template via locals()
module_files=str(done)[1:-1]
js_lib_files=str(self.js_libs)[1:-1]
early_static_js_libs=str(self.js_libs)[1:-1]
late_static_js_libs = [] + self.late_static_js_libs
late_static_js_libs = static_code(late_static_js_libs, "javascript lib")
app_name = self.top_module
available_modules = self.visited_modules[PLATFORM]
out_file = open(
os.path.join(self.output, self.top_module + '.js'), 'w')
out_file.write(APP_TEMPLATE % locals())
out_file.close()
def build_script():
usage = """
usage: %prog [options] module_name
"""
parser = OptionParser(usage = usage)
translator.add_compile_options(parser)
# override the default because we want print
parser.set_defaults(print_statements=True)
linker.add_linker_options(parser)
options, args = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
top_module = args[0]
for d in options.library_dirs:
pyjs.path.append(os.path.abspath(d))
translator_arguments=dict(
debug=options.debug,
print_statements = options.print_statements,
function_argument_checking=options.function_argument_checking,
attribute_checking=options.attribute_checking,
source_tracking=options.source_tracking,
line_tracking=options.line_tracking,
store_source=options.store_source
)
l = SpidermonkeyLinker(top_module,
output=options.output,
platforms=[PLATFORM],
path=pyjs.path,
translator_arguments=translator_arguments)
l()

boldprogressives/django-opendebates | opendebates/opendebates_comments/forms.py | Python | apache-2.0 | 7,408 | 0.006479

import time
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.encoding import force_text
from django.utils.text import get_text_list
from django.utils import timezone
from django.utils.translation import ungettext, ugettext, ugettext_lazy as _
from opendebates_comments.models import Comment
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000)
class CommentSecurityForm(forms.Form):
"""
Handles the security aspects (anti-spoofing) for comment forms.
"""
object_id = forms.CharField(widget=forms.HiddenInput)
timestamp = forms.IntegerField(widget=forms.HiddenInput)
security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput)
def __init__(self, target_object, data=None, initial=None):
self.target_object = target_object
if initial is None:
initial = {}
initial.update(self.generate_security_data())
super(CommentSecurityForm, self).__init__(data=data, initial=initial)
def security_errors(self):
"""Return just those errors associated with security"""
errors = ErrorDict()
for f in ["honeypot", "timestamp", "security_hash"]:
if f in self.errors:
errors[f] = self.errors[f]
return errors
def clean_security_hash(self):
"""Check the security hash."""
security_hash_dict = {
'object_id' : self.data.get("object_id", ""),
'timestamp' : self.data.get("timestamp", ""),
}
expected_hash = self.generate_security_hash(**security_hash_dict)
actual_hash = self.cleaned_data["security_hash"]
if not constant_time_compare(expected_hash, actual_hash):
raise forms.ValidationError("Security hash check failed.")
return actual_hash
def clean_timestamp(self):
"""Make sure the timestamp isn't too far (> 2 hours) in the past."""
ts = self.cleaned_data["timestamp"]
if time.time() - ts > (2 * 60 * 60):
raise forms.ValidationError("Timestamp check failed")
return ts
def generate_security_data(self):
"""Generate a dict of security data for "initial" data."""
timestamp = int(time.time())
security_dict = {
'object_id' : str(self.target_object.id),
'timestamp' : str(timestamp),
'security_hash' : self.initial_security_hash(timestamp),
}
return security_dict
def initial_security_hash(self, timestamp):
"""
Generate the initial security hash from self.content_object
and a (unix) timestamp.
"""
initial_security_dict = {
'object_id' : str(self.target_object.id),
'timestamp' : str(timestamp),
}
return self.generate_security_hash(**initial_security_dict)
def generate_security_hash(self, object_id, timestamp):
"""
Generate a HMAC security hash from the provided info.
"""
info = (object_id, timestamp)
key_salt = "django.contrib.forms.CommentSecurityForm"
value = "-".join(info)
return salted_hmac(key_salt, value).hexdigest()
class CommentDetailsForm(CommentSecurityForm):
"""
Handles the specific details of the comment (name, comment, etc.).
"""
name = forms.CharField(label=_("Name"), max_length=50, widget=forms.HiddenInput)
email = forms.EmailField(label=_("Email address"), widget=forms.HiddenInput)
url = forms.URLField(label=_("URL"), required=False, widget=forms.HiddenInput)
comment = forms.CharField(label=_('Comment'), widget=forms.Textarea,
max_length=COMMENT_MAX_LENGTH)
def get_comment_object(self):
"""
Return a new (unsaved) comment object based on the information in this
form. Assumes that the form is already validated and will throw a
ValueError if not.
Does not set any of the fields that would come from a Request object
(i.e. ``user`` or ``ip_address``).
"""
if not self.is_valid():
raise ValueError("get_comment_object may only be called on valid forms")
CommentModel = self.get_comment_model()
new = CommentModel(**self.get_comment_create_data())
new = self.check_for_duplicate_comment(new)
return new
def get_comment_model(self):
"""
Get the comment model to create with this form. Subclasses in custom
comment apps should override this, get_comment_create_data, and perhaps
check_for_duplicate_comment to provide custom comment models.
"""
return Comment
def get_comment_create_data(self):
"""
Returns the dict of data to be used to create a comment. Subclasses in
custom comment apps that override get_comment_model can override this
method to add extra fields onto a custom comment model.
"""
return dict(
object_id = force_text(self.target_object.id),
comment = self.cleaned_data["comment"],
submit_date = timezone.now(),
is_public = True,
is_removed = False,
)
def check_for_duplicate_comment(self, new):
"""
Check that a submitted comment isn't a duplicate. This might be caused
by someone posting a comment twice. If it is a dup, silently return the *previous* comment.
"""
possible_duplicates = self.get_comment_model()._default_manager.using(
self.target_object._state.db
).filter(
object_id = new.object_id,
)
for old in possible_duplicates:
if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
return old
return new
def clean_comment(self):
"""
If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
contain anything in PROFANITIES_LIST.
"""
comment = self.cleaned_data["comment"]
if getattr(settings, 'COMMENTS_ALLOW_PROFANITIES', True) == False:
bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
if bad_words:
raise forms.ValidationError(ungettext(
"Watch your mouth! The word %s is not allowed here.",
"Watch your mouth! The words %s are not allowed here.",
len(bad_words)) % get_text_list(
['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1])
for i in bad_words], ugettext('and')))
return comment
class CommentForm(CommentDetailsForm):
honeypot = forms.CharField(required=False,
label=_('If you enter anything in this field '\
'your comment will be treated as spam'))
def clean_honeypot(self):
"""Check that nothing's been entered into the honeypot."""
value = self.cleaned_data["honeypot"]
if value:
raise forms.ValidationError(self.fields["honeypot"].label)
return value
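Putting the pieces together, a sketch of the anti-spoofing round trip; `obj` stands for a hypothetical saved model instance used as the comment target, and a configured Django environment is assumed:

```python
# Sketch of the anti-spoofing round trip; `obj` is hypothetical.
form = CommentForm(obj)
hidden = form.generate_security_data()   # object_id, timestamp, security_hash
data = dict(hidden, honeypot='', name='Alice',
            email='alice@example.com', comment='Hello')
bound = CommentForm(obj, data=data)
assert bound.is_valid()                  # hash and timestamp checks pass
comment = bound.get_comment_object()     # unsaved Comment instance
```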

idea4bsd/idea4bsd | python/helpers/pydev/build_tools/generate_code.py | Python | apache-2.0 | 5,381 | 0.003531

'''
This module should be run to recreate the files that we generate automatically
(i.e.: modules that shouldn't be traced and cython .pyx)
'''
from __future__ import print_function
import os
import struct
def is_python_64bit():
return (struct.calcsize('P') == 8)
root_dir = os.path.join(os.path.dirname(__file__), '..')
def get_cython_contents(filename):
if filename.endswith('.pyc'):
filename = filename[:-1]
state = 'regular'
new_contents = []
with open(filename, 'r') as stream:
for line in stream:
strip = line.strip()
if state == 'regular':
if strip == '# IFDEF CYTHON':
state = 'cython'
new_contents.append('%s -- DONT EDIT THIS FILE (it is automatically generated)\n' % line.replace('\n', '').replace('\r', ''))
continue
new_contents.append(line)
elif state == 'cython':
if strip == '# ELSE':
state = 'nocython'
new_contents.append(line)
continue
elif strip == '# ENDIF':
state = 'regular'
new_contents.append(line)
continue
assert strip.startswith('# '), 'Line inside # IFDEF CYTHON must start with "# ".'
new_contents.append(line.replace('# ', '', 1))
elif state == 'nocython':
if strip == '# ENDIF':
state = 'regular'
new_contents.append(line)
continue
new_contents.append('# %s' % line)
assert state == 'regular', 'Error: # IFDEF CYTHON found without # ENDIF'
return ''.join(new_contents)
def _generate_cython_from_files(target, modules):
contents = ['''# Important: Autogenerated file.
# DO NOT edit manually!
# DO NOT edit manually!
''']
for mod in modules:
contents.append(get_cython_contents(mod.__file__))
with open(target, 'w') as stream:
stream.write(''.join(contents))
def generate_dont_trace_files():
template = '''# Important: Autogenerated file.
# DO NOT edit manually!
# DO NOT edit manually!
from _pydevd_bundle.pydevd_constants import IS_PY3K
LIB_FILE = 1
PYDEV_FILE = 2
DONT_TRACE = {
# commonly used things from the stdlib that we don't want to trace
'Queue.py':LIB_FILE,
'queue.py':LIB_FILE,
'socket.py':LIB_FILE,
'weakref.py':LIB_FILE,
'_weakrefset.py':LIB_FILE,
'linecache.py':LIB_FILE,
'threading.py':LIB_FILE,
#things from pydev that we don't want to trace
'_pydev_execfile.py':PYDEV_FILE,
%(pydev_files)s
}
if IS_PY3K:
# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
DONT_TRACE['io.py'] = LIB_FILE
# Don't trace common encodings too
DONT_TRACE['cp1252.py'] = LIB_FILE
DONT_TRACE['utf_8.py'] = LIB_FILE
'''
pydev_files = []
for root, dirs, files in os.walk(root_dir):
for d in [
'.git',
'.settings',
'build',
'build_tools',
'dist',
'pydevd.egg-info',
'pydevd_attach_to_process',
'pydev_sitecustomize',
'stubs',
'tests',
'tests_mainloop',
'tests_python',
'tests_runfiles',
'test_pydevd_reload',
'third_party',
'__pycache__',
'_pydev_runfiles',
'pydev_ipython',
]:
try:
dirs.remove(d)
except Exception:
pass
for f in files:
if f.endswith('.py'):
if f not in (
'__init__.py',
'runfiles.py',
'pydev_coverage.py',
'pydev_pysrc.py',
'setup.py',
'setup_cython.py',
'interpreterInfo.py',
):
pydev_files.append(" '%s': PYDEV_FILE," % (f,))
contents = template % (dict(pydev_files='\n'.join(sorted(pydev_files))))
assert 'pydevd.py' in contents
assert 'pydevd_dont_trace.py' in contents
with open(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_dont_trace_files.py'), 'w') as stream:
stream.write(contents)
def remove_if_exists(f):
try:
if os.path.exists(f):
os.remove(f)
except Exception:
import traceback;traceback.print_exc()
def generate_cython_module():
remove_if_exists(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx'))
target = os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx')
curr = os.environ.get('PYDEVD_USE_CYTHON')
try:
os.environ['PYDEVD_USE_CYTHON'] = 'NO'
from _pydevd_bundle import pydevd_additional_thread_info_regular
from _pydevd_bundle import pydevd_frame, pydevd_trace_dispatch_regular
_generate_cython_from_files(target, [pydevd_additional_thread_info_regular, pydevd_frame, pydevd_trace_dispatch_regular])
finally:
if curr is None:
del os.environ['PYDEVD_USE_CYTHON']
else:
os.environ['PYDEVD_USE_CYTHON'] = curr
if __name__ == '__main__':
generate_dont_trace_files()
generate_cython_module()
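For context, a toy illustration of the `# IFDEF CYTHON` convention that get_cython_contents rewrites; the snippet content is made up:

```python
# Toy illustration of the '# IFDEF CYTHON' rewriting done above.
# The .py source keeps Cython-only lines commented with '# '; generation
# uncomments them and comments out the plain-Python branch instead.
import tempfile

snippet = (
    "# IFDEF CYTHON\n"
    "# cdef int counter = 0\n"
    "# ELSE\n"
    "counter = 0\n"
    "# ENDIF\n"
)
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write(snippet)
print(get_cython_contents(f.name))
# -> the IFDEF line gains a "DONT EDIT THIS FILE" banner,
#    'cdef int counter = 0' is uncommented, and the plain-Python
#    'counter = 0' becomes '# counter = 0'
```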

eonpatapon/contrail-controller | src/nodemgr/config_nodemgr/event_manager.py | Python | apache-2.0 | 616 | 0.006494

#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from gevent import monkey
monkey.patch_all()
from pysandesh.sandesh_base import sandesh_global
from sandesh_common.vns.ttypes import Module
from nodemgr.common.event_manager import EventManager, EventManagerTypeInfo
class ConfigEventManager(EventManager):
def __init__(self, config, unit_names):
type_info = EventManagerTypeInfo(
module_type=Module.CONFIG_NODE_MGR,
object_table='ObjectConfigNode')
super(ConfigEventManager, self).__init__(config, type_info,
sandesh_global, unit_names)

uclouvain/osis | program_management/ddd/repositories/program_tree_version.py | Python | agpl-3.0 | 19,400 | 0.002165

##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2022 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import contextlib
import warnings
from _decimal import Decimal
from typing import Optional, List
from django.db import IntegrityError
from django.db.models import F, Case, When, IntegerField, QuerySet, Max, OuterRef, Subquery
from django.db.models import Q
from base.models.academic_year import AcademicYear
from base.models.education_group_year import EducationGroupYear
from base.models.enums.education_group_categories import Categories
from education_group.ddd.domain.exception import TrainingNotFoundException
from education_group.models.group import Group
from education_group.models.group_year import GroupYear
from osis_common.ddd import interface
from osis_common.ddd.interface import RootEntity
from program_management import formatter
from program_management.ddd import command
from program_management.ddd.business_types import *
from program_management.ddd.domain import exception
from program_management.ddd.domain import program_tree
from program_management.ddd.domain import program_tree_version
from program_management.ddd.domain.exception import ProgramTreeVersionNotFoundException
from program_management.ddd.domain.program_tree_version import ProgramTreeVersionIdentity, STANDARD, NOT_A_TRANSITION
from program_management.ddd.dtos import UniteEnseignementDTO, ContenuNoeudDTO, ProgrammeDeFormationDTO
from program_management.ddd.repositories import program_tree as program_tree_repository
from program_management.models.education_group_version import EducationGroupVersion
class ProgramTreeVersionRepository(interface.AbstractRepository):
@classmethod
def save(cls, entity: RootEntity) -> None:
raise NotImplementedError
@classmethod
def create(
cls,
program_tree_version: 'ProgramTreeVersion',
**_
) -> 'ProgramTreeVersionIdentity':
warnings.warn("DEPRECATED : use .save() function instead", DeprecationWarning, stacklevel=2)
offer_acronym = program_tree_version.entity_id.offer_acronym
year = program_tree_version.entity_id.year
try:
education_group_year_id = EducationGroupYear.objects.filter(
acronym=offer_acronym,
academic_year__year=year,
).values_list(
'pk', flat=True
)[0]
except IndexError:
raise TrainingNotFoundException(acronym=offer_acronym, year=year)
group_year_id = GroupYear.objects.filter(
partial_acronym=program_tree_version.program_tree_identity.code,
academic_year__year=program_tree_version.program_tree_identity.year,
).values_list(
'pk', flat=True
)[0]
try:
educ_group_version = EducationGroupVersion.objects.create(
version_name=program_tree_version.version_name,
title_fr=program_tree_version.title_fr,
title_en=program_tree_version.title_en,
offer_id=education_group_year_id,
transition_name=program_tree_version.entity_id.transition_name,
root_group_id=group_year_id,
)
_update_start_year_and_end_year(
educ_group_version,
program_tree_version.start_year,
program_tree_version.end_year_of_existence
)
except IntegrityError as ie:
raise exception.ProgramTreeAlreadyExistsException
return program_tree_version.entity_id
@classmethod
def update(cls, program_tree_version: 'ProgramTreeVersion', **_) -> 'ProgramTreeVersionIdentity':
warnings.warn("DEPRECATED : use .save() function instead", DeprecationWarning, stacklevel=2)
obj = EducationGroupVersion.objects.get(
offer__acronym=program_tree_version.entity_identity.offer_acronym,
offer__academic_year__year=program_tree_version.entity_identity.year,
version_name=program_tree_version.entity_identity.version_name,
transition_name=program_tree_version.entity_identity.transition_name,
)
obj.version_name = program_tree_version.version_name
obj.title_fr = program_tree_version.title_fr
obj.title_en = program_tree_version.title_en
obj.save()
_update_start_year_and_end_year(
obj,
program_tree_version.start_year,
program_tree_version.end_year_of_existence
)
return program_tree_version.entity_id
@classmethod
def get(cls, entity_id: 'ProgramTreeVersionIdentity') -> 'ProgramTreeVersion':
qs = _get_common_queryset().filter(
version_name=entity_id.version_name,
offer__acronym=entity_id.offer_acronym,
offer__academic_year__year=entity_id.year,
transition_name=entity_id.transition_name,
)
try:
return _instanciate_tree_version(qs.get())
except EducationGroupVersion.DoesNotExist:
raise exception.ProgramTreeVersionNotFoundException()
@classmethod
def get_last_in_past(cls, entity_id: 'ProgramTreeVersionIdentity') -> 'ProgramTreeVersion':
qs = EducationGroupVersion.objects.filter(
version_name=entity_id.version_name,
offer__acronym=entity_id.offer_acronym,
offer__academic_year__year__lt=entity_id.year,
transition_name=entity_id.transition_name
).order_by(
'offer__academic_year'
).values_list(
'offer__academic_year__year',
flat=True,
)
if qs:
last_past_year = qs.last()
last_identity = ProgramTreeVersionIdentity(
offer_acronym=entity_id.offer_acronym,
year=last_past_year,
version_name=entity_id.version_name,
transition_name=entity_id.transition_name,
)
            return cls.get(entity_id=last_identity)
        raise ProgramTreeVersionNotFoundException()
@classmethod
def search(
cls,
entity_ids: Optional[List['ProgramTreeVersionIdentity']] = None,
version_name: str = None,
offer_acronym: str = None,
transition_name: str = None,
code: str = None,
year: int = None,
**kwargs
) -> List['ProgramTreeVersion']:
qs = _get_common_queryset()
if "element_ids" in kwargs:
qs = qs.filter(root_group__element__in=kwargs['element_ids'])
if version_name is not None:
qs = qs.filter(version_name=version_name)
if offer_acronym is not None:
qs = qs.filter(offer__acronym=offer_acronym)
if transition_name is not None:
qs = qs.filter(transition_name=transition_name)
if year is not None:
qs = qs.filter(offer__academic_year__year=year)
if code is not None:
            qs = qs.filter(root_group__partial_acronym=code)
        return [_instanciate_tree_version(obj) for obj in qs]
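# Hedged usage sketch (not part of the original module): fetching one tree
# version through the repository above. The identity values are hypothetical.
#
#   identity = ProgramTreeVersionIdentity(
#       offer_acronym='DROI2M', year=2021,
#       version_name=STANDARD, transition_name=NOT_A_TRANSITION,
#   )
#   try:
#       tree_version = ProgramTreeVersionRepository.get(identity)
#   except exception.ProgramTreeVersionNotFoundException:
#       tree_version = None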
|
nephomaniac/eucio
|
eucio/topology/userfacing/__init__.py
|
Python
|
apache-2.0
| 611
| 0.001637
|
#!/usr/bin/env python
# Copyright 2009-2014 Eucalyptus Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
znegva/pelican-plugins
|
pelican_comment_system/avatars.py
|
Python
|
agpl-3.0
| 2,305
| 0.023861
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
import logging
import os
import hashlib
logger = logging.getLogger(__name__)
_log = "pelican_comment_system: avatars: "
try:
from . identicon import identicon
_identiconImported = True
except ImportError as e:
logger.warning(_log + "identicon deactivated: " + str(e))
_identiconImported = False
# Global Variables
_identicon_save_path = None
_identicon_output_path = None
_identicon_data = None
_identicon_size = None
_initialized = False
_authors = None
_missingAvatars = []
def _ready():
if not _initialized:
logger.warning(_log + "Module not initialized. use init")
if not _identicon_data:
logger.debug(_log + "No identicon data set")
return _identiconImported and _initialized and _identicon_data
def init(pelican_output_path, identicon_output_path, identicon_data, identicon_size, authors):
global _identicon_save_path
global _identicon_output_path
global _identicon_data
global _identicon_size
global _initialized
global _authors
_identicon_save_path = os.path.join(pelican_output_path, identicon_output_path)
_identicon_output_path = identicon_output_path
_identicon_data = identicon_data
_identicon_size = identicon_size
_authors = authors
    _initialized = True
def _createIdenticonOutputFolder():
if not _ready():
return
if not os.path.exists(_identicon_save_path):
os.makedirs(_identicon_save_path)
def getAvatarPath(comment_id, metadata):
if not _ready():
return ''
md5 = hashlib.md5()
author = tuple()
for data in _identicon_data:
if data in metadata:
            string = str(metadata[data])
md5.update(string.encode('utf-8'))
author += tuple([string])
else:
logger.warning(_log + data + " is missing in comment: " + comment_id)
if author in _authors:
return _authors[author]
global _missingAvatars
code = md5.hexdigest()
    if code not in _missingAvatars:
_missingAvatars.append(code)
return os.path.join(_identicon_output_path, '%s.png' % code)
def generateAndSaveMissingAvatars():
_createIdenticonOutputFolder()
for code in _missingAvatars:
avatar_path = '%s.png' % code
avatar = identicon.render_identicon(int(code, 16), _identicon_size)
avatar_save_path = os.path.join(_identicon_save_path, avatar_path)
avatar.save(avatar_save_path, 'PNG')
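# Hedged usage sketch (not part of the original module); in the plugin these
# calls are normally driven by Pelican signals. Paths and keys are
# hypothetical.
#
#   init(pelican_output_path='output',
#        identicon_output_path='images/identicons',
#        identicon_data=('author', 'email'),
#        identicon_size=72,
#        authors={})
#   path = getAvatarPath('comment-1', {'author': 'Jane', 'email': 'j@x.y'})
#   generateAndSaveMissingAvatars()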
|
LauritzThaulow/fakelargefile
|
tests/test_segmenttail.py
|
Python
|
agpl-3.0
| 365
| 0
|
'''
Created on Nov 10, 2014
@author: lauritz
'''
from mock import Mock
from fakelargefile.segmenttail import OverlapSearcher
def test_index_iter_stop():
os = OverlapSearcher("asdf")
segment = Mock()
segment.start = 11
try:
os.index_iter(segment, stop=10).next()
except ValueError:
assert True
else:
assert False
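# Hedged note (not part of the original test): with pytest the same check is
# usually written as
#
#   with pytest.raises(ValueError):
#       os.index_iter(segment, stop=10).next()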
|
anhstudios/swganh
|
data/scripts/templates/object/static/structure/general/shared_palette_supply_01.py
|
Python
|
mit
| 455
| 0.048352
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
	result.template = "object/static/structure/general/shared_palette_supply_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
duncanmmacleod/gwsumm
|
gwsumm/plot/__init__.py
|
Python
|
gpl-3.0
| 1,636
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""A `Plot` is a representation of an image to be included in the HTML
output of a :doc:`tab </tabs>`.
For simple purposes, a `Plot` is just a reference to an existing image file
that can be imported into an HTML page via the ``<img>`` tag.
For more complicated purposes, a number of data plot classes are provided to
allow users to generate images on-the-fly.
The available classes are:
.. autosummary::
:toctree: api
TimeSeriesDataPlot
SpectrogramDataPlot
SegmentDataPlot
StateVectorDataPlot
SpectrumDataPlot
TimeSeriesHistogramPlot
TriggerTimeSeriesDataPlot
TriggerHistogramPlot
TriggerRateDataPlot
"""
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
from .registry import *
from .utils import *
from .core import *
from .builtin import *
from .segments import *
from .triggers import *
from .range import *
from .noisebudget import *
from .guardian import *
from .sei import *
|
jambonsw/django-improved-user
|
example_integration_project/config/urls.py
|
Python
|
bsd-2-clause
| 312
| 0
|
"""Integration project URL Configuration"""
from django.contrib import admin
from django.urls import re_path
from django.views.generic import TemplateView
urlpatterns = [
re_path(r"^admin/", admin.site.urls),
re_path(
r"^$", TemplateView.as_view(template_name="home.html"), name="home"
),
]
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/ie/v20200304/models.py
|
Python
|
mit
| 146,031
| 0.003101
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class ArtifactReduction(AbstractModel):
    """Parameters for removing encoding glitches and artifacts.
    """
def __init__(self):
r"""
        :param Type: Glitch removal strength. Options: weak, strong.
        :type Type: str
        :param Algorithm: Glitch removal algorithm. Options:
edaf,
wdaf,
default is edaf.
Note: this parameter is deprecated.
        :type Algorithm: str
"""
self.Type = None
self.Algorithm = None
def _deserialize(self, params):
self.Type = params.get("Type")
self.Algorithm = params.get("Algorithm")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set))
class AudioEnhance(AbstractModel):
    """Audio enhancement; only audio without background sound is supported.
    """
def __init__(self):
r"""
:param Type: 音效增强种类,可选项:normal
:type Type: str
"""
self.Type = None
def _deserialize(self, params):
self.Type = params.get("Type")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set))
class AudioInfo(AbstractModel):
    """Audio parameter information.
    """
def __init__(self):
r"""
        :param Bitrate: Audio bitrate. Value range: 0 and [26, 256], in kbps.
Note: a value of 0 keeps the bitrate of the original audio.
        :type Bitrate: int
        :param Codec: Audio codec. Options: aac, mp3, ac3, flac, mp2.
        :type Codec: str
        :param Channel: Number of channels. Options:
1: mono,
2: dual channel,
6: stereo.
        :type Channel: int
        :param SampleRate: Sample rate in Hz. Options: 32000, 44100, 48000.
        :type SampleRate: int
        :param Denoise: Audio denoising information.
        :type Denoise: :class:`tencentcloud.ie.v20200304.models.Denoise`
        :param EnableMuteAudio: Whether to add silence. Options:
0: disabled,
1: enabled.
Disabled by default.
        :type EnableMuteAudio: int
        :param LoudnessInfo: Audio loudness information.
        :type LoudnessInfo: :class:`tencentcloud.ie.v20200304.models.LoudnessInfo`
        :param AudioEnhance: Audio enhancement.
        :type AudioEnhance: :class:`tencentcloud.ie.v20200304.models.AudioEnhance`
        :param RemoveReverb: Reverb removal.
        :type RemoveReverb: :class:`tencentcloud.ie.v20200304.models.RemoveReverb`
"""
self.Bitrate = None
self.Codec = None
self.Channel = None
self.SampleRate = None
self.Denoise = None
self.EnableMuteAudio = None
self.LoudnessInfo = None
self.AudioEnhance = None
self.RemoveReverb = None
def _deserialize(self, params):
self.Bitrate = params.get("Bitrate")
self.Codec = params.get("Codec")
self.Channel = params.get("Channel")
self.SampleRate = params.get("SampleRate")
if params.get("Denoise") is not None:
self.Denoise = Denoise()
self.Denoise._deserialize(params.get("Denoise"))
self.EnableMuteAudio = params.get("EnableMuteAudio")
if params.get("LoudnessInfo") is not None:
self.LoudnessInfo = LoudnessInfo()
self.LoudnessInfo._deserialize(params.get("LoudnessInfo"))
if params.get("AudioEnhance") is not None:
self.AudioEnhance = AudioEnhance()
self.AudioEnhance._deserialize(params.get("AudioEnhance"))
if params.get("RemoveReverb") is not None:
self.RemoveReverb = RemoveReverb()
self.RemoveReverb._deserialize(params.get("RemoveReverb"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set))
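# Hedged usage sketch (not part of the original SDK file): models in this
# module are populated from API response dicts via _deserialize, e.g.
#
#   info = AudioInfo()
#   info._deserialize({"Bitrate": 128, "Codec": "aac", "Channel": 2})
#   assert info.Codec == "aac"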
class AudioInfoResultItem(AbstractModel):
    """Audio information of the file generated after the task finishes.
    """
def __init__(self):
r"""
        :param Stream: Stream ID of the audio stream.
        :type Stream: int
        :param Sample: Audio sample rate.
Note: this field may return null, indicating that no valid value was obtained.
        :type Sample: int
        :param Channel: Number of audio channels.
Note: this field may return null, indicating that no valid value was obtained.
        :type Channel: int
        :param Codec: Codec format, e.g. aac, mp3.
Note: this field may return null, indicating that no valid value was obtained.
        :type Codec: str
        :param Bitrate: Bitrate in bps.
Note: this field may return null, indicating that no valid value was obtained.
        :type Bitrate: int
        :param Duration: Audio duration in ms.
Note: this field may return null, indicating that no valid value was obtained.
        :type Duration: int
"""
self.Stream = None
self.Sample = None
self.Channel = None
self.Codec = None
self.Bitrate = None
self.Duration = None
def _deserialize(self, params):
self.Stream = params.get("Stream")
self.Sample = params.get("Sample")
self.Channel = params.get("Channel")
self.Codec = params.get("Codec")
self.Bitrate = params.get("Bitrate")
self.Duration = params.get("Duration")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set))
class CallbackInfo(AbstractModel):
    """Callback URL information for task results.
    """
def __init__(self):
r"""
        :param Url: Callback URL.
:type Url: str
"""
self.Url = None
def _deserialize(self, params):
self.Url = params.get("Url")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set))
class ClassificationEditingInfo(AbstractModel):
    """Parameter information for the video classification task.
    """
def __init__(self):
r"""
        :param Switch: Whether to enable video classification. 0: off, 1: on. Values other than 0 and 1 default to 0.
        :type Switch: int
        :param CustomInfo: Extra customized service parameters, as a serialized JSON string, e.g. {"k1":"v1"}.
        :type CustomInfo: str
"""
self.Switch = None
self.CustomInfo = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
self.CustomInfo = params.get("CustomInfo")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set))
class ClassificationTaskResult(AbstractModel):
    """Result information of video classification.
    """
def __init__(self):
r"""
        :param Status: Editing task status.
1: running; 2: succeeded; 3: failed.
        :type Status: int
        :param ErrCode: Error code of the failed editing task.
0: success; other values: failure.
        :type ErrCode: int
        :param ErrMsg: Error description of the failed editing task.
        :type ErrMsg: str
        :param ItemSet: Video classification result set.
Note: this field may return null, indicating that no valid value was obtained.
        :type ItemSet: list of ClassificationTaskResultItem
"""
self.Status = None
self.ErrCode = None
self.ErrMsg = None
self.ItemSet = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.ErrCode = params.get("ErrCode")
self.ErrMsg = params.get("ErrMsg")
if params.get("ItemSet") is not None:
self.ItemSet = []
for item in params.get("ItemSet"):
                obj = ClassificationTaskResultItem()
                obj._deserialize(item)
                self.ItemSet.append(obj)
|
voxelbrain/dibble
|
dibble/update.py
|
Python
|
bsd-3-clause
| 2,635
| 0.000759
|
# -*- coding: utf-8 -*-
import collections
class InvalidOperatorError(ValueError):
pass
class DuplicateFieldError(ValueError):
pass
class FieldDict(dict):
def __setitem__(self, k, v):
if k in self:
raise DuplicateFieldError('Field "{0}" already set.'.format(k))
super(FieldDict, self).__setitem__(k, v)
def update(self, E=None, **F):
raise NotImplementedError()
class OperatorDict(collections.defaultdict):
OPERATORS = ('$inc', '$rename', '$set', '$unset', '$push', '$pushAll', '$addToSet',
'$pop', '$pull', '$pullAll')
def __init__(self):
super(OperatorDict, self).__init__(FieldDict)
def __setitem__(self, k, v):
if k not in self.OPERATORS:
raise InvalidOperatorError('"{0}" is not a valid operator'.format(k))
super(OperatorDict, self).__setitem__(k, v)
def update(self, E=None, **F):
raise NotImplementedError()
class Update(object):
def __init__(self):
self._ops = OperatorDict()
def __iter__(self):
return self._ops.iteritems()
def __contains__(self, item):
return any(item in fields for _, fields in self._ops.items())
def clear(self):
self._ops.clear()
def drop_field(self, field):
empty_keys = []
for k, updates in self._ops.iteritems():
updates.pop(field, None)
if not updates:
empty_keys.append(k)
for k in empty_keys:
del self._ops[k]
def set(self, field, value):
self._ops['$set'][field] = value
def inc(self, field, increment):
"""
>>> update = Update()
>>> update.inc('foo', 'bar')
>>> dict(update)
{'$inc': {'foo': 'bar'}}
"""
self._ops['$inc'][field] = increment
def rename(self, old, new):
"""
>>> update = Update()
>>> update.rename('old', 'new')
>>> dict(update)
{'$rename': {'old': 'new'}}
"""
self._ops['$rename'][old] = new
def unset(self, name):
self._ops['$unset'][name] = 1
def push(self, name, value):
self._ops['$push'][name] = value
def pushAll(self, name, values):
self._ops['$pushAll'][name] = values
def addToSet(self, name, value):
self._ops['$addToSet'][name] = value
    def pop(self, name, first=False):
v = (-1 if first else 1)
self._ops['$pop'][name] = v
def pull(self, name, value):
self._ops['$pull'][name] = value
def pullAll(self, name, values):
self._ops['$pullAll'][name] = values
|
ogata-lab/rtmsdk-mac
|
x86_64/lib/python2.7/site-packages/omniidl_be/cxx/header/template.py
|
Python
|
lgpl-2.1
| 39,776
| 0.001282
|
# -*- python -*-
# Package : omniidl
# template.py Created on: 2000/01/18
# Author : David Scott (djs)
#
# Copyright (C) 2003-2008 Apasphere Ltd
# Copyright (C) 1999 AT&T Laboratories Cambridge
#
# This file is part of omniidl.
#
# omniidl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
# Description:
#
# C++ templates for the .hh file
# $Id: template.py 5867 2009-05-06 16:16:18Z dgrisby $
# $Log$
# Revision 1.8.2.20 2008/12/29 18:44:38 dgrisby
# Globally scope array functions to avoid ambiguities.
#
# Revision 1.8.2.19 2008/12/03 10:53:58 dgrisby
# Tweaks leading to Python 3 support; other minor clean-ups.
#
# Revision 1.8.2.18 2007/09/19 14:16:07 dgrisby
# Avoid namespace clashes if IDL defines modules named CORBA.
#
# Revision 1.8.2.17 2007/05/11 09:52:27 dgrisby
# New -Wbguard_prefix option. Thanks Austin Bingham.
#
# Revision 1.8.2.16 2006/10/23 15:36:25 dgrisby
# Undefine USE_stub_in_nt_dll at the end of header if it was not defined
# at the start.
#
# Revision 1.8.2.15 2006/09/04 11:40:06 dgrisby
# Remove crazy switch code in enum marshalling.
#
# Revision 1.8.2.14 2006/01/10 12:24:03 dgrisby
# Merge from omni4_0_develop pre 4.0.7 release.
#
# Revision 1.8.2.13 2005/11/14 11:02:16 dgrisby
# Local interface fixes.
#
# Revision 1.8.2.12 2005/11/09 12:22:17 dgrisby
# Local interfaces support.
#
# Revision 1.8.2.11 2005/08/16 13:51:21 dgrisby
# Problems with valuetype / abstract interface C++ mapping.
#
# Revision 1.8.2.10 2005/07/22 17:18:37 dgrisby
# Another merge from omni4_0_develop.
#
# Revision 1.8.2.9 2005/01/06 23:10:06 dgrisby
# Big merge from omni4_0_develop.
#
# Revision 1.8.2.8 2005/01/06 16:35:18 dgrisby
# Narrowing for abstract interfaces.
#
# Revision 1.8.2.7 2004/10/13 17:58:24 dgrisby
# Abstract interfaces support; values support interfaces; value bug fixes.
#
# Revision 1.8.2.6 2004/07/31 23:46:27 dgrisby
# Correct constness of exception Any insertion operator.
#
# Revision 1.8.2.5 2004/07/23 10:29:59 dgrisby
# Completely new, much simpler Any implementation.
#
# Revision 1.8.2.4 2004/07/04 23:53:39 dgrisby
# More ValueType TypeCode and Any support.
#
# Revision 1.8.2.3 2004/02/16 10:10:32 dgrisby
# More valuetype, including value boxes. C++ mapping updates.
#
# Revision 1.8.2.2 2003/10/23 11:25:55 dgrisby
# More valuetype support.
#
# Revision 1.8.2.1 2003/03/23 21:02:36 dgrisby
# Start of omniORB 4.1.x development branch.
#
# Revision 1.5.2.19 2001/11/12 13:46:07 dpg1
# _unchecked_narrow, improved _narrow.
#
# Revision 1.5.2.18 2001/11/08 16:33:51 dpg1
# Local servant POA shortcut policy.
#
# Revision 1.5.2.17 2001/10/29 17:42:41 dpg1
# Support forward-declared structs/unions, ORB::create_recursive_tc().
#
# Revision 1.5.2.16 2001/10/18 12:45:28 dpg1
# IDL compiler tweaks.
#
# Revision 1.5.2.15 2001/10/17 16:44:05 dpg1
# Update DynAny to CORBA 2.5 spec, const Any exception extraction.
#
# Revision 1.5.2.14 2001/09/19 17:29:04 dpg1
# Cosmetic changes.
#
# Revision 1.5.2.13 2001/08/17 13:45:56 dpg1
# C++ mapping fixes.
#
# Revision 1.5.2.12  2001/08/15 10:26:10  dpg1
# New object table behaviour, correct POA semantics.
#
# Revision 1.5.2.11 2001/08/03 17:41:17 sll
# System exception minor code overhaul. When a system exeception is raised,
# a meaning minor code is provided.
#
# Revision 1.5.2.10 2001/07/31 19:25:11 sll
# Array _var should be separated into fixed and variable size ones.
#
# Revision 1.5.2.9 2001/06/18 20:30:51 sll
# Only define 1 conversion operator from T_var to T* if the compiler is
# gcc. Previously, this is only done for gcc 2.7.2. It seems that gcc 3.0
# requires this to be the case. This is the default for all versions of
# gcc.
#
# Revision 1.5.2.8 2001/05/29 17:03:50 dpg1
# In process identity.
#
# Revision 1.5.2.7 2001/04/19 09:30:12 sll
# Big checkin with the brand new internal APIs.
# Scoped where appropriate with the omni namespace.
#
# Revision 1.5.2.6 2001/03/13 10:32:09 dpg1
# Fixed point support.
#
# Revision 1.5.2.5 2000/11/20 14:43:25 sll
# Added support for wchar and wstring.
#
# Revision 1.5.2.4 2000/11/09 12:27:55 dpg1
# Huge merge from omni3_develop, plus full long long from omni3_1_develop.
#
# Revision 1.5.2.3 2000/11/03 19:20:41 sll
# Replaced old marshal operators with a unified operator for cdrStream.
#
# Revision 1.5.2.2 2000/10/12 15:37:51 sll
# Updated from omni3_1_develop.
#
# Revision 1.6.2.2 2000/08/21 11:35:18 djs
# Lots of tidying
#
# Revision 1.6.2.1 2000/08/02 10:52:02 dpg1
# New omni3_1_develop branch, merged from omni3_develop.
#
# Revision 1.6 2000/07/13 15:26:00 dpg1
# Merge from omni3_develop for 3.0 release.
#
# Revision 1.3.2.15 2000/07/26 15:29:11 djs
# Missing typedef and forward when generating BOA skeletons
#
# Revision 1.3.2.14 2000/07/24 09:35:20 dpg1
# Adding the missing constructor meant that there was no longer a
# default constructor.
#
# Revision 1.3.2.13 2000/07/24 10:17:31 djs
# Added missing BOA skeleton constructor
#
# Revision 1.3.2.12 2000/07/04 12:57:55 djs
# Fixed Any insertion/extraction operators for unions and exceptions
#
# Revision 1.3.2.11 2000/06/26 16:24:00 djs
# Better handling of #include'd files (via new commandline options)
# Refactoring of configuration state mechanism.
#
# Revision 1.3.2.10 2000/06/19 18:19:50 djs
# Implemented union discriminant setting function _d(_value) with checks for
# illegal uses (setting to a label corresponding to a non-current member and
# setting before initialisation)
#
# Revision 1.3.2.9 2000/06/05 13:03:57 djs
# Removed union member name clash (x & pd_x, pd__default, pd__d)
# Removed name clash when a sequence is called "pd_seq"
# Nested union within union fix
# Actually generates BOA non-flattened tie templates
#
# Revision 1.3.2.8 2000/05/31 18:02:58 djs
# Better output indenting (and preprocessor directives now correctly output at
# the beginning of lines)
#
# Revision 1.3.2.7 2000/05/30 15:59:25 djs
# Removed inheritance ambiguity in generated BOA _sk_ and POA_ classes
#
# Revision 1.3.2.6 2000/05/18 15:57:33 djs
# Added missing T* data constructor for bounded sequence types
#
# Revision 1.3.2.5 2000/03/20 11:50:20 djs
# Removed excess buffering- output templates have code attached which is
# lazily evaluated when required.
#
# Revision 1.3.2.4 2000/03/10 12:01:03 djr
# Re-fixed omniidl (make exception _NP_duplicate() public).
#
# Revision 1.3.2.3 2000/03/09 15:22:42 djs
# Changing the protection status of an exception method, mirroring a change
# in omniidl3
#
# Revision 1.3.2.2 2000/03/07 18:07:33 djr
# Fixed user-exceptions when can't catch by base class.
#
# Revision 1.3.2.1 2000/03/03 14:29:17 djr
# Improvement to BOA skeletons (less generated code).
#
# Revision 1.3 2000/02/01 09:26:45 djs
# Tracking fixes in old compiler: powerpc-aix scoped identifier workarounds
#
# Revision 1.2 2000/01/19 11:23:29 djs
# Moved most C++ code to template file
#
# Revision 1.1 2000/01/18 18:05:53 djs
# Extracted most C++ from header/defs and put in a template file.
# General refactoring.
#
"""C++ templates for the .hh file"""
##
## File header
##
header = """\
// This file is generated by @program@- @library@. Do not edit.
#ifndef @guard_prefix@__@guard@_hh__
#define @guard_prefix@__@guard@_hh__
"""
footer = """\
#endif
"""
##
## Main file
##
main = """\
#ifndef __CORBA_H_EXTERNAL_GUARD__
#include <omniORB4/CORBA.h>
#endif
#ifndef USE_stub_in_nt_dll
# define USE_stub_in_nt_dll
|
Wevolver/HAVE
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 9,459
| 0.006026
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Multiple documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 14 09:34:49 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
    # 'sphinx.ext.doctest',
# 'sphinx.ext.todo',
# 'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Multiple'
copyright = '2016, Wevolver'
author = 'Wevolver'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'Multiple v0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Multipledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Multiple.tex', 'Multiple Documentation',
'Wevolver', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
|
huangshenno1/algo
|
project_euler/1.py
|
Python
|
mit
| 84
| 0.02381
|
sum = 0
for i in range(1, 1000):
if i % 3 == 0 or i % 5 == 0:
sum += i
print sum
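# Hedged note (not part of the original file): inclusion-exclusion gives the
# same answer in closed form, with S_k = k*m*(m+1)/2 and m = 999//k:
#   S_3 + S_5 - S_15 = 166833 + 99500 - 33165 = 233168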
| |
cpennington/edx-platform
|
common/djangoapps/student/rules.py
|
Python
|
agpl-3.0
| 894
| 0.004474
|
"""
Django rules for student roles
"""
from __future__ import absolute_import
import rules
from lms.djangoapps.courseware.access import has_access
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag, WaffleFlag, WaffleFlagNamespace
from .roles import CourseDataResearcherRole
# Waffle flag to enable the course data researcher role.
RESEARCHER_ROLE = CourseWaffleFlag(WaffleFlagNamespace(name='instructor'), 'researcher')
@rules.predicate
def can_access_reports(user, course_id):
"""
Returns whether the user can access the course data downloads.
"""
is_staff = user.is_staff
if RESEARCHER_ROLE.is_enabled(course_id):
return is_staff or CourseDataResearcherRole(course_id).has_user(user)
else:
return is_staff or has_access(user, 'staff', course_id)
rules.add_perm('student.can_research', can_access_reports)
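# Hedged usage sketch (not part of the original module): the predicate above
# is consulted through Django's permission API, e.g.
#
#   if request.user.has_perm('student.can_research', course_id):
#       ...  # show the data downloads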
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/component/weapon/shared_projectile_feed_mechanism.py
|
Python
|
mit
| 498
| 0.044177
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/weapon/shared_projectile_feed_mechanism.iff"
result.attribute_template_id = -1
	result.stfName("craft_weapon_ingredients_n","projectile_feed_mechanism")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
BrianLusina/Arco
|
server/app/mod_home/views.py
|
Python
|
mit
| 557
| 0.001795
|
"""
Entry point to API application. This will be for running simple checks on the application
"""
from flask import jsonify, url_for, redirect, request
from flask_login import current_user
from . import home
from ..__meta__ import __version__, __project__, __copyright__
@home.route("")
@home.route("home")
@home.route("index")
def index():
"""
Entry point into the app
:return: renders the api information
"""
    return jsonify({
        "version": __version__,
        "project": __project__,
"copyright": __copyright__
})
|
botswana-harvard/edc-data-manager
|
edc_data_manager/view_mixins/data_manager_view_mixin.py
|
Python
|
gpl-2.0
| 2,162
| 0
|
from django.contrib import messages
from django.views.generic.base import ContextMixin
from edc_constants.constants import OPEN
from ..models import DataActionItem
from ..model_wrappers import DataActionItemModelWrapper
from .user_details_check_view_mixin import UserDetailsCheckViewMixin
class DataActionItemsViewMixin(UserDetailsCheckViewMixin, ContextMixin):
data_action_item_template = 'edc_data_manager/data_manager.html'
@property
    def data_action_item(self):
        """Returns a wrapped unsaved data action item.
        """
model_obj = DataActionItem(subject_identifier=self.subject_identifier)
return DataActionItemModelWrapper(model_obj=model_obj)
def data_action_items(self):
"""Return a list of action items.
"""
wrapped_data_action_items = []
        status = [OPEN, 'stalled', 'resolved']
data_action_items = DataActionItem.objects.filter(
subject_identifier=self.subject_identifier,
status__in=status).order_by('issue_number')
for data_action_item in data_action_items:
wrapped_data_action_items.append(data_action_item)
return wrapped_data_action_items
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
status = [OPEN, 'stalled', 'resolved']
data_action_items = DataActionItem.objects.filter(
subject_identifier=self.subject_identifier,
status__in=status).order_by('issue_number')
msg = ''
for data_action_item in data_action_items:
msg = (f'Issue {data_action_item.issue_number}. Pending action'
f' created by {data_action_item.user_created}. '
f'{data_action_item.subject} Assigned to '
f'{data_action_item.assigned}')
messages.add_message(
self.request, messages.ERROR, msg)
context.update(
data_action_item_template=self.data_action_item_template,
data_action_item_add_url=self.data_action_item.href,
data_action_items=self.data_action_items)
return context
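# Hedged usage sketch (not part of the original module): the mixin assumes a
# subject_identifier attribute on the consuming view. Names below are
# hypothetical and imports are omitted.
#
#   class SubjectDashboardView(DataActionItemsViewMixin, TemplateView):
#       template_name = 'subject_dashboard.html'
#
#       @property
#       def subject_identifier(self):
#           return self.kwargs.get('subject_identifier')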
|
dims/heat
|
heat/tests/convergence/scenarios/update_replace_rollback.py
|
Python
|
apache-2.0
| 1,550
| 0.000645
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def check_c_count(expected_count):
test.assertEqual(expected_count,
                     len(reality.resources_by_logical_name('C')))
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
example_template2 = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.update_stack('foo', example_template2)
engine.noop(4)
engine.rollback_stack('foo')
engine.call(check_c_count, 2)
engine.noop(11)
engine.call(verify, example_template)
engine.delete_stack('foo')
engine.noop(12)
engine.call(verify, Template({}))
|
1kastner/analyse_weather_data
|
plot_weather_data/__init__.py
|
Python
|
agpl-3.0
| 1,869
| 0.003215
|
"""
"""
import os
import pandas
from matplotlib import pyplot
from matplotlib import dates as mdates
import matplotlib.ticker as mticker
PROCESSED_DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
"processed_data"
)
def insert_nans(station_df):
"""
    The plotted line is only discontinued where NaNs are present.
:param station_df:
:return:
"""
reference_df = pandas.DataFrame(
index=pandas.date_range(station_df.index[0], station_df.index[-1], freq='H', name="datetime"),
)
return station_df.join(reference_df, how="outer")
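# Hedged usage sketch (not part of the original module): joining against the
# full hourly range makes matplotlib break the line at missing hours.
#
#   df = pandas.DataFrame(
#       {"temperature": [1.0, 2.0]},
#       index=pandas.DatetimeIndex(
#           ["2016-01-01 00:00", "2016-01-01 03:00"], name="datetime"))
#   insert_nans(df)  # rows for 01:00 and 02:00 appear, filled with NaN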
class GermanDateFormatter(mdates.DateFormatter):
"""
    As the Windows locale data is wrong (no dot after abbreviations, contrary
    to what the Duden prescribes), this is the home-brew solution.
"""
def __init__(self):
        super().__init__('%b')
import locale
locale.setlocale(locale.LC_ALL, 'de')
        self.month_formatter = mdates.DateFormatter('%b')
def strftime(self, dt, fmt=None):
windows_month_name = dt.strftime("%b")
        if windows_month_name == "Mrz":
return "März"
if windows_month_name == "Mai":
return "Mai"
if windows_month_name == "Jun":
return "Juni"
if windows_month_name == "Jul":
return "Juli"
if windows_month_name == "Sep":
return "Sept."
abbreviated_month_name = windows_month_name + "."
return abbreviated_month_name
def style_year_2016_plot(ax):
ax.set_ylabel('Temperatur (°C)')
ax.set_xlabel('2016')
ax.margins(x=0)
ax.yaxis.set_major_locator(mticker.MultipleLocator(5)) # draw line every 5 °C
pyplot.grid(color='.9') # a very light gray
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(GermanDateFormatter())
|
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver
|
src/emc/usr_intf/touchy/mdi.py
|
Python
|
gpl-2.0
| 10,007
| 0.006196
|
# Touchy is Copyright (c) 2009 Chris Radek <chris@timeguy.com>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# self.mcodes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 30, 48, 49, 50, 51,
# 52, 53, 60, 61, 62, 63, 64, 65, 66, 67, 68)
#
# self.gcodes = (0, 10, 20, 30, 40, 50, 51, 52, 53, 70, 80, 100,
# 170, 171, 180, 181, 190, 191, 200, 210, 280, 281,
# 300, 301, 330, 331, 382, 383, 384, 385, 400, 410,
# 411, 420, 421, 430, 431, 490, 530, 540, 550, 560,
# 570, 580, 590, 591, 592, 593, 610, 611, 640, 730,
# 760, 800, 810, 820, 830, 840, 850, 860, 870, 880,
# 890, 900, 901, 910, 911, 920, 921, 922, 923, 930,
# 940, 950, 960, 970, 980, 990)
class mdi:
def __init__(self, emc):
self.clear()
self.emc = emc
self.emcstat = emc.stat()
self.emccommand = emc.command()
self.emcstat.poll()
am = self.emcstat.axis_mask
self.axes = []
self.polar = 0
axisnames = ['X', 'Y', 'Z', 'A', 'B', 'C', 'U', 'V', 'W']
for i in range(9):
if am & (1<<i):
self.axes.append(axisnames[i])
self.gcode = 'M2'
self.codes = {
'M3' : [_('Spindle CW'), 'S'],
'M4' : [_('Spindle CCW'), 'S'],
'M6' : [_('Tool change'), 'T'],
'M61' : [_('Set tool number'), 'Q'],
'M66' : [_('Input control'), 'P', 'E', 'L', 'Q'],
# 'A' means 'the axes'
'G0' : [_('Straight rapid'), 'A'],
'G00' : [_('Straight rapid'), 'A'],
'G1' : [_('Straight feed'), 'A', 'F'],
'G01' : [_('Straight feed'), 'A', 'F'],
'G2' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G02' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G3' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G03' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G4' : [_('Dwell'), 'P'],
'G04' : [_('Dwell'), 'P'],
'G10' : [_('Setup'), 'L', 'P', 'A', 'Q', 'R'],
'G33' : [_('Spindle synchronized feed'), 'A', 'K'],
'G33.1' : [_('Rigid tap'), 'Z', 'K'],
'G38.2' : [_('Probe'), 'A', 'F'],
'G38.3' : [_('Probe'), 'A', 'F'],
'G38.4' : [_('Probe'), 'A', 'F'],
'G38.5' : [_('Probe'), 'A', 'F'],
'G41' : [_('Radius compensation left'), 'D'],
'G42' : [_('Radius compensation right'), 'D'],
'G41.1' : [_('Radius compensation left, immediate'), 'D', 'L'],
'G42.1' : [_('Radius compensation right, immediate'), 'D', 'L'],
'G43' : [_('Tool length offset'), 'H'],
'G43.1' : [_('Tool length offset immediate'), 'I', 'K'],
'G53' : [_('Motion in unoffset coordinates'), 'G', 'A', 'F'],
'G64' : [_('Continuous mode'), 'P'],
'G76' : [_('Thread'), 'Z', 'P', 'I', 'J', 'K', 'R', 'Q', 'H', 'E', 'L'],
'G81' : [_('Drill'), 'A', 'R', 'L', 'F'],
'G82' : [_('Drill with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G83' : [_('Peck drill'), 'A', 'R', 'L', 'Q', 'F'],
'G73' : [_('Chip-break drill'), 'A', 'R', 'L', 'Q', 'F'],
'G85' : [_('Bore'), 'A', 'R', 'L', 'F'],
'G89' : [_('Bore with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G92' : [_('Offset all coordinate systems'), 'A'],
'G96' : [_('CSS Mode'), 'S', 'D'],
}
self.ocodes = []
def add_macros(self, macros):
for m in macros:
words = m.split()
call = "O<%s> call" % words[0]
args = [''] + [w + ' ' for w in words[1:]]
            self.ocodes.append(call)
self.codes[call] = args
def get_description(self, gcode):
return self.codes[gcode][0]
def get_words(self, gcode):
self.gcode = gcode
if gcode[0] == 'M' and gcode.find(".") == -1 and int(gcode[1:]) >= 100 and int(gcode[1:]) <= 199:
return ['P', 'Q']
if not self.codes.has_key(gcode):
return []
# strip description
words = self.codes[gcode][1:]
# replace A with the real axis names
if 'A' in words:
i = words.index('A')
words = words[:i] + self.axes + words[i+1:]
if self.polar and 'X' in self.axes and 'Y' in self.axes:
words[self.axes.index('X')] = '@'
words[self.axes.index('Y')] = '^'
return words
def clear(self):
self.words = {}
def set_word(self, word, value):
self.words[word] = value
def set_polar(self, p):
self.polar = p;
def issue(self):
m = self.gcode
if m.lower().startswith('o'):
codes = self.codes[m]
for code in self.codes[m][1:]:
v = self.words[code] or "0"
m = m + " [%s]" % v
else:
w = [i for i in self.words if len(self.words.get(i)) > 0]
if '@' in w:
m += '@' + self.words.get('@')
w.remove('@')
if '^' in w:
m += '^' + self.words.get('^')
w.remove('^')
for i in w:
if len(self.words.get(i)) > 0:
m += i + self.words.get(i)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
class mdi_control:
def __init__(self, gtk, emc, labels, eventboxes):
self.labels = labels
self.eventboxes = eventboxes
self.numlabels = len(labels)
self.numwords = 1
self.selected = 0
self.gtk = gtk
self.mdi = mdi(emc)
for i in range(self.numlabels):
self.not_editing(i)
self.editing(self.selected)
self.set_text("G")
def not_editing(self, n):
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#ccc"))
def editing(self, n):
self.not_editing(self.selected)
self.selected = n
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#fff"))
def get_text(self):
w = self.labels[self.selected]
return w.get_text()
def set_text(self, t, n = -1):
if n == -1: n = self.selected
w = self.labels[n]
w.set_text(t)
if n > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
self.mdi.set_word(head, tail)
if len(t) < 2:
w.set_alignment(1.0, 0.5)
else:
w.set_alignment(0.0, 0.5)
def clear(self, b):
t = self.get_text()
self.set_text(t.rstrip("0123456789.-"))
def back(self, b):
t = self.get_text()
if t[-1:] in "0123456789.-":
self.set_text(t[:-1])
def fill_out(self):
if self.selected == 0:
w = self.mdi.get_words(self.get_text())
self.numwords = len(w)
for i in range(1,self.numlabels):
if i <= len(w):
self.set_text(w[i-1], i)
else:
self.set_text("", i)
def next(self, b):
self.fill_out();
if self.numwords > 0:
self.editing(max(1,(self.selected+1) % (self.numwords+1)))
def ok(self, b):
self.fill_out();
self.mdi.issue()
def decimal(self, b):
t = self.get_text()
if t.find(".") == -1:
            self.set_text(t + ".")
|
hgl888/chromium-crosswalk-efl
|
tools/binary_size/run_binary_size_analysis.py
|
Python
|
bsd-3-clause
| 35,816
| 0.010051
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a spatial analysis against an arbitrary library.
To use, build the 'binary_size_tool' target. Then run this tool, passing
in the location of the library to be analyzed along with any other options
you desire.
"""
import collections
import json
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
import time
import binary_size_utils
# This path change is not beautiful. Temporary (I hope) measure until
# the chromium project has figured out a proper way to organize the
# library of python tools. http://crbug.com/375725
elf_symbolizer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..',
'..',
'build',
'android',
'pylib'))
sys.path.append(elf_symbolizer_path)
import symbols.elf_symbolizer as elf_symbolizer  # pylint: disable=F0401
# Node dictionary keys. These are output in json read by the webapp so
# keep them short to save file size.
# Note: If these change, the webapp must also change.
NODE_TYPE_KEY = 'k'
NODE_NAME_KEY = 'n'
NODE_CHILDREN_KEY = 'children'
NODE_SYMBOL_TYPE_KEY = 't'
NODE_SYMBOL_SIZE_KEY = 'value'
NODE_MAX_DEPTH_KEY = 'maxDepth'
NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement'
# The display name of the bucket where we put symbols without path.
NAME_NO_PATH_BUCKET = '(No Path)'
# Try to keep data buckets smaller than this to avoid killing the
# graphing lib.
BIG_BUCKET_LIMIT = 3000
# TODO(andrewhayden): Only used for legacy reports. Delete.
def FormatBytes(byte_count):
"""Pretty-print a number of bytes."""
if byte_count > 1e6:
byte_count = byte_count / 1.0e6
return '%.1fm' % byte_count
if byte_count > 1e3:
byte_count = byte_count / 1.0e3
return '%.1fk' % byte_count
return str(byte_count)
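# Hedged examples (not part of the original file):
#   FormatBytes(2500000)  # -> '2.5m'
#   FormatBytes(2500)     # -> '2.5k'
#   FormatBytes(25)       # -> '25'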
# TODO(andrewhayden): Only used for legacy reports. Delete.
def SymbolTypeToHuman(symbol_type):
"""Convert a symbol type as printed by nm into a human-readable name."""
return {'b': 'bss',
'd': 'data',
'r': 'read-only data',
't': 'code',
'w': 'weak symbol',
'v': 'weak symbol'}[symbol_type]
def _MkChild(node, name):
child = node[NODE_CHILDREN_KEY].get(name)
if child is None:
child = {NODE_NAME_KEY: name,
NODE_CHILDREN_KEY: {}}
node[NODE_CHILDREN_KEY][name] = child
return child
def SplitNoPathBucket(node):
"""NAME_NO_PATH_BUCKET can be too large for the graphing lib to
handle. Split it into sub-buckets in that case."""
root_children = node[NODE_CHILDREN_KEY]
if NAME_NO_PATH_BUCKET in root_children:
no_path_bucket = root_children[NAME_NO_PATH_BUCKET]
old_children = no_path_bucket[NODE_CHILDREN_KEY]
count = 0
for symbol_type, symbol_bucket in old_children.iteritems():
count += len(symbol_bucket[NODE_CHILDREN_KEY])
if count > BIG_BUCKET_LIMIT:
new_children = {}
no_path_bucket[NODE_CHILDREN_KEY] = new_children
current_bucket = None
index = 0
for symbol_type, symbol_bucket in old_children.iteritems():
for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].iteritems():
if index % BIG_BUCKET_LIMIT == 0:
group_no = (index / BIG_BUCKET_LIMIT) + 1
current_bucket = _MkChild(no_path_bucket,
'%s subgroup %d' % (NAME_NO_PATH_BUCKET,
group_no))
assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
node[NODE_TYPE_KEY] = 'p' # p for path
index += 1
symbol_size = value[NODE_SYMBOL_SIZE_KEY]
AddSymbolIntoFileNode(current_bucket, symbol_type,
symbol_name, symbol_size)
def MakeChildrenDictsIntoLists(node):
largest_list_len = 0
if NODE_CHILDREN_KEY in node:
largest_list_len = len(node[NODE_CHILDREN_KEY])
child_list = []
for child in node[NODE_CHILDREN_KEY].itervalues():
child_largest_list_len = MakeChildrenDictsIntoLists(child)
if child_largest_list_len > largest_list_len:
largest_list_len = child_largest_list_len
child_list.append(child)
node[NODE_CHILDREN_KEY] = child_list
return largest_list_len
def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size):
"""Puts symbol into the file path node |node|.
Returns the number of added levels in tree. I.e. returns 2."""
# 'node' is the file node and first step is to find its symbol-type bucket.
node[NODE_LAST_PATH_ELEMENT_KEY] = True
node = _MkChild(node, symbol_type)
assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'b'
node[NODE_SYMBOL_TYPE_KEY] = symbol_type
node[NODE_TYPE_KEY] = 'b' # b for bucket
# 'node' is now the symbol-type bucket. Make the child entry.
node = _MkChild(node, symbol_name)
if NODE_CHILDREN_KEY in node:
if node[NODE_CHILDREN_KEY]:
logging.warning('A container node used as symbol for %s.' % symbol_name)
# This is going to be used as a leaf so no use for child list.
del node[NODE_CHILDREN_KEY]
node[NODE_SYMBOL_SIZE_KEY] = symbol_size
node[NODE_SYMBOL_TYPE_KEY] = symbol_type
node[NODE_TYPE_KEY] = 's' # s for symbol
return 2 # Depth of the added subtree.
def MakeCompactTree(symbols, symbol_path_origin_dir):
result = {NODE_NAME_KEY: '/',
NODE_CHILDREN_KEY: {},
NODE_TYPE_KEY: 'p',
NODE_MAX_DEPTH_KEY: 0}
seen_symbol_with_path = False
cwd = os.path.abspath(os.getcwd())
for symbol_name, symbol_type, symbol_size, file_path in symbols:
if 'vtable for ' in symbol_name:
symbol_type = '@' # hack to categorize these separately
# Take path like '/foo/bar/baz', convert to ['foo', 'bar', 'baz']
if file_path and file_path != "??":
file_path = os.path.abspath(os.path.join(symbol_path_origin_dir,
file_path))
# Let the output structure be relative to $CWD if inside $CWD,
# otherwise relative to the disk root. This is to avoid
# unnecessary click-through levels in the output.
if file_path.startswith(cwd + os.sep):
file_path = file_path[len(cwd):]
if file_path.startswith('/'):
file_path = file_path[1:]
seen_symbol_with_path = True
else:
file_path = NAME_NO_PATH_BUCKET
path_parts = file_path.split('/')
# Find pre-existing node in tree, or update if it already exists
node = result
depth = 0
while len(path_parts) > 0:
path_part = path_parts.pop(0)
if len(path_part) == 0:
continue
depth += 1
node = _MkChild(node, path_part)
assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
node[NODE_TYPE_KEY] = 'p' # p for path
depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size)
result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth)
if not seen_symbol_with_path:
logging.warning('Symbols lack paths. Data will not be structured.')
# The (no path) bucket can be extremely large if we failed to get
# path information. Split it into subgroups if needed.
SplitNoPathBucket(result)
largest_list_len = MakeChildrenDictsIntoLists(result)
if largest_list_len > BIG_BUCKET_LIMIT:
logging.warning('There are sections with %d nodes. '
'Results might be unusable.' % largest_list_len)
return result
# TODO(andrewhayden): Only used for legacy reports. Delete.
def TreeifySymbols(symbols):
"""Convert symbols into a path-based tree, calculating size information
along the way.
The result is a dictionary that contains two kinds of nodes:
1. Leaf nodes, representing source code locations (e.g., c++ files)
These nodes have the following dictionary entries:
sizes: a dictionary whose keys are categories (such as code, data,
vtable, etceteras) and whose values are the size, in bytes, of
          those categories.
|
qunying/gps
|
docs/tutorial/conf.py
|
Python
|
gpl-3.0
| 8,459
| 0.006975
|
# -*- coding: utf-8 -*-
#
# Tutorial documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 8 12:57:03 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
def get_copyright():
return u'2001-%s, AdaCore' % time.strftime("%Y")
# General information about the project.
project = u'Tutorial'
copyright = get_copyright()
def get_version():
"""Extract the version from VERSION.txt"""
version_file = "../../VERSION.txt"
if os.path.isfile(version_file):
return file(version_file).readline()
else:
return "0.0"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../users_guide/adacore_transparent.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../users_guide/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tutorialdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Tutorial.tex', u'Tutorial Documentation',
u'AdaCore', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tutorial', u'Tutorial Documentation',
[u'AdaCore'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Tutorial'
epub_author = u'AdaCore'
epub_publisher = u'AdaCore'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a
|
insolite/alarme
|
alarme/extras/sensor/web/views/core.py
|
Python
|
mit
| 386
| 0
|
from aiohttp.web import View, HTTPFound
def http_found(func):
    async def wrapped(self, *args, **kwargs):
        await func(self, *args, **kwargs)
        return HTTPFound(self.request.rel_url)
    return wrapped
class CoreView(View):
sensor = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = self.sensor.logger
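# Usage sketch (hypothetical, not from this repo): a CoreView subclass whose
# POST handler redirects back to the same URL via the decorator above.
# `arm()` is an assumed sensor method, used only for illustration.
#
# class ArmView(CoreView):
#     @http_found
#     async def post(self):
#         await self.sensor.arm()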
|
ESOedX/edx-platform
|
lms/djangoapps/discussion/rest_api/tests/test_serializers.py
|
Python
|
agpl-3.0
| 34,613
| 0.001416
|
"""
Tests for Discussion API serializers
"""
from __future__ import absolute_import
import itertools
import ddt
import httpretty
import mock
import six
from django.test.client import RequestFactory
from six.moves.urllib.parse import urlparse # pylint: disable=import-error
from lms.djangoapps.discussion.django_comment_client.tests.utils import ForumsEnableMixin
from lms.djangoapps.discussion.rest_api.serializers import CommentSerializer, ThreadSerializer, get_context
from lms.djangoapps.discussion.rest_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread
)
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.django_comment_common.comment_client.comment import Comment
from openedx.core.djangoapps.django_comment_common.comment_client.thread import Thread
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role
)
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class SerializerTestMixin(ForumsEnableMixin, CommentsServiceMockMixin, UrlResetMixin):
"""
Test Mixin for Serializer tests
"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(SerializerTestMixin, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(SerializerTestMixin, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
self.author = UserFactory.create()
def create_role(self, role_name, users, course=None):
"""Create a Role in self.course with the given name and users"""
course = course or self.course
role = Role.objects.create(name=role_name, course_id=course.id)
role.users = users
@ddt.data(
(FORUM_ROLE_ADMINISTRATOR, True, False, True),
(FORUM_ROLE_ADMINISTRATOR, False, True, False),
(FORUM_ROLE_MODERATOR, True, False, True),
(FORUM_ROLE_MODERATOR, False, True, False),
(FORUM_ROLE_COMMUNITY_TA, True, False, True),
(FORUM_ROLE_COMMUNITY_TA, False, True, False),
(FORUM_ROLE_STUDENT, True, False, True),
(FORUM_ROLE_STUDENT, False, True, True),
)
@ddt.unpack
def test_anonymity(self, role_name, anonymous, anonymous_to_peers, expected_serialized_anonymous):
"""
Test that content is properly made anonymous.
Content should be anonymous iff the anonymous field is true or the
anonymous_to_peers field is true and the requester does not have a
privileged role.
role_name is the name of the requester's role.
anonymous is the value of the anonymous field in the content.
anonymous_to_peers is the value of the anonymous_to_peers field in the
content.
expected_serialized_anonymous is whether the content should actually be
anonymous in the API output when requested by a user with the given
role.
"""
self.create_role(role_name, [self.user])
serialized = self.serialize(
self.make_cs_content({"anonymous": anonymous, "anonymous_to_peers": anonymous_to_peers})
)
actual_serialized_anonymous = serialized["author"] is None
self.assertEqual(actual_serialized_anonymous, expected_serialized_anonymous)
@ddt.data(
(FORUM_ROLE_ADMINISTRATOR, False, "Staff"),
(FORUM_ROLE_ADMINISTRATOR, True, None),
(FORUM_ROLE_MODERATOR, False, "Staff"),
(FORUM_ROLE_MODERATOR, True, None),
(FORUM_ROLE_COMMUNITY_TA, False, "Community TA"),
(FORUM_ROLE_COMMUNITY_TA, True, None),
(FORUM_ROLE_STUDENT, False, None),
(FORUM_ROLE_STUDENT, True, None),
)
@ddt.unpack
def test_author_labels(self, role_name, anonymous, expected_label):
"""
Test correctness of the author_label field.
The label should be "Staff", "Staff", or "Community TA" for the
Administrator, Moderator, and Community TA roles, respectively, but
the label should not be present if the content is anonymous.
role_name is the name of the author's role.
anonymous is the value of the anonymous field in the content.
expected_label is the expected value of the author_label field in the
API output.
"""
self.create_role(role_name, [self.author])
serialized = self.serialize(self.make_cs_content({"anonymous": anonymous}))
self.assertEqual(serialized["author_label"], expected_label)
def test_abuse_flagged(self):
serialized = self.serialize(self.make_cs_content({"abuse_flaggers": [str(self.user.id)]}))
self.assertEqual(serialized["abuse_flagged"], True)
def test_voted(self):
thread_id = "test_thread"
self.register_get_user_response(self.user, upvoted_ids=[thread_id])
serialized = self.serialize(self.make_cs_content({"id": thread_id}))
self.assertEqual(serialized["voted"], True)
@ddt.ddt
class ThreadSerializerSerializationTest(SerializerTestMixin, SharedModuleStoreTestCase):
"""Tests for ThreadSerializer serialization."""
def make_cs_content(self, overrides):
"""
Create a thread with the given overrides, plus some useful test data.
"""
merged_overrides = {
"course_id": six.text_type(self.course.id),
"user_id": str(self.author.id),
"username": self.author.username,
"read": True,
"endorsed": True,
"resp_total": 0,
}
merged_overrides.update(overrides)
return make_minimal_cs_thread(merged_overrides)
def serialize(self, thread):
"""
Create a serializer with an appropriate context and use it to serialize
the given thread, returning the result.
"""
return ThreadSerializer(thread, context=get_context(self.course, self.request)).data
def test_basic(self):
thread = make_minimal_cs_thread({
"id": "test_thread",
"course_id": six.text_type(self.course.id),
"commentable_id": "test_topic",
"user_id": str(self.author.id),
"username": self.author.username,
"title": "Test Title",
"body": "Test body",
"pinned": True,
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
})
        expected = self.expected_thread_data({
            "author": self.author.username,
            "vote_count": 4,
"comment_count": 6,
"unread_comment_count": 3,
"pinned": True,
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
})
self.assertEqual(self.serialize(thread), expected)
thread["thread_type"] = "question"
expected.update({
"type": "question",
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=False"
),
})
self.assertEqua
|
victorkeophila/alien4cloud-cloudify3-provider
|
src/test/resources/outputs/blueprints/openstack/tomcat/plugins/custom_wf_plugin/setup.py
|
Python
|
apache-2.0
| 650
| 0.001538
|
from setuptools import setup
# Replace the place holders with values for your project
setup(
# Do not use underscores in the plugin name.
name='custom-wf-plugin',
version='0.1',
author='alien',
    author_email='alien@fastconnect.fr',
description='custom generated workflows',
# This must correspond to the actual packages in the plugin.
packages=['plugin'],
license='Apache',
zip_safe=True,
install_requires=[
        # Necessary dependency for developing plugins, do not remove!
"cloudify-plugins-common>=3.2"
],
    tests_require=[
        "cloudify-dsl-parser>=3.2",
        "nose"
]
)
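# Packaging note (a sketch, not from this repo): `python setup.py sdist`
# builds the source archive; how the Cloudify manager ingests the plugin
# (e.g. via a blueprint upload) is outside this file.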
|
ergoregion/pyqt-units
|
pyqt_units/MeasurementWidgets.py
|
Python
|
mit
| 6,390
| 0.002191
|
#Created on 14 Aug 2014
#@author: neil.butcher
from PySide2 import QtCore, QtWidgets
from pyqt_units.CurrentUnitSetter import setter
class UnitDisplay(QtWidgets.QWidget):
def __init__(self, parent, measurement=None, measurementLabel='normal'):
QtWidgets.QWidget.__init__(self, parent)
self.layout = QtWidgets.QVBoxLayout()
self._label = QtWidgets.QLabel('', self)
self.layout.addWidget(self._label)
self.layout.setMargin(2)
self.measurement = measurement
self._measurementLabel = measurementLabel
setter.changed.connect(self.currentUnitChangedElsewhere)
self._update()
@QtCore.Slot(str, str, str)
def currentUnitChangedElsewhere(self, measName, unitName, measurementLabel):
if self.measurement == None:
pass
elif not measName == self.measurement.name:
pass
elif not measurementLabel == self._measurementLabel:
pass
else:
self._updateText(unitName)
def setMeasurement(self, measurement):
self.measurement = measurement
self._update()
def setMargin(self, margin):
self.layout.setMargin(margin)
def _update(self):
if self.measurement == None:
self._updateText('')
else:
self._updateText(self.measurement.currentUnit(label=self._measurementLabel).name)
def _updateText(self, txt):
self._label.setText(txt)
class UnitComboBox(QtWidgets.QWidget):
def __init__(self, parent, measurement=None, measurementLabel='normal'):
QtWidgets.QWidget.__init__(self, parent)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setMargin(2)
self._box = QtWidgets.QComboBox(self)
self.layout.addWidget(self._box)
        self._measurementLabel = measurementLabel
self._box.currentIndexChanged.connect(self.changedToIndex)
setter.changed.connect(self.currentUnitChangedElsewhere)
self.setMeasurement(measurement)
self._update()
@QtCore.Slot(str, str, str)
def currentUnitChangedElsewhere(self, measName, unitName, measurementLabel):
if self.measurement == None:
pass
        elif not measName == self.measurement.name:
pass
elif not measurementLabel == self._measurementLabel:
pass
else:
self._update()
def setMeasurement(self, measurement):
self.measurement = None
self._box.clear()
if measurement is None:
pass
else:
self.itemslist = measurement.units
namesList = []
for i in self.itemslist:
namesList.append(i.name)
self._box.addItems(namesList)
self.measurement = measurement
self._update()
def setMargin(self, margin):
self.layout.setMargin(margin)
def _update(self):
if self.measurement is None:
pass
else:
text = self.measurement.currentUnit(label=self._measurementLabel).name
pos = self._box.findText(text)
if pos == -1:
pos = 0
self._box.setCurrentIndex(pos)
@QtCore.Slot(int)
def changedToIndex(self, i):
if not self.measurement == None:
unit = self.itemslist[i]
setter.setMeasurementUnit(self.measurement, unit, self._measurementLabel)
class AddaptiveDoubleSpinBox(QtWidgets.QDoubleSpinBox):
def textFromValue(self, value):
s = '{0:g}'.format(value)
return s
class UnitSpinBox(QtWidgets.QWidget):
valueChanged = QtCore.Signal(float)
editingFinished = QtCore.Signal()
def __init__(self, parent, measurement=None, delta=False, measurementLabel='normal'):
QtWidgets.QWidget.__init__(self, parent)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setMargin(2)
self._box = AddaptiveDoubleSpinBox(self)
self._box.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self._box.setMaximum(2.0e30)
self._box.setMinimum(-2.0e30)
self._box.setDecimals(12)
self.layout.addWidget(self._box)
self._box.valueChanged.connect(self._valueChanged)
self._box.editingFinished.connect(self._editingFinished)
setter.changed.connect(self.currentUnitChangedElsewhere)
self.delta = delta
self._baseValue = None
self._measurementLabel = measurementLabel
self.setMeasurement(measurement)
self._update()
@QtCore.Slot(str, str, str)
def currentUnitChangedElsewhere(self, measName, unitName, measurementLabel):
if self.measurement is None:
pass
elif not measName == self.measurement.name:
pass
elif not measurementLabel == self._measurementLabel:
pass
else:
self._update()
def setMeasurement(self, measurement):
self.measurement = measurement
self._update()
def setMargin(self, margin):
self.layout.setMargin(margin)
def unit(self):
return self.measurement.currentUnit(label=self._measurementLabel)
def _update(self):
if self._baseValue is None:
self._box.clear()
elif self.measurement is None:
self._box.setValue(self._baseValue)
elif self.delta:
scaledValue = self.unit().scaledDeltaValueOf(self._baseValue)
self._box.setValue(scaledValue)
else:
scaledValue = self.unit().scaledValueOf(self._baseValue)
self._box.setValue(scaledValue)
def setValue(self, baseValue):
self._baseValue = baseValue
self._update()
def _valueChanged(self, scaledValue):
if scaledValue is None:
newValue = None
elif self.measurement is None:
newValue = scaledValue
elif self.delta:
newValue = self.unit().baseDeltaValueFrom(scaledValue)
else:
newValue = self.unit().baseValueFrom(scaledValue)
a = self._baseValue
b = newValue
if a is None or abs(a - b) > max(abs(a), abs(b)) * 1e-8:
self._baseValue = newValue
self.valueChanged.emit(self._baseValue)
def _editingFinished(self):
self.editingFinished.emit()
def value(self):
return self._baseValue
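# Usage sketch (hedged: `length` stands for a measurement object from this
# library, exposing `.units` and `.currentUnit()` as the widgets above expect;
# `parent_widget` and `on_change` are placeholders):
#
# box = UnitSpinBox(parent_widget, measurement=length)
# box.setValue(2.5)                    # value is given in base units
# box.valueChanged.connect(on_change)  # emits base-unit floats on edits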
|
onelab-eu/sfa
|
sfa/util/xrn.py
|
Python
|
mit
| 10,231
| 0.010849
|
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import re
from sfa.util.faults import SfaAPIError
# for convenience and smoother translation - we should get rid of these functions eventually
def get_leaf(hrn): return Xrn(hrn).get_leaf()
def get_authority(hrn): return Xrn(hrn).get_authority_hrn()
def urn_to_hrn(urn): xrn=Xrn(urn); return (xrn.hrn, xrn.type)
def hrn_to_urn(hrn,type): return Xrn(hrn, type=type).urn
def hrn_authfor_hrn(parenthrn, hrn): return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn)
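# Worked examples for the helpers above (a sketch; the URN shape follows the
# urn_to_hrn parsing further down and may differ per registry):
#   get_leaf('plc.princeton.alice')      -> 'alice'
#   get_authority('plc.princeton.alice') -> 'plc.princeton'
#   hrn_to_urn('plc.princeton.alice', 'user')
#       -> 'urn:publicid:IDN+plc:princeton+user+alice'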
class Xrn:
########## basic tools on HRNs
# split a HRN-like string into pieces
# this is like split('.') except for escaped (backslashed) dots
# e.g. hrn_split ('a\.b.c.d') -> [ 'a\.b','c','d']
@staticmethod
def hrn_split(hrn):
return [ x.replace('--sep--','\\.') for x in hrn.replace('\\.','--sep--').split('.') ]
# e.g. hrn_leaf ('a\.b.c.d') -> 'd'
@staticmethod
def hrn_leaf(hrn): return Xrn.hrn_split(hrn)[-1]
# e.g. hrn_auth_list ('a\.b.c.d') -> ['a\.b', 'c']
@staticmethod
def hrn_auth_list(hrn): return Xrn.hrn_split(hrn)[0:-1]
# e.g. hrn_auth ('a\.b.c.d') -> 'a\.b.c'
@staticmethod
def hrn_auth(hrn): return '.'.join(Xrn.hrn_auth_list(hrn))
# e.g. escape ('a.b') -> 'a\.b'
@staticmethod
def escape(token): return re.sub(r'([^\\])\.', r'\1\.', token)
# e.g. unescape ('a\.b') -> 'a.b'
@staticmethod
def unescape(token): return token.replace('\\.','.')
# Return the HRN authority chain from top to bottom.
# e.g. hrn_auth_chain('a\.b.c.d') -> ['a\.b', 'a\.b.c']
@staticmethod
def hrn_auth_chain(hrn):
parts = Xrn.hrn_auth_list(hrn)
chain = []
for i in range(len(parts)):
chain.append('.'.join(parts[:i+1]))
# Include the HRN itself?
#chain.append(hrn)
return chain
# Is the given HRN a true authority over the namespace of the other
# child HRN?
# A better alternative than childHRN.startswith(parentHRN)
# e.g. hrn_is_auth_for_hrn('a\.b', 'a\.b.c.d') -> True,
# but hrn_is_auth_for_hrn('a', 'a\.b.c.d') -> False
# Also hrn_is_auth_for_hrn('a\.b.c.d', 'a\.b.c.d') -> True
@staticmethod
def hrn_is_auth_for_hrn(parenthrn, hrn):
if parenthrn == hrn:
return True
for auth in Xrn.hrn_auth_chain(hrn):
if parenthrn == auth:
return True
return False
########## basic tools on URNs
URN_PREFIX = "urn:publicid:IDN"
URN_PREFIX_lower = "urn:publicid:idn"
@staticmethod
def is_urn (text):
return text.lower().startswith(Xrn.URN_PREFIX_lower)
@staticmethod
def urn_full (urn):
if Xrn.is_urn(urn): return urn
else: return Xrn.URN_PREFIX+urn
@staticmethod
def urn_meaningful (urn):
if Xrn.is_urn(urn): return urn[len(Xrn.URN_PREFIX):]
else: return urn
@staticmethod
def urn_split (urn):
return Xrn.urn_meaningful(urn).split('+')
@staticmethod
def filter_type(urns=None, type=None):
if urns is None: urns=[]
urn_list = []
if not type:
return urns
for urn in urns:
xrn = Xrn(xrn=urn)
if (xrn.type == type):
# Xrn is probably a urn so we can just compare types
urn_list.append(urn)
return urn_list
####################
# the local fields that are kept consistent
# self.urn
# self.hrn
# self.type
# self.path
# provide either urn, or (hrn + type)
def __init__ (self, xrn="", type=None, id=None):
if not xrn: xrn = ""
# user has specified xrn : guess if urn or hrn
self.id = id
if Xrn.is_urn(xrn):
self.hrn=None
self.urn=xrn
if id:
self.urn = "%s:%s" % (self.urn, str(id))
self.urn_to_hrn()
else:
self.urn=None
self.hrn=xrn
self.type=type
self.hrn_to_urn()
self._normalize()
# happens all the time ..
# if not type:
# debug_logger.debug("type-less Xrn's are not safe")
def __repr__ (self):
result="<XRN u=%s h=%s"%(self.urn,self.hrn)
if hasattr(self,'leaf'): result += " leaf=%s"%self.leaf
if hasattr(self,'authority'): result += " auth=%s"%self.authority
result += ">"
return result
def get_urn(self): return self.urn
def get_hrn(self): return self.hrn
def get_type(self): return self.type
def get_hrn_type(self): return (self.hrn, self.type)
def _normalize(self):
if self.hrn is None: raise SfaAPIError, "Xrn._normalize"
if not hasattr(self,'leaf'):
self.leaf=Xrn.hrn_split(self.hrn)[-1]
# self.authority keeps a list
if not hasattr(self,'authority'):
self.authority=Xrn.hrn_auth_list(self.hrn)
def get_leaf(self):
self._normalize()
return self.leaf
def get_authority_hrn(self):
self._normalize()
return '.'.join( self.authority )
def get_authority_urn(self):
self._normalize()
return ':'.join( [Xrn.unescape(x) for x in self.authority] )
def set_authority(self, authority):
"""
update the authority section of an existing urn
"""
authority_hrn = self.get_authority_hrn()
if not authority_hrn.startswith(authority):
hrn = ".".join([authority,authority_hrn, self.get_leaf()])
else:
hrn = ".".join([authority_hrn, self.get_leaf()])
self.hrn = hrn
self.hrn_to_urn()
self._normalize()
# sliver_id_parts is list that contains the sliver's
# slice id and node id
def get_sliver_id_parts(self):
sliver_id_parts = []
if self.type == 'sliver' or '-' in self.leaf:
sliver_id_parts = self.leaf.split('-')
return sliver_id_parts
    def urn_to_hrn(self):
"""
compute tuple (hrn, type) from urn
"""
# if not self.urn or not self.urn.startswith(Xrn.URN_PREFIX):
if not Xrn.is_urn(self.urn):
raise SfaAPIError, "Xrn.urn_to_hrn"
parts = Xrn.urn_split(self.urn)
type=parts.pop(2)
# Remove the authority name (e.g. '.sa')
if type == 'authority':
name = parts.pop()
            # Drop the sa. This is a bad hack, but it's either this
# or completely change how record types are generated/stored
if name != 'sa':
type = type + "+" + name
name =""
else:
name = parts.pop(len(parts)-1)
# convert parts (list) into hrn (str) by doing the following
# 1. remove blank parts
# 2. escape dots inside parts
# 3. replace ':' with '.' inside parts
        # 4. join parts using '.'
hrn = '.'.join([Xrn.escape(part).replace(':','
|
paul-jean/ud858
|
Lesson_2/000_Hello_Endpoints/helloworld_api.py
|
Python
|
gpl-3.0
| 1,742
| 0.012055
|
"""Hello World API implemented using Google Cloud Endpoints.
Contains declarations of endpoint, endpoint methods,
as well as the ProtoRPC message class and container required
for endpoint method definition.
"""
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
# If the request contains path or querystring arguments,
# you cannot use a simple Message class.
# Instead, you must use a ResourceContainer class
REQUEST_CONTAINER = endpoints.ResourceContainer(
message_types.VoidMessage,
name=messages.StringField(1)
)
REQUEST_GREETING_CONTAINER = endpoints.ResourceContainer(
period=messages.StringField(1),
name=messages.StringField(2)
)
package = 'Hello'
class Hello(messages.Message):
"""String that stores a message."""
greeting = messages.StringField(1)
@endpoints.api(name='helloworldendpoints', version='v1')
class HelloWorldApi(remote.Service):
"""Helloworld API v1."""
@endpoints.method(message_types.VoidMessage, Hello,
path = "sayHello", http_method='GET', name = "sayHello")
def say_hello(self, request):
return Hello(greeting="Hello World")
@endpoints.method(REQUEST_CONTAINER, Hello,
path = "sayHelloByName", http_method='GET', name = "sayHelloByName")
def say_hello_by_name(self, request):
greet = "Hello {}".format(request.name)
return Hello(greeting=greet)
@endpoints.method(REQUEST_GREETING_CONTAINER, Hello,
path = "greetByPeriod", http_method='GET', name = "greetByPeriod")
def greet_by_period(self, request):
greet = "Good {}, {}!".format(request.period, request.name)
return Hello(greeting=greet)
APPLICATION = endpoints.api_server([HelloWorldApi])
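# Example requests (a sketch; paths assume the default Cloud Endpoints root):
#   GET /_ah/api/helloworldendpoints/v1/sayHello
#       -> {"greeting": "Hello World"}
#   GET /_ah/api/helloworldendpoints/v1/sayHelloByName?name=Ada
#       -> {"greeting": "Hello Ada"}
#   GET /_ah/api/helloworldendpoints/v1/greetByPeriod?period=morning&name=Ada
#       -> {"greeting": "Good morning, Ada!"}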
|
vikramsunkara/PyME
|
pyme/lazy_dict.py
|
Python
|
agpl-3.0
| 3,974
| 0.007549
|
"""
Dictionary with lazy evaluation on access, via a supplied update function
"""
import itertools
class LazyDict(dict):
"""
A dictionary type that lazily updates values when they are accessed.
All the usual dictionary methods work as expected, with automatic lazy
    updates occurring behind the scenes whenever values are read from the
dictionary.
The optional ``items`` argument, if specified, is a mapping instance used
to initialise the items in the :class:`LazyDict`.
The ``update_value`` argument required by the :class:`LazyDict` constructor
must be a function of the form:
        update_value(k, existing_value, member) -> updated_value
This function is called whenever an item with the key ``k`` is read
from the :class:`LazyDict`. The second argument ``existing_value``, is
the value corresponding to the key ``k`` stored in the :class:`LazyDict`,
or ``None``, if the key ``k`` is not contained in the :class:`LazyDict`.
The third argument ``member`` is a boolean value indicating if there is
an existing value stored under the key ``k``.
This function is used as follows by the :class:`LazyDict`. Suppose that the
value ``v`` has been stored in a :class:`LazyDict` object ``lazy_dict``
under the key ``k``, that is, ``lazy_dict[k] = v``. Then subsequently
accessing this value in the usual manner::
v_updated = lazy_dict[k]
is equivalent to the following two statements::
lazy_dict[k] = update_value(k, v, (k in lazy_dict))
v_updated = update_value(k, v, (k in lazy_dict))
Observe how the value stored in the :class:`LazyDict` under the key ``k``
is first updated, using the provided function,
with the updated value then being the one returned.
"""
def __init__(self, update_value, items = None):
"""
Returns a LazyDict using the specified ``update_value`` function
and optional initial dictionary arguments.
"""
self.update_value = update_value
if items is None:
dict.__init__(self)
else:
            dict.__init__(self, items)
def __getitem__(self, key):
member = dict.__contains__(self, key)
if member:
existing_value = dict.__getitem__(self, key)
else:
existing_value = None
# ensure measurement is up to date
updated_value = self.update_value(key, existing_value, member)
self[key] = updated_value
return updated_value
def copy(self):
return LazyDict(self.update_value, dict.copy(self))
def itervalues(self):
return itertools.imap((lambda k : self[k]), dict.iterkeys(self))
def iteritems(self):
return itertools.imap((lambda k : (k, self[k])), dict.iterkeys(self))
def pop(self, *args):
n_args = len(args)
if n_args < 1:
raise TypeError('pop expected at least 1 argument, got %d' % n_args)
if n_args > 2:
raise TypeError('pop expected at most 2 arguments, got %d' % n_args)
k = args[0]
if k in self:
value = self[k]
del self[k]
return value
else:
if n_args == 2:
return args[1]
else:
raise KeyError(str(k))
def popitem(self):
key, value = dict.popitem(self)
self[key] = value
updated_value = self[key]
del self[key]
return key, updated_value
def setdefault(self, k, x=None):
if k in self:
return self[k]
else:
self[k] = x
return x
def get(self, k, x=None):
if k in self:
return self[k]
else:
return x
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
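# Minimal usage sketch of the update protocol described in the docstring
# (self-contained; the counter function below is hypothetical):
def _count_reads(key, existing_value, member):
    """Return 1 on first access, otherwise increment the stored value."""
    return existing_value + 1 if member else 1

_d = LazyDict(_count_reads)
_d['a'] = 10
assert _d['a'] == 11   # update_value('a', 10, True) ran before returning
assert _d['a'] == 12   # the updated value was stored, then updated again
assert _d['b'] == 1    # missing key: update_value('b', None, False)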
|
opencloudinfra/orchestrator
|
venv/Lib/site-packages/registration/__init__.py
|
Python
|
gpl-3.0
| 666
| 0
|
VERSION = (2, 0, 4, 'final', 0)
def get_version():
"""
    Returns a PEP 386-compliant version number from VERSION.
"""
assert len(VERSION) == 5
assert VERSION[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if VERSION[2] == 0 else 3
main = '.'.join(str(x) for x in VERSION[:parts])
sub = ''
    if VERSION[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[VERSION[3]] + str(VERSION[4])
return str(main + sub)
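# Worked examples (derived from the logic above):
#   VERSION = (2, 0, 4, 'final', 0) -> get_version() == '2.0.4'
#   VERSION = (2, 1, 0, 'beta', 3)  -> get_version() == '2.1b3'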
|
Open365/Open365
|
lib/Wrappers/Logger.py
|
Python
|
agpl-3.0
| 1,007
| 0.000993
|
import logging
import os
from lib.Settings import Settings
from lib.Wrappers.NullLogger import NullLogger
class Logger:
def __init__(self, name):
if 'UNITTESTING' in os.environ:
self.logging = NullLogger()
else:
settings = Settings().getSettings()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=settings["logs"]["level"])
self.logging = logging.getLogger(name)
def debug(self, *args, **kwargs):
self.logging.debug(*args, **kwargs)
def info(self, *args, **kwargs):
self.logging.info(*args, **kwargs)
def warning(self, *args, **kwargs):
self.logging.warning(*args, **kwargs)
def error(self, *args, **kwargs):
self.logging.error(*args, **kwargs)
def critical(self, *args, **kwargs):
self.logging.critical(*args, **kwargs)
def log(self, *args, **kwargs):
self.logging.log(*args, **kwargs)
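# Usage sketch (hedged: assumes Settings supplies logs.level as read above;
# `path` is a placeholder):
#   logger = Logger(__name__)
#   logger.info('service started')
#   logger.error('failed to open %s', path)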
|
nimbusproject/epumgmt
|
src/python/epumgmt/main/em_args.py
|
Python
|
apache-2.0
| 3,947
| 0.00532
|
from epumgmt.api.actions import ACTIONS
from epumgmt.main import ControlArg
import optparse
a = []
ALL_EC_ARGS_LIST = a
################################################################################
# EM ARGUMENTS
#
# The following cmdline arguments may be queried via Parameters, using either
# the 'name' as the argument or simply the object like:
#
# params.get_arg_or_none(em_args.GRACE_PERIOD)
#
################################################################################
ACTION = ControlArg("action", "-a")
ACTION.help = optparse.SUPPRESS_HELP
a.append(ACTION)
CONF = ControlArg("conf", "-c", metavar="PATH")
a.append(CONF)
CONF.help = "Absolute path to main.conf. Required (shell script adds the default)."
DRYRUN = ControlArg("dryrun", None, noval=True)
#a.append(DRYRUN)
DRYRUN.help = "Do as little real work as possible; will still affect the filesystem, for example logs and information persistence. (not implemented yet)"
KILLNUM = ControlArg("killnum", "-k", metavar="NUM")
a.append(KILLNUM)
KILLNUM.help = "For the fetchkill action, number of VMs to terminate."
NAME = ControlArg("name", "-n", metavar="RUN_NAME")
a.append(NAME)
NAME.help = "Unique run name for logs and management. Can use across multiple invocations for launches that belong together."
GRAPH_NAME = ControlArg("graphname", "-r", metavar="GRAPH_NAME")
a.append(GRAPH_NAME)
GRAPH_NAME.help = "For the generate-graph action, name of graph to generate: stacked-vms, job-tts, job-rate, node-info, or controller."
GRAPH_TYPE = ControlArg("graphtype", "-t", metavar="GRAPH_TYPE")
a.append(GRAPH_TYPE)
GRAPH_TYPE.help = "For the generate-graph action, output file type: eps or png."
WORKLOAD_FILE = ControlArg("workloadfilename", "-f", metavar="WORKLOAD_FILE")
a.append(WORKLOAD_FILE)
WORKLOAD_FILE.help = "For the execute-workload-test action, file name of workload definition file."
WORKLOAD_TYPE = ControlArg("workloadtype", "-w", metavar="WORKLOAD_TYPE")
a.append(WORKLOAD_TYPE)
WORKLOAD_TYPE.help = "For the execute-workload-test and generate-graph actions: amqp or torque"
CLOUDINITD_DIR = ControlArg("cloudinitdir", "-C", metavar="PATH")
a.append(CLOUDINITD_DIR)
CLOUDINITD_DIR.help = "Path to the directory where cloudinit databases are kept. default is ~/.cloudinit"
REPORT_INSTANCE = ControlArg("instance-report", None, metavar="COLUMNS")
#a.append(REPORT_INSTANCE)
REPORT_INSTANCE.help = "Used with '--action %s'. Batch mode for machine parsing instance status. Report selected columns from choice of the following separated by comma: service,instanceid,iaas_state,iaas_state_time,heartbeat_time,heartbeat_state" % ACTIONS.STATUS
REPORT_SERVICE = ControlArg("service-report", None, metavar="COLUMNS")
#a.append(REPORT_SERVICE)
REPORT_SERVICE.help = "Used with '--action %s'. Batch mode for machine parsing service status. Report selected columns from choice of the following separated by comma: service,de_state,de_conf" % ACTIONS.STATUS
STATUS_NOUPDATE = ControlArg("no-update", None, noval=True)
a.append(STATUS_NOUPDATE)
STATUS_NOUPDATE.help = "Used with '--action %s'. If used, %s does not try to find any new information." % (ACTIONS.STATUS, ACTIONS.STATUS)
KILLRUN_NOFETCH = ControlArg("no-fetch", None, noval=True)
a.append(KILLRUN_NOFETCH)
KILLRUN_NOFETCH.help = "Can be used with action %s and %s. If used, does not try to find any new information or get any logs." % (ACTIONS.KILLRUN, ACTIONS.FIND_VERSIONS)
WRITE_REPORT = ControlArg("write-report", None, metavar="PATH")
a.append(WRITE_REPORT)
WRITE_REPORT.help = "Used with action %s. Also write report to the given path if it does not exist." % ACTIONS.FIND_VERSIONS
NEWN = ControlArg("newn", None)
a.append(NEWN)
NEWN.help = "Used with '--action %s'. Syntax is controller_name:N[,controller_name:N,...]" % ACTIONS.RECONFIGURE_N
CONTROLLER = ControlArg("controller", None)
a.append(CONTROLLER)
CONTROLLER.help = "Some actions only work on a specific controller"
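# Adding a new argument follows the same pattern (a sketch; GRACE_PERIOD is
# named in the header comment above, but the flag and help text here are
# hypothetical):
#   GRACE_PERIOD = ControlArg("graceperiod", "-g", metavar="SECONDS")
#   a.append(GRACE_PERIOD)
#   GRACE_PERIOD.help = "Seconds to wait before termination."
#   # ...later queried via params.get_arg_or_none(em_args.GRACE_PERIOD)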
|
garoa/pingo
|
pingo/test/level1/cases.py
|
Python
|
mit
| 1,331
| 0.000751
|
import pingo
'''
In order to use this set of cases, it is necessary to set
the following attributes on your TestCase setUp:
self.analog_input_pin_number = 0
self.expected_analog_input = 1004
self.expected_analog_ratio = 0.98
'''
class AnalogReadBasics(object):
'''
    Wire a 10K Ohm resistance from the AnalogPin to GND.
    Then wire a 200 Ohm resistor from the AnalogPin to VDD.
    This circuit will provide a reading of ~98%.
'''
def test_200ohmRead(self):
pin = self.board.pins[self.analog_input_pin_number]
pin.mode = pingo.ANALOG
_input = pin.value
# print "Value Read: ", _input
        assert self.expected_analog_input - 3 <= _input <= self.expected_analog_input + 3
def test_pin_ratio(self):
pin = self.board.pins[self.analog_input_pin_number]
pin.mode = pingo.ANALOG
bits_resolution = (2 ** pin.bits) - 1
_input = pin.ratio(0, bits_resolution, 0.0, 1.0)
# print "Value Read: ", _input
# Two decimal places check
        assert abs(_input - self.expected_analog_ratio) < 1e-2
class AnalogExceptions(object):
def test_wrong_output_mode(self):
pin = self.board.pins[self.analog_input_pin_number]
with self.assertRaises(pingo.ModeNotSuported):
pin.mode = pingo.OUT
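# A concrete TestCase would combine these mixins roughly like this (a sketch;
# board construction depends on your hardware and is left hypothetical):
#
# import unittest
#
# class UnoAnalogTest(AnalogReadBasics, AnalogExceptions, unittest.TestCase):
#     def setUp(self):
#         self.board = ...  # a pingo board instance for your device
#         self.analog_input_pin_number = 0
#         self.expected_analog_input = 1004
#         self.expected_analog_ratio = 0.98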
|
ReconCell/smacha
|
smacha_ros/test/smacha_diff_test_examples.py
|
Python
|
bsd-3-clause
| 3,604
| 0.003607
|
#!/usr/bin/env python
import sys
import argparse
import os
import unittest2 as unittest
from ruamel import yaml
from smacha.util import Tester
import rospy
import rospkg
import rostest
ROS_TEMPLATES_DIR = '../src/smacha_ros/templates'
TEMPLATES_DIR = 'smacha_templates/smacha_test_examples'
WRITE_OUTPUT_FILES = False
OUTPUT_PY_DIR = '/tmp/smacha/smacha_test_examples/smacha_generated_py'
OUTPUT_YML_DIR = '/tmp/smacha/smacha_test_examples/smacha_generated_scripts'
CONF_FILE = 'test_examples_config.yml'
DEBUG_LEVEL = 1
CONF_DICT = {}
class TestGenerate(Tester):
"""Tester class for general unit testing of various SMACHA tool
functionalities.
The tests run by this class are performed by generating code using SMACHA
scripts and templates and comparing the generated output code to the
expected code from hand-written code samples.
This includes testing both SMACHA YAML scripts generated by, e.g. the
:func:`smacha.parser.contain` and :func:`smacha.parser.extract` methods,
and Python code generated by the :func:`smacha.generator.run` method.
"""
def __init__(self, *args, **kwargs):
# Set Tester member variables
self.set_write_output_files(WRITE_OUTPUT_FILES)
self.set_output_py_dir(OUTPUT_PY_DIR)
self.set_output_yml_dir(OUTPUT_YML_DIR)
self.set_debug_level(DEBUG_LEVEL)
# Store the base path
self._base_path = os.path.dirname(os.path.abspath(__file__))
# Call the parent constructor
super(TestGenerate, self).__init__(
*args,
script_dirs=[os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples')],
template_dirs=[
os.path.join(self._base_path, ROS_TEMPLATES_DIR),
os.path.join(self._base_path, TEMPLATES_DIR)
],
**kwargs)
def test_generate(self):
"""Test generating against baseline files"""
for test_case in CONF_DICT['TEST_GENERATE']:
with self.subTest(test_case=test_case):
                test_params = list(test_case.values())[0]
script_file = test_params['script']
baseline = test_params['baseline']
with open(os.path.join(self._base_path, 'smacha_test_examples/{}'.format(baseline))) as original_file:
generated_code = self._strip_uuids(self._generate(os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples/{}'.format(script_file))))
original_code = original_file.read()
                    self.assertTrue(self._compare(generated_code, original_code, file_a='generated', file_b='original'))
if __name__=="__main__":
# Read the configuration file before parsing arguments,
try:
base_path = os.path.dirname(os.path.abspath(__file__))
conf_file_loc = os.path.join(base_path, CONF_FILE)
        f = open(conf_file_loc)
CONF_DICT = yaml.load(f)
except Exception as e:
print('Failed to read the configuration file. See error:\n{}'.format(e))
exit()
    if 'WRITE_OUTPUT_FILES' in CONF_DICT:
        WRITE_OUTPUT_FILES = CONF_DICT['WRITE_OUTPUT_FILES']
    if 'OUTPUT_PY_DIR' in CONF_DICT:
        OUTPUT_PY_DIR = CONF_DICT['OUTPUT_PY_DIR']
    if 'OUTPUT_YML_DIR' in CONF_DICT:
        OUTPUT_YML_DIR = CONF_DICT['OUTPUT_YML_DIR']
    if 'DEBUG_LEVEL' in CONF_DICT:
        DEBUG_LEVEL = CONF_DICT['DEBUG_LEVEL']
rospy.init_node('test_smacha_ros_generate',log_level=rospy.DEBUG)
rostest.rosrun('smacha_ros', 'test_smacha_ros_generate', TestGenerate)
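# Sketch of the expected test_examples_config.yml shape, inferred from the
# dictionary accesses above (key names match the code; the entry and file
# names under TEST_GENERATE are hypothetical):
#
# WRITE_OUTPUT_FILES: false
# DEBUG_LEVEL: 1
# TEST_GENERATE:
#   - seq_test:
#       script: seq.yml
#       baseline: seq.py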
|
pliniopereira/ccd10
|
src/business/shooters/EphemerisShooter.py
|
Python
|
gpl-3.0
| 6,596
| 0.001365
|
import datetime
import math
import time
import ephem
from PyQt5 import QtCore
from src.business.EphemObserverFactory import EphemObserverFactory
from src.business.configuration.configProject import ConfigProject
from src.business.configuration.settingsCamera import SettingsCamera
from src.business.consoleThreadOutput import ConsoleThreadOutput
from src.business.shooters.ContinuousShooterThread import ContinuousShooterThread
class EphemerisShooter(QtCore.QThread):
'''
    Class for automatic mode.
'''
signal_started_shooting = QtCore.pyqtSignal(name="signalStartedShooting")
signal_temp = QtCore.pyqtSignal(name="signalTemp")
def __init__(self):
super(EphemerisShooter, self).__init__()
self.camconfig = SettingsCamera()
self.camconfig.setup_settings()
infocam = self.camconfig.get_camera_settings()
self.ObserverFactory = EphemObserverFactory()
self.continuousShooterThread = ContinuousShooterThread(int(infocam[4]))
self.console = ConsoleThreadOutput()
self.config = ConfigProject()
info = self.config.get_geographic_settings()
self.latitude = info[0] # '-45.51'
self.longitude = info[1] # '-23.12'
self.elevation = info[2] # 350
info_sun = self.config.get_moonsun_settings()
self.max_solar_elevation = float(info_sun[0]) # -12
self.ignore_lunar_position = info_sun[1]
self.max_lunar_elevation = float(info_sun[2]) # 8
self.max_lunar_phase = float(info_sun[3]) # 1
self.wait_temperature = False
print(int(infocam[4]))
try:
self.s = int(infocam[4])
self.continuousShooterThread.set_sleep_time(self.s)
except Exception as e:
self.s = 5
self.shootOn = False
self.controller = True
self.count = 1
def refresh_data(self):
try:
info = self.config.get_geographic_settings()
self.latitude = info[0] # '-45.51'
self.longitude = info[1] # '-23.12'
self.elevation = info[2] # 350
infosun = self.config.get_moonsun_settings()
self.max_solar_elevation = float(infosun[0]) # -12
self.ignore_lunar_position = infosun[1]
self.max_lunar_elevation = float(infosun[2]) # 8
self.max_lunar_phase = float(infosun[3]) # 1
except Exception as e:
            self.console.raise_text("Exception thrown while acquiring information\n"
                                    "Please set the observatory information in settings\n" + str(e), level=3)
self.latitude = 0
self.longitude = 0
self.elevation = 0
self.max_solar_elevation = 0
self.max_lunar_elevation = 0
self.max_lunar_phase = 0
infocam = self.camconfig.get_camera_settings()
try:
self.s = int(infocam[4])
except Exception as e:
self.s = 0
def calculate_moon(self, obs):
aux = obs
aux.compute_pressure()
aux.horizon = '8'
moon = ephem.Moon(aux)
return aux.previous_setting(moon), aux.next_rising(moon)
def calculate_sun(self, obs):
aux = obs
aux.compute_pressure()
aux.horizon = '-12'
sun = ephem.Sun(aux)
return aux.previous_setting(sun), aux.next_rising(sun)
def set_solar_and_lunar_parameters(self, maxSolarElevation, maxLunarElevation, maxLunarPhase):
self.max_solar_elevation = maxSolarElevation
self.max_lunar_elevation = maxLunarElevation
self.max_lunar_phase = maxLunarPhase
def run(self):
self.refresh_data()
obs = self.ObserverFactory.create_observer(longitude=self.longitude,
latitude=self.latitude,
elevation=self.elevation)
self.controller = True
self.shootOn = False
c = 0
try:
while self.controller:
obs.date = ephem.date(datetime.datetime.utcnow())
sun = ephem.Sun(obs)
moon = ephem.Moon(obs)
frac = moon.moon_phase
a = ephem.degrees(sun.alt)
b = ephem.degrees(str(moon.alt))
                # Shooter control variable
t = 0
# print("\n\n")
# print("math.degrees(a) = " + str(math.degrees(a)))
# print("self.max_solar_elevation = " + str(self.max_solar_elevation))
# print("self.ignore_lunar_position = " + str(self.ignore_lunar_position))
# print("math.degrees(b) = " + str(math.degrees(b)))
# print("self.max_lunar_elevation = " + str(self.max_lunar_elevation))
# print("self.max_lunar_phase = " + str(self.max_lunar_phase))
# print("\n\n")
if float(math.degrees(a)) < self.max_solar_elevation or t == 1:
if (not self.ignore_lunar_position and float(math.degrees(b)) < self.max_lunar_elevation
and frac < self.max_lunar_phase) or self.ignore_lunar_position:
if not self.shootOn:
if not c:
self.signal_started_shooting.emit()
c = 1
self.signal_temp.emit()
time.sleep(5)
if self.wait_temperature:
                            # Start the observations
self.start_taking_photo()
self.shootOn = True
else:
if self.shootOn:
                        # Stop the observations
self.stop_taking_photo()
c = 0
self.t = False
self.shootOn = False
time.sleep(5)
except Exception as e:
            self.console.raise_text("Exception in Ephemeris Shooter -> " + str(e))
def stop_shooter(self):
self.controller = False
self.continuousShooterThread.stop_continuous_shooter()
def start_taking_photo(self):
self.continuousShooterThread.set_sleep_time(self.s)
self.continuousShooterThread.start_continuous_shooter()
self.continuousShooterThread.start()
def stop_taking_photo(self):
self.continuousShooterThread.stop_continuous_shooter()
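# Standalone sketch of the solar-elevation test performed in run() above
# (assumes pyephem; the coordinates mirror the commented defaults):
#
# import datetime, math, ephem
#
# obs = ephem.Observer()
# obs.lat, obs.lon, obs.elevation = '-45.51', '-23.12', 350
# obs.date = ephem.date(datetime.datetime.utcnow())
# sun = ephem.Sun(obs)
# print(math.degrees(sun.alt))  # shoot only while below max_solar_elevation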
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/voicerepublic.py
|
Python
|
gpl-3.0
| 3,272
| 0.025978
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
sanitized_Request,
)
class VoiceRepublicIE(InfoExtractor):
_VALID_URL = r'https?://voicerepublic\.com/(?:talks|embed)/(?P<id>[0-9a-z-]+)'
_TESTS = [{
'url': 'http://voicerepublic.com/talks/watching-the-watchers-building-a-sousveillance-state',
'md5': 'b9174d651323f17783000876347116e3',
'info_dict': {
'id': '2296',
'display_id': 'watching-the-watchers-building-a-sousveillance-state',
'ext': 'm4a',
'title': 'Watching the Watchers: Building a Sousveillance State',
			'description': 'Secret surveillance programs have metadata too. The people and companies that operate secret surveillance programs can be surveilled.',
'thumbnail': r're:^https?://.*\.(?:png|jpg)$',
'duration': 1800,
'view_count': int,
}
}, {
'url': 'http://voicerepublic.com/embed/watching-the-watchers-building-a-sousveillance-state',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
req = sanitized_Request(
			compat_urlparse.urljoin(url, '/talks/%s' % display_id))
# Older versions of Firefox get redirected to an "upgrade browser" page
req.add_header('User-Agent', 'youtube-dl')
webpage = self._download_webpage(req, display_id)
if '>Queued for processing, please stand by...<' in webpage:
raise ExtractorError(
'Audio is still queued for processing', expected=True)
config = self._search_regex(
r'(?s)return ({.+?});\s*\n', webpage,
'data', default=None)
data = self._parse_json(config, display_id, fatal=False) if config else None
if data:
title = data['title']
description = data.get('teaser')
talk_id = compat_str(data.get('talk_id') or display_id)
talk = data['talk']
duration = int_or_none(talk.get('duration'))
formats = [{
'url': compat_urlparse.urljoin(url, talk_url),
'format_id': format_id,
'ext': determine_ext(talk_url) or format_id,
'vcodec': 'none',
} for format_id, talk_url in talk['links'].items()]
else:
title = self._og_search_title(webpage)
description = self._html_search_regex(
r"(?s)<div class='talk-teaser'[^>]*>(.+?)</div>",
webpage, 'description', fatal=False)
talk_id = self._search_regex(
[r"id='jc-(\d+)'", r"data-shareable-id='(\d+)'"],
webpage, 'talk id', default=None) or display_id
duration = None
player = self._search_regex(
r"class='vr-player jp-jplayer'([^>]+)>", webpage, 'player')
formats = [{
'url': compat_urlparse.urljoin(url, talk_url),
'format_id': format_id,
'ext': determine_ext(talk_url) or format_id,
'vcodec': 'none',
} for format_id, talk_url in re.findall(r"data-([^=]+)='([^']+)'", player)]
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
view_count = int_or_none(self._search_regex(
r"class='play-count[^']*'>\s*(\d+) plays",
webpage, 'play count', fatal=False))
return {
'id': talk_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
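# Quick sanity check of the _VALID_URL pattern above (a sketch):
#
# import re
# m = re.match(VoiceRepublicIE._VALID_URL,
#              'http://voicerepublic.com/talks/watching-the-watchers-building-a-sousveillance-state')
# assert m and m.group('id') == 'watching-the-watchers-building-a-sousveillance-state'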
|
brettwooldridge/buck
|
programs/buck_project.py
|
Python
|
apache-2.0
| 6,332
| 0.000632
|
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import hashlib
import os
import shutil
import sys
import tempfile
import textwrap
import file_locks
from tracing import Tracing
def get_file_contents_if_exists(path, default=None):
with Tracing("BuckProject.get_file_contents_if_it_exists", args={"path": path}):
if not os.path.exists(path):
return default
with open(path) as f:
contents = f.read().strip()
return default if not contents else contents
def write_contents_to_file(path, contents):
with Tracing("BuckProject.write_contents_to_file", args={"path": path}):
with open(path, "w") as output_file:
output_file.write(str(contents))
def makedirs(path):
try:
os.makedirs(path)
except OSError as e:
# Potentially the case that multiple processes are running in parallel
# (e.g. a series of linters running buck query without buckd), so we
# should just swallow the error.
# This is mostly equivalent to os.makedirs(path, exist_ok=True) in
# Python 3.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
class BuckProject:
def __init__(self, root):
self.root = root
self._buck_out = os.path.join(root, "buck-out")
buck_out_tmp = os.path.join(self._buck_out, "tmp")
makedirs(buck_out_tmp)
self._buck_out_log = os.path.join(self._buck_out, "log")
makedirs(self._buck_out_log)
self.tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=buck_out_tmp)
# Only created if buckd is used.
self.buckd_tmp_dir = None
self.buckd_dir = os.path.join(root, ".buckd")
self.buckd_version_file = os.path.join(self.buckd_dir, "buckd.version")
self.buckd_pid_file = os.path.join(self.buckd_dir, "pid")
self.buckd_stdout = os.path.join(self.buckd_dir, "stdout")
self.buckd_stderr = os.path.join(self.buckd_dir, "stderr")
        buck_javaargs_path = os.path.join(self.root, ".buckjavaargs")
self.buck_javaargs = get_file_contents_if_exists(buck_javaargs_path)
buck_javaargs_path_local = os.path.join(self.root, ".buckjavaargs.local")
self.buck_javaargs_local = get_file_contents_if_exists(buck_javaargs_path_local)
def get_root_hash(self):
return hashlib.sha256(self.root.encode("utf-8")).hexdigest()
    def get_buckd_transport_file_path(self):
if os.name == "nt":
return u"\\\\.\\pipe\\buckd_{0}".format(self.get_root_hash())
else:
return os.path.join(self.buckd_dir, "sock")
def get_buckd_transport_address(self):
if os.name == "nt":
return "local:buckd_{0}".format(self.get_root_hash())
else:
return "local:.buckd/sock"
def get_running_buckd_version(self):
return get_file_contents_if_exists(self.buckd_version_file)
def get_running_buckd_pid(self):
try:
return int(get_file_contents_if_exists(self.buckd_pid_file))
except ValueError:
return None
except TypeError:
return None
def get_buckd_stdout(self):
return self.buckd_stdout
def get_buckd_stderr(self):
return self.buckd_stderr
def get_buck_out_log_dir(self):
return self._buck_out_log
def clean_up_buckd(self):
with Tracing("BuckProject.clean_up_buckd"):
if os.path.exists(self.buckd_dir):
file_locks.rmtree_if_can_lock(self.buckd_dir)
def create_buckd_tmp_dir(self):
if self.buckd_tmp_dir is not None:
return self.buckd_tmp_dir
tmp_dir_parent = os.path.join(self.buckd_dir, "tmp")
makedirs(tmp_dir_parent)
self.buckd_tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=tmp_dir_parent)
return self.buckd_tmp_dir
def save_buckd_version(self, version):
write_contents_to_file(self.buckd_version_file, version)
def save_buckd_pid(self, pid):
write_contents_to_file(self.buckd_pid_file, str(pid))
@staticmethod
def from_current_dir():
with Tracing("BuckProject.from_current_dir"):
current_dir = os.getcwd()
if "--version" in sys.argv or "-V" in sys.argv:
return BuckProject(current_dir)
at_root_dir = False
while not at_root_dir:
if os.path.exists(os.path.join(current_dir, ".buckconfig")):
return BuckProject(current_dir)
parent_dir = os.path.dirname(current_dir)
at_root_dir = current_dir == parent_dir
current_dir = parent_dir
raise NoBuckConfigFoundException()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with Tracing("BuckProject.__exit__"):
if os.path.exists(self.tmp_dir):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise
class NoBuckConfigFoundException(Exception):
def __init__(self):
no_buckconfig_message_path = ".no_buckconfig_message"
default_message = textwrap.dedent(
"""\
This does not appear to be the root of a Buck project. Please 'cd'
to the root of your project before running buck. If this really is
the root of your project, run
'touch .buckconfig'
and then re-run your buck command."""
)
message = get_file_contents_if_exists(
no_buckconfig_message_path, default_message
)
Exception.__init__(self, message)
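# Typical driver usage (a sketch mirroring how launcher scripts use this class):
#
# with BuckProject.from_current_dir() as project:
#     print(project.root, project.tmp_dir)
#     tmp = project.create_buckd_tmp_dir()  # only needed when buckd is used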
|
palmtree5/Red-DiscordBot
|
redbot/cogs/audio/core/__init__.py
|
Python
|
gpl-3.0
| 5,241
| 0.000382
|
import asyncio
import datetime
import json
from collections import Counter, defaultdict
from pathlib import Path
from typing import Mapping
import aiohttp
import discord
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.commands import Cog
from redbot.core.data_manager import cog_data_path
from redbot.core.i18n import Translator, cog_i18n
from ..utils import CacheLevel, PlaylistScope
from . import abc, cog_utils, commands, events, tasks, utilities
from .cog_utils import CompositeMetaClass
_ = Translator("Audio", Path(__file__))
@cog_i18n(_)
class Audio(
commands.Commands,
events.Events,
tasks.Tasks,
utilities.Utilities,
Cog,
metaclass=CompositeMetaClass,
):
"""Play audio through voice channels."""
_default_lavalink_settings = {
"host": "localhost",
"rest_port": 2333,
"ws_port": 2333,
"password": "youshallnotpass",
}
def __init__(self, bot: Red):
super().__init__()
self.bot = bot
self.config = Config.get_conf(self, 2711759130, force_registration=True)
self.api_interface = None
self.player_manager = None
self.playlist_api = None
self.local_folder_current_path = None
self.db_conn = None
self._error_counter = Counter()
self._error_timer = {}
self._disconnected_players = {}
self._daily_playlist_cache = {}
self._daily_global_playlist_cache = {}
self._persist_queue_cache = {}
self._dj_status_cache = {}
self._dj_role_cache = {}
self.skip_votes = {}
self.play_lock = {}
self.lavalink_connect_task = None
self._restore_task = None
self.player_automated_timer_task = None
self.cog_cleaned_up = False
self.lavalink_connection_aborted = False
self.permission_cache = discord.Permissions(
embed_links=True,
read_messages=True,
send_messages=True,
read_message_history=True,
add_reactions=True,
)
self.session = aiohttp.ClientSession(json_serialize=json.dumps)
self.cog_ready_event = asyncio.Event()
self._ws_resume = defaultdict(asyncio.Event)
self._ws_op_codes = defaultdict(asyncio.LifoQueue)
self.cog_init_task = None
self.global_api_user = {
"fetched": False,
"can_read": False,
"can_post": False,
"can_delete": False,
}
self._ll_guild_updates = set()
self._diconnected_shard = set()
self._last_ll_update = datetime.datetime.now(datetime.timezone.utc)
default_global = dict(
schema_version=1,
bundled_playlist_version=0,
owner_notification=0,
cache_level=CacheLevel.all().value,
cache_age=365,
daily_playlists=False,
global_db_enabled=False,
global_db_get_timeout=5,
status=False,
use_external_lavalink=False,
restrict=True,
localpath=str(cog_data_path(raw_name="Audio")),
url_keyword_blacklist=[],
url_keyword_whitelist=[],
java_exc_path="java",
**self._default_lavalink_settings,
)
default_guild = dict(
auto_play=False,
currently_auto_playing_in=None,
auto_deafen=True,
autoplaylist=dict(
enabled=True,
id=42069,
name="Aikaterna's curated tracks",
scope=PlaylistScope.GLOBAL.value,
),
persist_queue=True,
disconnect=False,
dj_enabled=False,
dj_role=None,
daily_playlists=False,
emptydc_enabled=False,
emptydc_timer=0,
emptypause_enabled=False,
emptypause_timer=0,
jukebox=False,
jukebox_price=0,
maxlength=0,
max_volume=150,
notify=False,
prefer_lyrics=False,
repeat=False,
shuffle=False,
shuffle_bumped=True,
thumbnail=False,
volume=100,
vote_enabled=False,
vote_percent=0,
room_lock=None,
url_keyword_blacklist=[],
url_keyword_whitelist=[],
country_code="US",
)
_playlist: Mapping = dict(id=None, author=None, name=None, playlist_url=None, tracks=[])
self.config.init_custom("EQUALIZER", 1)
self.config.register_custom("EQUALIZER", eq_bands=[], eq_presets={})
        self.config.init_custom(PlaylistScope.GLOBAL.value, 1)
self.config.register_custom(PlaylistScope.GLOBAL.value, **_playlist)
self.config.init_custom(PlaylistScope.GUILD.value, 2)
self.config.register_custom(PlaylistScope.GUILD.value, **_playlist)
self.config.init_custom(PlaylistScope.USER.value, 2)
self.config.register_custom(PlaylistScope.USER.value, **_playlist)
self.config.register_guild(**default_guild)
self.config.register_global(**default_global)
self.config.register_user(country_code=None)
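
        # Hypothetical read-side sketch (standard redbot Config accessors; the
        # `guild` object is a placeholder) showing how the defaults registered
        # above would be consumed elsewhere in the cog:
        #
        #   host = await self.config.host()                    # -> "localhost"
        #   volume = await self.config.guild(guild).volume()   # -> 100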
|
chemelnucfin/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/csv_dataset_test.py
|
Python
|
apache-2.0
| 20,082
| 0.004531
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.CsvDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class CsvDatasetTest(test_base.DatasetTestBase):
def _setup_files(self, inputs, linebreak='\n', compression_type=None):
filenames = []
for i, ip in enumerate(inputs):
fn = os.path.join(self.get_temp_dir(), 'temp_%d.csv' % i)
contents = linebreak.join(ip).encode('utf-8')
if compression_type is None:
with open(fn, 'wb') as f:
f.write(contents)
elif compression_type == 'GZIP':
with gzip.GzipFile(fn, 'wb') as f:
f.write(contents)
elif compression_type == 'ZLIB':
contents = zlib.compress(contents)
with open(fn, 'wb') as f:
f.write(contents)
else:
raise ValueError('Unsupported compression_type', compression_type)
filenames.append(fn)
return filenames
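
  # For example, _setup_files([['a,b', 'c,d']], compression_type='GZIP')
  # writes a single gzip-compressed file whose decompressed contents are
  # "a,b\nc,d" and returns its path in a one-element list.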
def _make_test_datasets(self, inputs, **kwargs):
# Test by comparing its output to what we could get with map->decode_csv
filenames = self._setup_files(inputs)
dataset_expected = core_readers.TextLineDataset(filenames)
dataset_expected = dataset_expected.map(
lambda l: parsing_ops.decode_csv(l, **kwargs))
dataset_actual = readers.CsvDataset(filenames, **kwargs)
return (dataset_actual, dataset_expected)
def _test_by_comparison(self, inputs, **kwargs):
"""Checks that CsvDataset is equiv to TextLineDataset->map(decode_csv)."""
dataset_actual, dataset_expected = self._make_test_datasets(
inputs, **kwargs)
self.assertDatasetsEqual(dataset_actual, dataset_expected)
def _verify_output_or_err(self,
dataset,
expected_output=None,
expected_err_re=None):
if expected_err_re is None:
# Verify that output is expected, without errors
nxt = self.getNext(dataset)
expected_output = [[
v.encode('utf-8') if isinstance(v, str) else v for v in op
] for op in expected_output]
for value in expected_output:
op = self.evaluate(nxt())
self.assertAllEqual(op, value)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(nxt())
else:
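      # Verify that OpError is produced as expected; any error raised while
      # draining the dataset propagates to the caller's assertRaisesOpError.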
nxt = self.getNext(dataset)
while True:
try:
self.evaluate(nxt())
except errors.OutOfRangeError:
break
def _test_dataset(
self,
inputs,
expected_output=None,
expected_err_re=None,
linebreak='\n',
compression_type=None, # Used for both setup and parsing
**kwargs):
"""Checks that elements produced by CsvDataset match expected output."""
# Convert str type because py3 tf strings are bytestrings
filenames = self._setup_files(inputs, linebreak, compression_type)
kwargs['compression_type'] = compression_type
if expected_err_re is not None:
# Verify that OpError is produced as expected
with self.assertRaisesOpError(expected_err_re):
dataset = readers.CsvDataset(filenames, **kwargs)
self._verify_output_or_err(dataset, expected_output, expected_err_re)
else:
dataset = readers.CsvDataset(filenames, **kwargs)
self._verify_output_or_err(dataset, expected_output, expected_err_re)
def testCsvDataset_requiredFields(self):
record_defaults = [[]] * 4
inputs = [['1,2,3,4']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_int(self):
record_defaults = [[0]] * 4
inputs = [['1,2,3,4', '5,6,7,8']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_float(self):
record_defaults = [[0.0]] * 4
    inputs = [['1.0,2.1,3.2,4.3', '5.4,6.5,7.6,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_string(self):
record_defaults = [['']] * 4
inputs = [['1.0,2.1,hello,4.3', '5.4,6.5,goodbye,8.7']]
    self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withEmptyFields(self):
record_defaults = [[0]] * 4
inputs = [[',,,', '1,1,1,', ',2,2,2']]
self._test_dataset(
inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]],
record_defaults=record_defaults)
def testCsvDataset_errWithUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4']]
self._test_dataset(
inputs,
expected_err_re='Unquoted fields cannot have quotes inside',
record_defaults=record_defaults)
def testCsvDataset_errWithUnescapedQuotes(self):
record_defaults = [['']] * 3
inputs = [['"a"b","c","d"']]
self._test_dataset(
inputs,
expected_err_re=
'Quote inside a string has to be escaped by another quote',
record_defaults=record_defaults)
def testCsvDataset_ignoreErrWithUnescapedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,"2"3",4', '1,"2"3",4",5,5', 'a,b,"c"d"', 'e,f,g']]
filenames = self._setup_files(inputs)
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
dataset = dataset.apply(error_ops.ignore_errors())
self._verify_output_or_err(dataset, [['e', 'f', 'g']])
def testCsvDataset_ignoreErrWithUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4', 'a,b,c"d', '9,8"7,6,5', 'e,f,g']]
filenames = self._setup_files(inputs)
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
dataset = dataset.apply(error_ops.ignore_errors())
self._verify_output_or_err(dataset, [['e', 'f', 'g']])
def testCsvDataset_withNoQuoteDelimAndUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, use_quote_delim=False)
def testCsvDataset_mixedTypes(self):
record_defaults = [
constant_op.constant([], dtype=dtypes.int32),
constant_op.constant([], dtype=dtypes.float32),
constant_op.constant([], dtype=dtypes.string),
constant_op.constant([], dtype=dtypes.float64)
]
inputs = [['1,2.1,3.2,4.3', '5,6.5,7.6,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withUseQuoteDelimFalse(self):
record_defaults = [['']] * 4
inputs = [['1,2,"3,4"', '"5,6",7,8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, use_quote_delim=False)
def testCsvDataset_withFieldDelim(self):
record_defaults = [[0]] * 4
inputs = [['1:2:3:4', '5:6:7:8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, field_delim=':')
def testCsvDataset_withNaValue(self):
record_defaults = [[0]] * 4
inputs = [['1,NA,3,4', 'NA,6,7,8']]
    self._test_by_comparison(
        inputs, record_defaults=record_defaults, na_value='NA')
|
HAOYU-LI/UniDOE
|
Scraper/WD2.py
|
Python
|
apache-2.0
| 590
| 0.020339
|
from scraper import *
WD2_url = "http://www.cms-ud.com/UD/table/WD2.htm"; crit_name_WD2 = "WD2/"
WD2_list = find_urls(url=WD2_url,crit_name=crit_name_WD2)
for url in WD2_list:
try:
url = url.replace('^','%5E')
url = "http://www.cms-ud.com/UD/table/"+url
design = find_design(url)
#print(design)
run,factor,level = find_design_size(url,"WD2")
file_name = "WD2_"+str(run)+"_"+str(factor)+"_"+str(level)
        save_design(design, name=file_name, save_path='./design_data/WD2/')
    except Exception:
        print("Some error happened at url: %s" % url)
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_sync.py
|
Python
|
apache-2.0
| 1,468
| 0.000681
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateContext
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_MetadataService_CreateContext_sync]
from google.cloud import aiplatform_v1
def sample_create_context():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateContextRequest(
parent="parent_value",
)
# Make the request
response = client.create_context(request=request)
# Handle the response
print(response)
# [END aiplatform_v1_generated_MetadataService_CreateContext_sync]
|
tensorflow/similarity
|
tensorflow_similarity/stores/__init__.py
|
Python
|
apache-2.0
| 1,292
| 0.000774
|
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Key Values Stores store the data associated with the embeddings indexed by
the `Indexer()`.
Each key of the store represent a **record** that contains information
about a given embedding.
The main use-case for the store is to retrieve the records associated
with the ids returned by a nearest neighbor search performed with the
[`Search()` module](../search/).
Additionally, one might want to inspect the content of the index, which is why
the `Store()` class may implement an export to
a [Pandas Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
via the `to_pandas()` method.
"""
from .store import Store # noqa
from .memory_store import MemoryStore # noqa
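
# Minimal usage sketch grounded in the docstring above. Only `to_pandas()` is
# documented here; the exact record accessors are assumptions:
#
#   store = MemoryStore()
#   idx = store.add(embedding, label, data)    # index one record
#   embedding, label, data = store.get(idx)    # fetch it back by id
#   df = store.to_pandas()                     # inspect the whole index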
|
telefonicaid/fiware-keystone-spassword
|
keystone_spassword/tests/unit/contrib/spassword/test_checker.py
|
Python
|
apache-2.0
| 1,246
| 0.002408
|
#
# Copyright 2014 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for SPASSWORD checker."""
from keystone import tests
from keystone import exception
from keystone_spassword.contrib.spassword import checker
class TestPasswordChecker(tests.BaseTestCase):
    def test_checker(self):
new_password = "stronger"
        # Pass the callable and its argument separately so assertRaises
        # can catch the exception raised inside the call.
        self.assertRaises(exception.ValidationError,
                          checker.strong_check_password,
                          new_password)
|
cwdtom/qqbot
|
tom/findbilibili.py
|
Python
|
gpl-3.0
| 4,769
| 0.009097
|
# -*- coding: utf-8 -*-
__author__ = 'Tom Chen'
import urllib2,sys,re,time
from sgmllib import SGMLParser
from datetime import datetime,date
from urllib import unquote,quote
default_encoding = 'utf-8' # use UTF-8 as the default file encoding
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
class findbilibili(SGMLParser): # parser for the HTML source
def __init__(self):
SGMLParser.__init__(self)
self.is_script = ''
self.url = []
self.videonum = []
self.is_li = ''
self.seasonnum = []
self.season = []
self.is_a = ''
self.num = []
def start_script(self,attrs):
try:
if attrs[0][0] == 'language' and attrs[0][1] == 'javascript':
self.is_script = 'num'
except IndexError:
pass
def end_script(self):
self.is_script = ""
def start_li(self,attrs):
try:
            if attrs[0][0] == 'season_id' and attrs[1][0] == 'id':
                if re.match(r's_\d+', attrs[1][1]):
self.is_li = 'season'
self.seasonnum.append(attrs[0][1])
except IndexError:
pass
def end_li(self):
self.is_li = ''
def start_a(self,attrs):
try:
if attrs[0][0] == 'class' and attrs[0][1] == 't':
if attrs[1][0] == 'href' and re.match(r'/video/av\d+',attrs[1][1]):
if attrs[2][0] == 'target' and attrs[2][1] == '_blank':
self.is_a = 'url'
self.url.append(attrs[1][1])
except IndexError:
pass
def end_a(self):
self.is_a = ''
def handle_data(self, data):
if self.is_script == 'num':
self.videonum.append(data)
if self.is_li == 'season':
self.season.append(data)
if self.is_a == 'url':
self.num.append(data)
# function name [bilibili]
# scrape anime episode URLs from bilibili
# param string: anime title
# return array[array]: a 2-D array [[episode number, url], [episode number, url], ...]
def bilibili(sname):
name = sname
name = unquote(name)
l = name.split(' ')
m = []
s = ''
rename = re.compile('第')
if len(l) != 1:
s = l[len(l)-1]
if rename.findall(s):
m = name.split(s)
else:
m.append(name)
if s == '续':
s = '第二季'
else:
m.append(name)
m[0] = quote(m[0])
if name == '无头骑士异闻录×2 转':
s = name
if name == '无头骑士异闻录×2 承':
s = name
url = 'http://www.bilibili.com/sp/'+m[0]
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' # spoof a browser user agent
headers = { 'User-Agent' : user_agent }
request = urllib2.Request(url, headers=headers)
try:
content = urllib2.urlopen(request).read()
except urllib2.HTTPError:
return []
listname = findbilibili()
listname.feed(content)
rename = re.compile(r'\d+')
try:
videoid = rename.findall(listname.videonum[0])
except IndexError:
return []
videoid2 = ''
try:
n = len(listname.season)
a = 0
for a in range(n):
if listname.season[a] == s:
videoid2 = listname.seasonnum[a]
break
if videoid2 == '':
videoid2 = listname.seasonnum[0]
except IndexError:
pass
if videoid2:
y = '-'
else:
y = ''
try:
url = 'http://www.bilibili.com/sppage/bangumi-'+videoid[0]+y+videoid2+'-1.html'
except IndexError:
return []
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' # spoof a browser user agent
headers = { 'User-Agent' : user_agent }
request = urllib2.Request(url, headers=headers)
content = urllib2.urlopen(request).read()
listname = findbilibili()
listname.feed(content)
n = len(listname.url)
a = 0
for a in range(n):
listname.url[a] = 'http://www.bilibili.com'+listname.url[a]
rename = re.compile(r'\d+')
l = []
for a in range(n):
z = rename.findall(listname.num[a])
zz = ''.join(z)
l.append(zz)
dname = []
qname = []
a = 0
for a in range(n):
x = []
x.append(l[a])
x.append(listname.url[a])
qname.append(x)
for a in range(n):
dname.append(qname[n - a -1])
return dname
if __name__ == '__main__':
name = '噬神者'.encode('gbk')
newname = name.decode('gbk')
newname = newname.encode('utf-8')
print newname
bilibili(newname)
|
Jozhogg/iris
|
tools/generate_std_names.py
|
Python
|
lgpl-3.0
| 4,434
| 0.001579
|
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A script to convert the standard names information from the provided XML
file into a Python dictionary format.
Takes two arguments: the first is the XML file to process and the second
is the name of the file to write the Python dictionary file into.
By default, Iris will use the source XML file:
etc/cf-standard-name-table.xml
as obtained from:
http://cf-pcmdi.llnl.gov/documents/cf-standard-names
"""
from __future__ import (absolute_import, division, print_function)
import argparse
import pprint
import xml.etree.ElementTree as ET
STD_VALUES_FILE_TEMPLATE = '''
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
This file contains a dictionary of standard value names that are mapped
to another dictionary of other standard name attributes. Currently only
the `canonical_unit` exists in these attribute dictionaries.
This file is automatically generated. Do not edit this file by hand.
The file will be generated during a standard build/installation:
python setup.py build
python setup.py install
Also, the file can be re-generated in the source distribution via:
python setup.py std_names
Or for more control (e.g. to use an alternative XML file) via:
python tools/generate_std_names.py XML_FILE MODULE_FILE
"""
from __future__ import (absolute_import, division, print_function)
STD_NAMES = '''.lstrip()
def process_name_table(tree, element_name, *child_elements):
"""
Yields a series of dictionaries with the key being the id of the entry element and the value containing
another dictionary mapping other attributes of the standard name to their values, e.g. units, description, grib value etc.
"""
for elem in tree.iterfind(element_name):
sub_section = {}
for child_elem in child_elements:
found_elem = elem.find(child_elem)
sub_section[child_elem] = found_elem.text if found_elem is not None else None
yield {elem.get("id") : sub_section}
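
# For example, an <entry id="air_temperature"> element whose canonical_units
# child holds "K" is yielded as {'air_temperature': {'canonical_units': 'K'}}.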
def to_dict(infile, outfile):
values = {}
aliases = {}
tree = ET.parse(infile)
for section in process_name_table(tree, 'entry', 'canonical_units'):
values.update(section)
for section in process_name_table(tree, 'alias', 'entry_id'):
aliases.update(section)
for key, valued in aliases.iteritems():
values.update({
key : {'canonical_units' : values.get(valued['entry_id']).get('canonical_units')}
})
outfile.write(STD_VALUES_FILE_TEMPLATE + pprint.pformat(values))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Create Python code from CF standard name XML.')
parser.add_argument('input', type=argparse.FileType(),
metavar='INPUT',
help='Path to CF standard name XML')
parser.add_argument('output', type=argparse.FileType('w'),
metavar='OUTPUT',
help='Path to resulting Python code')
    args = parser.parse_args()
to_dict(args.input, args.output)
|
rh-s/heat
|
heat_integrationtests/scenario/test_ceilometer_alarm.py
|
Python
|
apache-2.0
| 2,412
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from heat_integrationtests.common import test
LOG = logging.getLogger(__name__)
class CeilometerAlarmTest(test.HeatIntegrationTest):
"""Class is responsible for testing of ceilometer usage."""
def setUp(self):
super(CeilometerAlarmTest, self).setUp()
self.client = self.orchestration_client
self.template = self._load_template(__file__,
'test_ceilometer_alarm.yaml',
'templates')
def check_instance_count(self, stack_identifier, expected):
stack = self.client.stacks.get(stack_identifier)
actual = self._stack_output(stack, 'asg_size')
if actual != expected:
LOG.warn('check_instance_count exp:%d, act:%s' % (expected,
actual))
return actual == expected
def test_alarm(self):
"""Confirm we can create an alarm and trigger it."""
# 1. create the stack
stack_identifier = self.stack_create(template=self.template)
# 2. send ceilometer a metric (should cause the alarm to fire)
        sample = {}
sample['counter_type'] = 'gauge'
sample['counter_name'] = 'test_meter'
sample['counter_volume'] = 1
sample['counter_unit'] = 'count'
sample['resource_metadata'] = {'metering.stack_id':
stack_identifier.split('/')[-1]}
sample['resource_id'] = 'shouldnt_matter'
self.metering_client.samples.create(**sample)
# 3. confirm we get a scaleup.
# Note: there is little point waiting more than 60s+time to scale up.
self.assertTrue(test.call_until_true(
120, 2, self.check_instance_count, stack_identifier, 2))
|
pinax/pinax_theme_tester
|
pinax_theme_tester/configs/stripe.py
|
Python
|
mit
| 3,913
| 0.0046
|
import decimal
from datetime import datetime
from django.conf import settings
from django.conf.urls import url, include
from pinax.stripe.forms import PlanForm
from .base import ViewConfig
invoices = [
    dict(date=datetime(2017, 10, 1), subscription=dict(plan=dict(name="Pro")), period_start=datetime(2017, 10, 1), period_end=datetime(2017, 10, 31), total=decimal.Decimal("9.99"), paid=False),
dict(date=datetime(2017, 9, 1), subscription=dict(plan=dict(name="Pro")), period_start=datetime(2017, 9, 1), period_end=datetime(2017, 9, 30), total=decimal.Decimal("9.99"), paid=True),
    dict(date=datetime(2017, 8, 1), subscription=dict(plan=dict(name="Beginner")), period_start=datetime(2017, 8, 1), period_end=datetime(2017, 8, 31), total=decimal.Decimal("5.99"), paid=True),
dict(date=datetime(2017, 7, 1), subscription=dict(plan=dict(name="Beginner")), period_start=datetime(2017, 7, 1), period_end=datetime(2017, 7, 30), total=decimal.Decimal("5.99"), paid=True),
]
card = dict(pk=1, brand="Visa", last4="4242", exp_month="10", exp_year="2030", created_at=datetime(2016, 4, 5))
methods = [
card
]
subscription = dict(pk=1, current_period_start=datetime(2017, 10, 1), current_period_end=datetime(2017, 10, 31), plan=dict(name="Pro"), start=datetime(2017, 10, 1), status="active", invoice_set=dict(all=invoices))
subscriptions = [
subscription
]
patch = "http://pinaxproject.com/pinax-design/patches/pinax-stripe.svg"
label = "stripe"
title = "Pinax Stripe"
views = [
ViewConfig(pattern=r"^invoices-empty/$", template="pinax/stripe/invoice_list.html", name="invoice_list_empty", pattern_kwargs={}, object_list=[]),
ViewConfig(pattern=r"^invoices/$", template="pinax/stripe/invoice_list.html", name="pinax_stripe_invoice_list", pattern_kwargs={}, object_list=invoices),
ViewConfig(pattern=r"^methods-empty/$", template="pinax/stripe/paymentmethod_list.html", name="method_list_empty", pattern_kwargs={}, object_list=[]),
ViewConfig(pattern=r"^methods/$", template="pinax/stripe/paymentmethod_list.html", name="pinax_stripe_payment_method_list", pattern_kwargs={}, object_list=methods),
ViewConfig(pattern=r"^methods/create/$", template="pinax/stripe/paymentmethod_create.html", name="pinax_stripe_payment_method_create", pattern_kwargs={}, PINAX_STRIPE_PUBLIC_KEY=settings.PINAX_STRIPE_PUBLIC_KEY),
ViewConfig(pattern=r"^methods/update/(?P<pk>\d+)/$", template="pinax/stripe/paymentmethod_update.html", name="pinax_stripe_payment_method_update", pattern_kwargs={"pk": 1}, object=card),
ViewConfig(pattern=r"^methods/delete/(?P<pk>\d+)/", template="pinax/stripe/paymentmethod_delete.html", name="pinax_stripe_payment_method_delete", pattern_kwargs={"pk": 1}, object=card),
ViewConfig(pattern=r"^subscriptions-empty/$", template="pinax/stripe/subscription_list.html", name="subscription_list_empty", pattern_kwargs={}, object_list=[]),
ViewConfig(pattern=r"^subscriptions/$", template="pinax/stripe/subscription_list.html", name="pinax_stripe_subscription_list", pattern_kwargs={}, object_list=subscriptions),
ViewConfig(pattern=r"^subscriptions/create/$", template="pinax/stripe/subscription_create.html", name="pinax_stripe_subscription_create", pattern_kwargs={}, form=PlanForm(), request=dict(user=dict(customer=dict(default_source="foo")))),
ViewConfig(pattern=r"^subscriptions/update/(?P<pk>\d+)/$", template="pinax/stripe/subscription_update.html", name="pinax_stripe_subscription_update", pattern_kwargs={"pk": 1}, object=subscription, form=PlanForm(), PINAX_STRIPE_PUBLIC_KEY=settings.PINAX_STRIPE_PUBLIC_KEY),
ViewConfig(pattern=r"^subscriptions/delete/(?P<pk>\d+)/", template="pinax/stripe/subscription_delete.html", name="pinax_stripe_subscription_delete", pattern_kwargs={"pk": 1}, object=subscription),
]
urlpatterns = [
view.url()
for view in views
]
url = url(r"payments/", include("pinax_theme_tester.configs.stripe"))
|
fsalmoir/PyGeM
|
pygem/stlhandler.py
|
Python
|
mit
| 3,986
| 0.032614
|
"""
Derived module from filehandler.py to handle STereoLithography files.
"""
import numpy as np
from mpl_toolkits import mplot3d
from matplotlib import pyplot
from stl import mesh, Mode
import pygem.filehandler as fh
class StlHandler(fh.FileHandler):
"""
STereoLithography file handler class
:cvar string infile: name of the input file to be processed.
:cvar string outfile: name of the output file where to write in.
:cvar string extension: extension of the input/output files. It is equal to '.stl'.
"""
def __init__(self):
super(StlHandler, self).__init__()
self.extension = '.stl'
def parse(self, filename):
"""
Method to parse the `filename`. It returns a matrix with all the coordinates.
:param string filename: name of the input file.
:return: mesh_points: it is a `n_points`-by-3 matrix containing the coordinates of
the points of the mesh
:rtype: numpy.ndarray
.. todo::
- specify when it works
"""
self._check_filename_type(filename)
self._check_extension(filename)
self.infile = filename
stl_mesh = mesh.Mesh.from_file(self.infile)
mesh_points = np.array([stl_mesh.x.ravel(), stl_mesh.y.ravel(), stl_mesh.z.ravel()])
mesh_points = mesh_points.T
return mesh_points
def write(self, mesh_points, filename, write_bin=False):
"""
Writes a stl file, called filename, copying all the lines from self.filename but
the coordinates. mesh_points is a matrix that contains the new coordinates to
write in the stl file.
:param numpy.ndarray mesh_points: it is a `n_points`-by-3 matrix containing
the coordinates of the points of the mesh.
:param string filename: name of the output file.
:param boolean write_bin: flag to write in the binary format. Default is False.
"""
self._check_filename_type(filename)
self._check_extension(filename)
self._check_infile_instantiation(self.infile)
self.outfile = filename
n_vertices = mesh_points.shape[0]
# number of triplets of vertices
        n_triplets = n_vertices // 3
data = np.zeros(n_triplets, dtype=mesh.Mesh.dtype)
stl_mesh = mesh.Mesh(data, remove_empty_areas=False)
for i in range(0, n_triplets):
for j in range(0, 3):
data['vectors'][i][j] = mesh_points[3*i + j]
if not write_bin:
stl_mesh.save(self.outfile, mode=Mode.ASCII, update_normals=True)
else:
stl_mesh.save(self.outfile, update_normals=True)
def plot(self, plot_file=None, save_fig=False):
"""
Method to plot an stl file. If `plot_file` is not given it plots `self.infile`.
:param string plot_file: the stl filename you want to plot.
:param bool save_fig: a flag to save the figure in png or not. If True the
            plot is not shown.
:return: figure: matlplotlib structure for the figure of the chosen geometry
:rtype: matplotlib.pyplot.figure
"""
if plot_file is None:
plot_file = self.infile
else:
self._check_filename_type(plot_file)
# Create a new plot
figure = pyplot.figure()
axes = mplot3d.Axes3D(figure)
# Load the STL files and add the vectors to the plot
stl_mesh = mesh.Mesh.from_file(plot_file)
axes.add_collection3d(mplot3d.art3d.Poly3DCollection(stl_mesh.vectors))
## Get the limits of the axis and center the geometry
max_dim = np.array([np.max(stl_mesh.vectors[:,:,0]), \
np.max(stl_mesh.vectors[:,:,1]), \
np.max(stl_mesh.vectors[:,:,2])])
min_dim = np.array([np.min(stl_mesh.vectors[:,:,0]), \
np.min(stl_mesh.vectors[:,:,1]), \
np.min(stl_mesh.vectors[:,:,2])])
        max_length = np.max(max_dim - min_dim)
        axes.set_xlim(-.6*max_length + (max_dim[0]+min_dim[0])/2, .6*max_length + (max_dim[0]+min_dim[0])/2)
        axes.set_ylim(-.6*max_length + (max_dim[1]+min_dim[1])/2, .6*max_length + (max_dim[1]+min_dim[1])/2)
        axes.set_zlim(-.6*max_length + (max_dim[2]+min_dim[2])/2, .6*max_length + (max_dim[2]+min_dim[2])/2)
# Show the plot to the screen
if not save_fig:
pyplot.show()
else:
figure.savefig(plot_file.split('.')[0] + '.png')
return figure
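
# Minimal usage sketch of the handler above (the filenames are placeholders):
#
#   handler = StlHandler()
#   points = handler.parse('original.stl')   # (n_points, 3) coordinate matrix
#   points[:, 2] += 1.0                      # shift the geometry along z
#   handler.write(points, 'shifted.stl')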
|
nagyistoce/devide
|
module_kits/vtk_kit/__init__.py
|
Python
|
bsd-3-clause
| 3,965
| 0.003279
|
# $Id$
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""vtk_kit package driver file.
This performs all initialisation necessary to use VTK from DeVIDE. Makes
sure that all VTK classes have ErrorEvent handlers that report back to
the ModuleManager.
Inserts the following modules in sys.modules: vtk, vtkdevide.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import re
import sys
import traceback
import types
VERSION = ''
def preImportVTK(progressMethod):
vtkImportList = [('vtk.common', 'VTK Common.'),
('vtk.filtering', 'VTK Filtering.'),
('vtk.io', 'VTK IO.'),
('vtk.imaging', 'VTK Imaging.'),
('vtk.graphics', 'VTK Graphics.'),
('vtk.rendering', 'VTK Rendering.'),
('vtk.hybrid', 'VTK Hybrid.'),
#('vtk.patented', 'VTK Patented.'),
('vtk', 'Other VTK symbols')]
# set the dynamic loading flags. If we don't do this, we get strange
# errors on 64 bit machines. To see this happen, comment this statement
# and then run the VTK->ITK connection test case.
oldflags = setDLFlags()
percentStep = 100.0 / len(vtkImportList)
currentPercent = 0.0
# do the imports
for module, message in vtkImportList:
currentPercent += percentStep
progressMethod(currentPercent, 'Initialising vtk_kit: %s' % (message,),
noTime=True)
exec('import %s' % (module,))
# restore previous dynamic loading flags
resetDLFlags(oldflags)
def setDLFlags():
# brought over from ITK Wrapping/CSwig/Python
# Python "help(sys.setdlopenflags)" states:
#
# setdlopenflags(...)
# setdlopenflags(n) -> None
#
# Set the flags that will be used for dlopen() calls. Among other
# things, this will enable a lazy resolving of symbols when
# importing a module, if called as sys.setdlopenflags(0) To share
# symbols across extension modules, call as
#
# sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
#
# GCC 3.x depends on proper merging of symbols for RTTI:
# http://gcc.gnu.org/faq.html#dso
#
try:
import dl
newflags = dl.RTLD_NOW|dl.RTLD_GLOBAL
except:
newflags = 0x102 # No dl module, so guess (see above).
try:
oldflags = sys.getdlopenflags()
sys.setdlopenflags(newflags)
except:
oldflags = None
return oldflags
def resetDLFlags(data):
# brought over from ITK Wrapping/CSwig/Python
# Restore the original dlopen flags.
try:
sys.setdlopenflags(data)
except:
pass
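
# Sketch of the intended call pattern (mirrors preImportVTK above; the module
# name is a placeholder):
#
#   oldflags = setDLFlags()
#   import some_vtk_extension   # symbols shared across extension modules
#   resetDLFlags(oldflags)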
def init(module_manager, pre_import=True):
# first do the VTK pre-imports: this is here ONLY to keep the user happy
# it's not necessary for normal functioning
if pre_import:
preImportVTK(module_manager.setProgress)
# import the main module itself
# the global is so that users can also do:
# from module_kits import vtk_kit
# vtk_kit.vtk.vtkSomeFilter()
global vtk
import vtk
# and do the same for vtkdevide
global vtkdevide
import vtkdevide
# load up some generic functions into this namespace
# user can, after import of module_kits.vtk_kit, address these as
# module_kits.vtk_kit.blaat. In this case we don't need "global",
# as these are modules directly in this package.
import module_kits.vtk_kit.misc as misc
import module_kits.vtk_kit.mixins as mixins
import module_kits.vtk_kit.utils as utils
import module_kits.vtk_kit.constants as constants
import module_kits.vtk_kit.color_scales as color_scales
# setup the kit version
global VERSION
VERSION = '%s' % (vtk.vtkVersion.GetVTKVersion(),)
|
ivyl/patchwork
|
patchwork/models.py
|
Python
|
gpl-2.0
| 33,987
| 0.000382
|
# Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <jk@ozlabs.org>
# Copyright (C) 2015 Intel Corporation
#
# This file is part of the Patchwork package.
#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from collections import Counter, OrderedDict
import datetime
import jsonfield
import random
import re
import patchwork.threadlocalrequest as threadlocalrequest
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
import django.dispatch
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.six.moves import filter
from patchwork.fields import HashField
from patchwork.parser import hash_patch, extract_tags
@python_2_unicode_compatible
class Person(models.Model):
email = models.CharField(max_length=255, unique=True)
name = models.CharField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User, null=True, blank=True,
on_delete=models.SET_NULL)
def display_name(self):
if self.name:
return self.name
else:
return self.email
def email_name(self):
        if self.name:
return "\"%s\" <%s>" % (self.name, self.email)
else:
return self.email
def link_to_user(self, user):
self.name = user.profile.name()
self.user = user
def __str__(self):
return self.display_name()
class Meta:
verbose_name_plural = 'People'
def get_comma_separated_field(value):
if not value:
return []
tags = [v.strip() for v in value.split(',')]
tags = [tag for tag in tags if tag]
return tags
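
# For example, get_comma_separated_field('a, b, ,c') returns ['a', 'b', 'c'].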
@python_2_unicode_compatible
class Project(models.Model):
linkname = models.CharField(max_length=255, unique=True)
name = models.CharField(max_length=255, unique=True)
description = models.TextField(blank=True, null=True)
listid = models.CharField(max_length=255)
listemail = models.CharField(max_length=200)
web_url = models.CharField(max_length=2000, blank=True)
scm_url = models.CharField(max_length=2000, blank=True)
webscm_url = models.CharField(max_length=2000, blank=True)
send_notifications = models.BooleanField(default=False)
use_tags = models.BooleanField(default=True)
git_send_email_only = models.BooleanField(default=False)
subject_prefix_tags = models.CharField(max_length=255, blank=True,
help_text='Comma separated list of tags')
@cached_property
def tags(self):
if not self.use_tags:
return []
return list(Tag.objects.all())
def get_subject_prefix_tags(self):
return get_comma_separated_field(self.subject_prefix_tags)
def get_listemail_tag(self):
return self.listemail.split("@")[0]
def __str__(self):
return self.name
class Meta:
ordering = ['linkname']
def user_name(user):
if user.first_name or user.last_name:
names = list(filter(bool, [user.first_name, user.last_name]))
return u' '.join(names)
return user.username
auth.models.User.add_to_class('name', user_name)
@python_2_unicode_compatible
class DelegationRule(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
path = models.CharField(max_length=255)
project = models.ForeignKey(Project,
on_delete=models.CASCADE)
priority = models.IntegerField(default=0)
def __str__(self):
return self.path
class Meta:
ordering = ['-priority', 'path']
unique_together = (('path', 'project'))
@python_2_unicode_compatible
class UserProfile(models.Model):
user = models.OneToOneField(User, unique=True, related_name='profile',
on_delete=models.CASCADE)
primary_project = models.ForeignKey(Project, null=True, blank=True,
on_delete=models.CASCADE)
maintainer_projects = models.ManyToManyField(Project,
related_name='maintainer_project', blank=True)
send_email = models.BooleanField(default=False,
help_text='Selecting this option allows patchwork to send '
'email on your behalf')
patches_per_page = models.PositiveIntegerField(
default=100, null=False, blank=False,
help_text='Number of patches to display per page')
def name(self):
return user_name(self.user)
def contributor_projects(self):
submitters = Person.objects.filter(user=self.user)
return Project.objects.filter(id__in=Patch.objects.filter(
submitter__in=submitters)
.values('project_id').query)
def sync_person(self):
pass
def n_todo(self):
return self.todo_patches().count() + self.todo_series().count()
def todo_patches(self, project=None):
# filter on project, if necessary
if project:
qs = Patch.objects.filter(project=project)
else:
qs = Patch.objects
qs = qs.filter(archived=False) \
.filter(delegate=self.user) \
.filter(state__in=State.objects.filter(action_required=True)
.values('pk').query)
return qs
def todo_series(self, project=None):
# filter on project, if necessary
if project:
qs = Series.objects.filter(project=project)
else:
qs = Series.objects
qs = qs.filter(Q(reviewer=self.user),
~Q(last_revision__state=RevisionState.DONE))
return qs
def __str__(self):
return self.name()
def _user_saved_callback(sender, created, instance, **kwargs):
try:
profile = instance.profile
except UserProfile.DoesNotExist:
profile = UserProfile(user=instance)
profile.save()
models.signals.post_save.connect(_user_saved_callback, sender=User)
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=100)
ordering = models.IntegerField(unique=True)
action_required = models.BooleanField(default=True)
@classmethod
def from_string(cls, name):
return State.objects.get(name__iexact=name)
def __str__(self):
return self.name
class Meta:
ordering = ['ordering']
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=20)
pattern = models.CharField(max_length=50,
help_text='A simple regex to match the tag in the content of '
'a message. Will be used with MULTILINE and IGNORECASE '
'flags. eg. ^Acked-by:')
abbrev = models.CharField(max_length=2, unique=True,
help_text='Short (one-or-two letter) abbreviation for the tag, '
'used in table column headers')
@property
def attr_name(self):
return 'tag_%d_count' % self.id
def __str__(self):
return self.name
class Meta:
ordering = ['abbrev']
class PatchTag(models.Model):
patch = models.ForeignKey('Patch', on_delete=models.CASCADE)
tag = models.ForeignKey('Tag', on_delete=models.CASCADE)
count = models.IntegerField(default=1)
class Meta:
unique_together = [('patch', 'tag')]
def get_default_initial_patch_state():
|
GordonWang/JM-VIP
|
JMVIPCrawler/items.py
|
Python
|
apache-2.0
| 7,573
| 0.015325
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
import scrapy.log
import datetime
def now():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
class JmProductItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
    name = scrapy.Field()            # product name
    count = scrapy.Field()           # number of items under this brand
    regular_price = scrapy.Field()   # regular price
    deal_price = scrapy.Field()      # discounted price
    saled_volumn = scrapy.Field()    # sales volume
    discount = scrapy.Field()        # discount
    capacity = scrapy.Field()        # capacity, e.g. 30ml
    is_sample = scrapy.Field()       # whether it is a sample size
    is_jumei = scrapy.Field()        # whether it is sold by Jumei directly
    vendor = scrapy.Field()          # vendor
    stored_date = scrapy.Field()     # crawl date
    is_top3 = scrapy.Field()         # whether it is a top-3 product
    brand = scrapy.Field()           # brand
    favorite = scrapy.Field()        # number of favorites
    brand_id = scrapy.Field()        # brand primary key id
    is_hot_sale = scrapy.Field()     # whether it is a hot-sale item
def record_exist(self, db_connection):
cur = db_connection.cursor()
count = cur.execute("SELECT * FROM product WHERE name = %s AND stored_date = %s", (self['name'], self['stored_date']))
cur.close()
return count > 0
def save2mysql(self, db_connection):
cur = db_connection.cursor()
ts = now()
is_jumei = (self['vendor'].find("聚美优品") >= 0)
cur.execute("INSERT INTO product (brand_id, name, count, regular_price, deal_price, saled_volumn, discount, capacity, is_sample, is_jumei, vendor, stored_date, brand_name, insert_time, is_hot_sale) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", (self['brand_id'], self['name'], self['count'], self['regular_price'], self['deal_price'], self['saled_volumn'], self['discount'], self['capacity'], self['is_sample'], is_jumei, self['vendor'], self['stored_date'], self['brand'], ts, self['is_hot_sale']))
db_connection.commit()
cur.close()
def save2file(self, f):
# test function
f.write("<JmProductItem> name:{NAME}, count:{COUNT}, regular_price:{RPRICE}, deal_price:{DPRICE}, saled_volumn:{SVOLUMN}, discount:{DISCOUNT}, capacity:{CAPACITY}, is_sample:{SAMPLE}, vendor:{JUMEI}, stored_date:{SDATE}, is_top3:{TOP3}, brand:{BRAND}, favorite:{FAVORITE, is_hot_sale:{HOT_SALE}}".format(NAME = self['name'], COUNT = self['count'], RPRICE = self['regular_price'], DPRICE = self['deal_price'], SVOLUMN = self['saled_volumn'], DISCOUNT = self['discount'], CAPACITY = self['capacity'], SAMPLE = self['is_sample'], JUMEI = self['vendor'], SDATE = self['stored_date'], TOP3 = self['is_top3'], BRAND = self['brand'], FAVORITE = self['favorite'], HOT_SALE = self['is_hot_sale']))
def log_self(self, loglevel = scrapy.log.DEBUG):
scrapy.log.msg("<JmProductItem> name:{NAME}, count:{COUNT}, regular_price:{RPRICE}, deal_price:{DPRICE}, saled_volumn:{SVOLUMN}, discount:{DISCOUNT}, capacity:{CAPACITY}, is_sample:{SAMPLE}, vendor:{JUMEI}, stored_date:{SDATE}, is_top3:{TOP3}, brand:{BRAND}, favorite:{FAVORITE}, is_hot_sale:{HOT_SALE}".format(NAME = self['name'], COUNT = self['count'], RPRICE = self['regular_price'], DPRICE = self['deal_price'], SVOLUMN = self['saled_volumn'], DISCOUNT = self['discount'], CAPACITY = self['capacity'], SAMPLE = self['is_sample'], JUMEI = self['vendor'], SDATE = self['stored_date'], TOP3 = self['is_top3'], BRAND = self['brand'], FAVORITE = self['favorite'], HOT_SALE = self['is_hot_sale']), loglevel)
def to_str(self):
print "name:%s, count:%s, regular_price:%s, deal_price:%s, saled_volumn:%s, discount:%s, capacity:%s, is_sample:%s, vendor:%s, stored_date:%s, is_top3:%s, brand:%s, favorite:%s, is_hot_sale:%s" % (self.name, self.count, self.regular_price, self.deal_price, self.saled_volumn, self.discount, self.capacity, self.is_sample, self.vendor, self.stored_date, self.is_top3, self.brand, self.favorite, self.is_hot_sale)
class JmPromotionItem(scrapy.Item):
    name = scrapy.Field()             # promotion (special sale) name
    promotion_count = scrapy.Field()  # number of sale sessions
stored_date = scrapy.Field()
def record_exist(self, db_connection):
cur = db_connection.cursor()
count = cur.execute("SELECT * FROM promotion where name = %s AND stored_date = %s", (self['name'], self['stored_date']))
cur.close()
return count > 0
def save2mysql(self, db_connection):
cur = db_connection.cursor()
ts = now()
cur.execute("INSERT INTO promotion (name, promotion_count, stored_date, insert_time) VALUES (%s, %s, %s, %s)", (self['name'], self['promotion_count'], self['stored_date'], ts))
db_connection.commit()
cur.close()
def to_str(self):
return "name:{NAME}, promotion_count:{PCOUNT}, stored_date:{SDATE}".format(NAME = self['name'], PCOUNT = self['promotion_count'], SDATE = self['stored_date'])
def log_self(self, loglevel = scrapy.log.DEBUG):
scrapy.log.msg("<JmPromotionItem> name:%s, promotion_count:%s, stored_date:%s" % (self['name'], self['promotion_count'], self['stored_date']), loglevel)
class VipPromotionItem(scrapy.Item):
name = scrapy.Field()
location = scrapy.Field()
chs_brand = scrapy.Field()
eng_brand = scrapy.Field()
sku_count = scrapy.Field()
discount = scrapy.Field()
    category_id = scrapy.Field()
stored_date = scrapy.Field()
is_hot_sale = scrapy.Field()
def is_crawled(self, db_connection):
cur = db_connection.cursor()
count = cur.execute("SELECT id FROM promotion WHERE name=%s AND stored_date=%s AND sku_count IS NOT NULL", (self['name'], self['stored_date']))
cur.close()
return count > 0
def record_exist(self, db_connection):
cur = db_connection.cursor()
count = cur.execute("SELECT id FROM promotion WHERE name=%s AND stored_date=%s AND location_in_page=%s", (self['name'], self['stored_date'], self['location']))
cur.close()
return count > 0
def save2mysql(self, db_connection):
cur = db_connection.cursor()
ts = now()
cur.execute("INSERT INTO promotion (name, location_in_page, chs_brand, eng_brand, discount, stored_date, insert_time, category_id, is_hot_sale) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (self['name'], self['location'], self['chs_brand'], self['eng_brand'], self['discount'], self['stored_date'], ts, self['category_id'], self['is_hot_sale']))
db_connection.commit()
cur.close()
def update_sku_count_in_mysql(self, db_connection):
cur = db_connection.cursor()
cur.execute("UPDATE promotion SET sku_count=%s WHERE name=%s AND location_in_page=%s AND stored_date=%s", (self['sku_count'], self['name'], self['location'], self['stored_date']))
db_connection.commit()
cur.close()
def to_str(self):
return "name:%s, location:%s, chs_brand:%s, eng_brand:%s, sku_count:%s, discount:%s, category:%s, is_hot_sale:%s" % (self['name'], self['location'], self['chs_brand'], self['eng_brand'], self['sku_count'], self['discount'], self['category'], self['is_hot_sale'])
def log_self(self, loglevel = scrapy.log.DEBUG):
scrapy.log.msg("name:%s, location:%s, chs_brand:%s, eng_brand:%s, sku_count:%s, discount:%s, category:%s, is_hot_sale:%s" % (self['name'], self['location'], self['chs_brand'], self['eng_brand'], self['sku_count'], self['discount'], self['category'], self['is_hot_sale']), loglevel)
|
cheesyc/basicpython
|
mmm.py
|
Python
|
mit
| 628
| 0.017516
|
howmany = input("How many numbers are you using?: ")
count = howmany
num = []
while count > 0:
for i in howmany:
x = input("Insert number ",i,": ")
num.appe
|
nd(x)
count -= 1
def sort(num):
size = len(num)
for i in range(size):
for j in range(size-i-1):
if(num[j] > num[j+1]):
tmp = num[j]
num[j] = num[j+1]
num[j+1] = tmp
return num
srted = sort(num)
def mean(num):
    return sum(num) / len(num)
def median(srted):
    # middle element of the sorted list (for an odd-length input)
    return srted[(len(srted) - 1) // 2]
def mode(num):
    # value that appears most often in the list
    best = num[0]
    for i in num:
        if num.count(i) > num.count(best):
            best = i
    return best
|
iesugrace/log-with-git
|
xmlstorage.py
|
Python
|
gpl-2.0
| 13,529
| 0.001626
|
import os
import sys
from record import Record
from timeutils import isodate
from git import Git
import applib
import re
class XmlStorage:
""" XML storage engine for the record
"""
@staticmethod
def setup(dataDir):
engineDir = os.path.join(dataDir, 'xml')
os.makedirs(engineDir, exist_ok=True)
XmlStorage.dataDir = engineDir
XmlStorage.git = Git(engineDir)
return XmlStorage.git
@staticmethod
def sourceToDom(code):
""" Parse the raw record data which is XML code,
return a Xml DOM object.
"""
from xml.dom.minidom import parseString
return parseString(code)
@staticmethod
def load(id, path=None):
""" Load the content of the record from disk,
parse it, and return a record instance.
"""
if not path:
path = XmlStorage.idToPath(id)
try:
code = open(path).read()
doc = XmlStorage.sourceToDom(code)
except:
return None
# collect all fields' data
fields = {}
for node in doc.firstChild.childNodes:
if node.nodeType == node.ELEMENT_NODE:
name = node.localName
textNode = node.firstChild
data = textNode.data if textNode else ''
fields[name] = data
fields = Record.convertFields(fields.items())
return Record(**fields)
@staticmethod
def idToPath(id):
""" Find and return the absolute path of a record
"""
cmd = 'find %s -name %s' % (XmlStorage.dataDir, id)
stat, lines = applib.get_status_text_output(cmd)
if stat and lines:
return lines[0]
else:
return None
@staticmethod
def matchId(id):
""" Return all IDs that starts with 'id'
"""
cmd = 'find %s -name .git -prune -o -name "%s*" -type f -print'
cmd = cmd % (XmlStorage.dataDir, id)
stat, lines = applib.get_status_text_output(cmd)
ids = list(map(os.path.basename, lines))
return ids
@staticmethod
def createNode(root, nodeName, nodeText):
""" Add an element node with nodeText to the 'root' element
"""
from xml.dom.minidom import Element, Text
ele = Element(nodeName)
text = Text()
text.data = nodeText
ele.appendChild(text)
root.appendChild(ele)
@staticmethod
def recordToSource(recordData):
""" Compose Xml source code from a
record's data which is a dict object.
"""
from xml.dom.minidom import Document, Text
import re
doc = Document()
root = doc.createElement("log")
doc.appendChild(root)
items = dict(recordData).items()
fields = Record.convertFields(items, False)
# sort the fields data according to the definition order
orders = {k: v['order'] for k, v in Record.fields.items()}
sortKey = lambda x: orders[x[0]]
fields = sorted(fields.items(), key=sortKey)
for name, value in fields:
XmlStorage.createNode(root, name, value)
xmlCode = doc.toprettyxml()
xmlCode = re.sub('\t', ' ' * 4, xmlCode) # replace tabs with spaces
return xmlCode
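
    # For example, a record with 'time' and 'subject' fields pretty-prints as:
    #
    #   <?xml version="1.0" ?>
    #   <log>
    #       <time>...</time>
    #       <subject>...</subject>
    #   </log>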
@staticmethod
def save(record, oldRecord=None):
""" Convert the record to Xml code, and Write
the code to the disk, record id is the basename
of the record file.
If the oldRecord is provided, this is to change
an existing record. When to change an existing
log, the new log may be saved to a new directory
if its timestamp been changed, in such case the
old log will be deleted.
"""
paths = []
if not getattr(record, 'id', None):
record.id = applib.genId(record.time)
if not oldRecord: # add new record
commitMsg = 'Add log\n\n%s' % record.id
else:
commitMsg = 'Change log\n\n%s' % record.id
if record != oldRecord:
path = XmlStorage.idToPath(oldRecord.id)
paths.append(path)
XmlStorage.__delete(None, path=path)
else:
return
path = XmlStorage.saveRecord(record.elements())
paths.append(path)
# create a git commit
XmlStorage.git.commit(paths, commitMsg)
return record
@staticmethod
def saveRecord(recordData, dir=None):
if not dir:
dir = XmlStorage.dataDir
dateEle = isodate(recordData['time']).split('-')
absDirPath = os.path.join(dir, *dateEle)
os.makedirs(absDirPath, exist_ok=True)
path = os.path.join(absDirPath, recordData['id'])
code = XmlStorage.recordToSource(recordData)
open(path, 'w').write(code)
return path
@staticmethod
def allIds():
""" Return a generator which yields IDs of all log records.
"""
dataDir = XmlStorage.dataDir
cmd = ['find', dataDir, '-name', '.git',
'-prune', '-o', '-type', 'f', '-print0']
res = applib.get_status_byte_output(cmd)
if not res[0]:
print('find command failed:', file=sys.stderr)
print(res[2].decode(), file=sys.stderr, end='')
return
lines = res[1].split(b'\x00')[:-1] # remove the last empty one
for path in lines:
yield os.path.basename(path.decode())
@staticmethod
def __delete(id, path=None):
""" Delete a record, either by id or by path
"""
if not path:
path = XmlStorage.idToPath(id)
os.unlink(path)
@staticmethod
def delete(ids, preAction=(lambda x:False), postAction=(lambda x:0)):
""" Delete multiple records, create a commit
"""
paths = list(map(XmlStorage.idToPath, ids))
deletedPaths = []
deletedBNames = []
for path in paths:
record = XmlStorage.load(None, path)
if not preAction(record):
continue
XmlStorage.__delete(None, path)
postAction(record)
deletedPaths.append(path)
deletedBNames.append(record.id)
if deletedPaths:
message = 'Delete log\n\n%s' % '\n'.join(deletedBNames)
XmlStorage.git.commit(deletedPaths, message)
return True
@staticmethod
def lastLog():
""" Fetch the last added/changed log record
"""
logs = XmlStorage.lastLogs()
if logs:
return logs[0]
else:
return None
@staticmethod
def lastLogs(count=1):
""" Fetch the last 'count' logs record
The paths returned by the git.last may contain
        paths that have been deleted; those shall be ignored.
        When a single record was collected multiple times,
        the redundant ones shall be removed. This can happen
        when the record was changed multiple times (maybe
        plus an add action) within the range.
        """
vCount = count
while True:
ps = XmlStorage.git.last(vCount)
if len(set(ps)) == count:
break
else:
vCount += 1
paths = []
for p in ps:
if p not in paths:
paths.append(p)
records = []
for path in paths:
path = os.path.join(XmlStorage.dataDir, path)
if os.path.exists(path):
record = XmlStorage.load(None, path=path)
records.append(record)
return records
@staticmethod
def makeFilter(tmField, tmPoints, regexps, allMatch=False):
""" Create a filter function for filtering
the record with the given regular expression,
and the time points. The filter function
expects a Record instance object.
"""
def logFilter(record, regexps=regexps, allMatch=allMatch,
tmField=tmField, tmPoints=tmPoints):
""" timeMatch is True if the time of the record is
within any pair of
|
OrkoHunter/nxcpy
|
setup.py
|
Python
|
bsd-3-clause
| 1,193
| 0.020117
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from glob import glob
import os
import sys
from setuptools import setup, Extension
from Cython.Build import cythonize
if sys.version_info[:2] < (2, 7):
print(
'nxcpy requires Python version 2.7 or later' +
' ({}.{} detected).'.format(*sys.version_info[:2]))
# Because networkx does
sys.exit(-1)
libraries = [
('nxcpy', {'sources': glob('src/*.c') + glob('src/*/*.c'),
'depends': glob('src/*.h') + glob('src/*/*.h'),
'include_dirs': ['src']})]
ext_modules = cythonize([
    Extension('*.*', ['*/*.pyx'],
include_dirs=['src'],
libraries=['nxcpy']),
    Extension('*.*.*', ['*/*/*.pyx'],
include_dirs=['src'],
libraries=['nxcpy'])]
)
install_requires = ['networkx', 'decorator']
if __name__ == "__main__":
setup(
name = 'nxcpy',
packages = ['nxcpy'],
libraries = libraries,
ext_modules = ext_modules,
install_requires = install_requires,
test_suite = 'nose.collector',
tests_require = ['nose>=0.10.1']
)
|
anyweez/regis
|
face/management/commands/personalize.py
|
Python
|
gpl-2.0
| 8,409
| 0.006541
|
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from operator import itemgetter
import face.models.models as regis
import random, math
class Command(BaseCommand):
args = 'none'
help = 'Analyze user performance and modify individual question orderings.'
@transaction.commit_manually
def handle(self, *args, **options):
users = regis.User.objects.filter(is_active=True)
print 'Examining %d user(s)...' % len(users)
# TODO: None of these measures currently include adjustments using the so-called
# confidence parameter (theta): (#correct / (#guesses + sqrt(#guesses - avg(#guesses))))
# This list contains a difficulty estimation for this question across
# all users. Indexed by question template ID.
global_diffs = self.get_global_diffs()
# The list containing global correctness rates for users. Indexed by user ID.
pqs = self.get_pqs()
# This list contains a difficulty estimation for a class of questions.
# Indexed by class.
local_diff = self.get_local_diffs()
# The list containing global correctness rates for users by class. Indexed first
# by class, then by users.
pcs = self.get_pcs()
# For each user, find the relevance scores for each question and
# probabilistically order them.
for user in users:
relevance = {}
# print 'User "%s"' % user.username
for template in regis.QuestionTemplate.objects.all():
global_score = abs(global_diffs[template.id] - pqs[user.id])
local_score = 0
for tag in template.categories.all():
local_score += abs(local_diff[tag.id] - pcs[tag.id][user.id])
# Divide by the total number of categories.
if len(template.categories.all()) > 0:
local_score /= len(template.categories.all())
relevance[template.id] = 2 - (global_score + local_score)
relevance[template.id] /= 2
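                # Both component scores lie in [0, 1], so relevance lands in
                # [0, 1] and peaks when the question's difficulty matches the
                # user's ability on both the global and per-class measures.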
# print ' #%d %s: %.3f' % (template.id, template.title, relevance[template.id])
questions = [(key, relevance[key]) for key in relevance.keys()]
questions = sorted(questions, key=itemgetter(1), reverse=True)
order = []
while len(questions) > 0:
# Weigh the relevance so that higher values have a significantly higher probability
# of being drawn. Each value [0, 1] is currently being raise to the third power but
# this parameter can be tweaked. Higher values will make it more probable that
# highly relevant questions will be drawn. If the value gets too high then some of
# the relevance numbers will approach zero, which is not good. Don't do that.
total = math.floor(sum([math.pow(question[1], 3) for question in questions]) * 1000)
count = random.randint(0, total)
current = 0
while count > 0:
                    count -= math.pow(questions[current][1], 3) * 1000
current += 1
order.append(questions[current-1][0])
del questions[current-1]
user_q = regis.Question.objects.exclude(status='retired').filter(user=user)
for question in user_q:
question.order = order.index(question.template.id)
question.save()
# Commit the new ordering to the database as a single transaction.
transaction.commit()
print 'Complete!'
# For each user, compute their personal question score. This score
# represents the ratio of questions that they get correct vs. those that they
# have unlocked. Higher scores indicate a higher level of completion.
#
# Note that this score is normalized to [0, 1] across all active users.
def get_pqs(self):
diffs = {}
for user in regis.User.objects.filter(is_active=True):
solved = regis.Question.objects.filter(user=user, status='solved')
unsolved = regis.Question.objects.filter(user=user, status='released')
diffs[user.id] = (len(solved) * 1.0) / (len(solved) + len(unsolved))
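            # e.g. 3 solved and 1 still-open question gives 3/4 = 0.75
            # before normalization against the best-performing user.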
peak_pgs = max(diffs.values())
for key in diffs.keys():
diffs[key] /= peak_pgs
return diffs
# For each class (tag) AND user, compute the personal class score. This
# represents the ratio of questions that the user has answered correctly,
# similarly to the PQS score, but this is on a per-class basis.
#
# This score is also normalized to [0, 1] across all active users.
def get_pcs(self):
users = regis.User.objects.filter(is_active=True)
diffs = {}
for tag in regis.QuestionTag.objects.all():
diffs[tag.id] = {}
for user in users:
                solved = regis.Question.objects.filter(
                    user=user, status='solved', template__categories=tag)
                unsolved = regis.Question.objects.filter(
                    user=user, status='released', template__categories=tag)
                total = len(solved) + len(unsolved)
                diffs[tag.id][user.id] = (len(solved) * 1.0) / total if total else 0.0
            peak_pcs = max(diffs[tag.id].values())
            if peak_pcs > 0:
                for uid in diffs[tag.id].keys():
                    diffs[tag.id][uid] /= peak_pcs
return diffs
# For each class, compute the global difficulty score. Similar to
# the global difficulty scores but are per-class (tag) instead of
# per-template.
def get_local_diffs(self):
tags = regis.QuestionTag.objects.all()
templates = regis.QuestionTemplate.objects.all()
solved = regis.Question.objects.filter(status='solved')
unsolved = regis.Question.objects.filter(status='released')
correct = {}
available = {}
for tag in tags:
correct[tag.id] = 0
available[tag.id] = 0
# Tally everything up.
        for tag in tags:
            for template in templates:
                # Only count templates that actually carry this tag.
                if tag not in template.categories.all():
                    continue
for question in solved:
# If the question is solved and pulled from the correct
# template then count it.
if question.template.id == template.id:
correct[tag.id] += 1
available[tag.id] += 1
for question in unsolved:
if question.template.id == template.id:
available[tag.id] += 1
        diffs = {}
        for tag in tags:
            if available[tag.id] > 0:
                diffs[tag.id] = 1 - (correct[tag.id] * 1.0 / available[tag.id])
            else:
                diffs[tag.id] = 0
return diffs
# For each question, compute the global difficulty score. The difficulty
# score is a number in the range [0, 1] and is based on how many user have
# unlocked the question vs. how many have solved it. The score increases
# as the # of users to successfully solve the question decreases (they have
# an inverse relationship).
def get_global_diffs(self):
unanswered = regis.Question.objects.exclude(user=None).filter(status='released')
answered = regis.Question.objects.exclude(user=None).filter(status='solved')
correct = {}
available = {}
templates = regis.QuestionTemplate.objects.all()
for template in templates:
correct[template.id] = 0
available[template.id] = 0
# Count all of the questions that are available but unanswered.
for question in unanswered:
available[question.template.id] += 1
# Count all of the questions that are answered.
for question in answered:
correct[question.template.id] += 1
available[question.template.id] += 1
diffs = {}
for template in templates:
|
Quantipy/quantipy
|
tests/test_rules.py
|
Python
|
mit
| 81,385
| 0.003883
|
import unittest
import os.path
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import test_helper
import copy
from operator import lt, le, eq, ne, ge, gt
from pandas.core.index import Index
__index_symbol__ = {
Index.union: ',',
Index.intersection: '&',
Index.difference: '~',
Index.sym_diff: '^'
}
from collections import defaultdict, OrderedDict
from quantipy.core.stack import Stack
from quantipy.core.chain import Chain
from quantipy.core.link import Link
from quantipy.core.view_generators.view_mapper import ViewMapper
from quantipy.core.view_generators.view_maps import QuantipyViews
from quantipy.core.view import View
from quantipy.core.helpers import functions
from quantipy.core.helpers.functions import load_json
from quantipy.core.tools.dp.prep import (
frange,
frequency,
crosstab
)
from quantipy.core.tools.view.query import get_dataframe
from quantipy.core.dataset import DataSet
EXTENDED_TESTS = False
COUNTER = 0
class TestRules(unittest.TestCase):
def setUp(self):
self.path = './tests/'
project_name = 'Example Data (A)'
# Load Example Data (A) data and meta into self
name_data = '%s.csv' % (project_name)
path_data = '%s%s' % (self.path, name_data)
self.example_data_A_data = pd.DataFrame.from_csv(path_data)
name_meta = '%s.json' % (project_name)
path_meta = '%s%s' % (self.path, name_meta)
self.example_data_A_meta = load_json(path_meta)
# Variables by type for Example Data A
self.dk = 'Example Data (A)'
self.fk = 'no_filter'
self.single = ['gender', 'locality', 'ethnicity', 'religion', 'q1']
self.delimited_set = ['q2', 'q3', 'q8', 'q9']
self.q5 = ['q5_1', 'q5_2', 'q5_3']
def test_slicex(self):
meta = self.example_data_A_meta
data = self.example_data_A_data
col_x = 'religion'
col_y = 'ethnicity'
################## values
meta['columns'][col_x]['rules'] = {
'x': {'slicex': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}}}
meta['columns'][col_y]['rules'] = {
'y': {'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}}
rules_values_x = {
'unwtd': index_items(col_x, all=True,
values=[1, 3, 5, 7, 9, 11, 13, 15]),
'iswtd': index_items(col_x, all=True,
values=[1, 3, 5, 7, 9, 11, 13, 15])}
rules_values_y = {
'unwtd': index_items(col_y, all=True,
values=[2, 4, 6, 8, 10, 12, 14, 16]),
'iswtd': index_items(col_y, all=True,
values=[2, 4, 6, 8, 10, 12, 14, 16])}
confirm_crosstabs(
self,
meta, data,
[None, 'weight_a'],
col_x, col_y,
rules_values_x,
rules_values_y)
def _get_dataset(self):
meta = self.example_data_A_meta
data = self.example_data_A_data
dataset = DataSet('rules_test')
dataset.set_verbose_infomsg(False)
dataset.from_components(data, meta)
return dataset
def _get_stack_with_links(self, dataset, x=None, y=None, w=None):
stack = Stack()
stack.add_data(dataset.name, dataset._data, dataset._meta)
if not x: x = '@'
if not y: y = '@'
stack.add_link(x=x, y=y, weights=w)
return stack
def test_sortx_summaries_mean(self):
        dataset = self._get_dataset()
x = 'q5'
y = '@'
dataset.sorting(x, on='mean')
stack = self._get_stack_with_links(dataset, x)
        stack.add_link(x=x, y=y, views=['cbase', 'counts', 'c%', 'mean'])
vks = ['x|f|x:|||cbase', 'x|f|:|||counts', 'x|f|:|y||c%',
'x|d.mean|x:|||mean']
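        # View-key anatomy (roughly): axis|aggregation|relation|rel_to|weights|name.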
chains = stack.get_chain(data_keys=dataset.name,
filters='no_filter',
x=[x], y=[y], rules=True,
views=vks,
orient_on='x')
chain = chains[0]
for vk in vks:
v = chain['rules_test']['no_filter'][x][y][vk]
l = stack['rules_test']['no_filter'][x][y][vk]
check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe)
self.assertTrue(check_chain_view_dataframe.equals(l.dataframe))
actual_order = v.dataframe.index.get_level_values(1).tolist()
expected_order = ['q5_4', 'q5_6', 'q5_1', 'q5_3', 'q5_5', 'q5_2']
self.assertEqual(actual_order, expected_order)
def test_sortx_summaries_value(self):
dataset = self._get_dataset()
x = 'q5'
y = '@'
dataset.sorting(x, on=3, ascending=True)
stack = self._get_stack_with_links(dataset, x)
stack.add_link(x=x, y=y, views=['cbase', 'counts', 'c%', 'mean'])
vks = ['x|f|x:|||cbase', 'x|f|:|||counts', 'x|f|:|y||c%',
'x|d.mean|x:|||mean']
chains = stack.get_chain(data_keys=dataset.name,
filters='no_filter',
x=[x], y=[y], rules=True,
views=vks,
orient_on='x')
chain = chains[0]
for vk in vks:
v = chain['rules_test']['no_filter'][x][y][vk]
l = stack['rules_test']['no_filter'][x][y][vk]
check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe)
self.assertTrue(check_chain_view_dataframe.equals(l.dataframe))
actual_order = v.dataframe.index.get_level_values(1).tolist()
expected_order = ['q5_4', 'q5_5', 'q5_6', 'q5_1', 'q5_3', 'q5_2']
self.assertEqual(actual_order, expected_order)
def test_sortx_summaries_items(self):
dataset = self._get_dataset()
x = '@'
y = 'q5'
dataset.sorting(y, on='q5_2', ascending=False)
stack = self._get_stack_with_links(dataset, y=y)
stack.add_link(x=x, y=y, views=['cbase', 'counts', 'c%', 'mean'])
vks = ['x|f|x:|||cbase', 'x|f|:|||counts', 'x|f|:|y||c%',
'x|d.mean|x:|||mean']
chains = stack.get_chain(data_keys=dataset.name,
filters='no_filter',
x=[x], y=[y], rules=True,
views=vks,
orient_on='x')
chain = chains[0]
for vk in vks:
v = chain['rules_test']['no_filter'][x][y][vk]
l = stack['rules_test']['no_filter'][x][y][vk]
if not 'd.mean' in vk and not 'cbase' in vk:
check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe)
self.assertTrue(check_chain_view_dataframe.equals(l.dataframe))
actual_order = v.dataframe.index.get_level_values(1).tolist()
expected_order = [3, 5, 98, 2, 1, 97, 4]
self.assertEqual(actual_order, expected_order)
def test_sortx_expand_net_within(self):
dataset = self._get_dataset()
x = 'q2'
y = ['@', 'gender']
dataset.sorting(x, on='@', within=True, between=False, fix=98)
stack = self._get_stack_with_links(dataset, x=x, y=y)
net = [{'test A': [1, 2, 3], 'text': {'en-GB': 'Lab1'}},
{'test B': [5, 6, 97], 'text': {'en-GB': 'Lab2'}}]
net_view = ViewMapper().make_template('frequency')
view_name = 'expandnet'
options = {'logic': net,
'expand': 'after',
'complete': True,
'axis': 'x',
'iterators': {'rel_to': [None, 'y']}}
net_view.add_method(view_name, kwargs=options)
stack.add_link(x=x, y=y, views=net_view)
vks = ['x|f|x[{1,2,3}+],x[{5,6,97}+]*:|||expandnet',
'x|f|x[{1,2,3}+],x[{5,6,97}+]*:|y||expandnet']
chains = stack.get_chain(data_keys=dataset.name,
filters='no_filter',
x=[x], y=y, rules=True,
views=vks,
|
d-qoi/TelegramBots
|
RoseAssassins/cust_handlers/conversationhandler.py
|
Python
|
lgpl-3.0
| 15,351
| 0.004755
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the ConversationHandler."""
import logging
from telegram import Update
from telegram.ext import (Handler, CallbackQueryHandler, InlineQueryHandler,
ChosenInlineResultHandler)
from telegram.utils.promise import Promise
from MongoDict import MongoDict
class ConversationHandler(Handler):
"""
A handler to hold a conversation with a single user by managing four collections of other
handlers. Note that neither posts in Telegram Channels, nor group interactions with multiple
users are managed by instances of this class.
The first collection, a ``list`` named :attr:`entry_points`, is used to initiate the
conversation, for example with a :class:`telegram.ext.CommandHandler` or
:class:`telegram.ext.RegexHandler`.
The second collection, a ``dict`` named :attr:`states`, contains the different conversation
steps and one or more associated handlers that should be used if the user sends a message when
the conversation with them is currently in that state. You will probably use mostly
:class:`telegram.ext.MessageHandler` and :class:`telegram.ext.RegexHandler` here.
The third collection, a ``list`` named :attr:`fallbacks`, is used if the user is currently in a
conversation but the state has either no associated handler or the handler that is associated
to the state is inappropriate for the update, for example if the update contains a command, but
a regular text message is expected. You could use this for a ``/cancel`` command or to let the
user know their message was not recognized.
The fourth, optional collection of handlers, a ``list`` named :attr:`timed_out_behavior` is
used if the wait for ``run_async`` takes longer than defined in :attr:`run_async_timeout`.
For example, you can let the user know that they should wait for a bit before they can
continue.
To change the state of conversation, the callback function of a handler must return the new
state after responding to the user. If it does not return anything (returning ``None`` by
default), the state will not change. To end the conversation, the callback function must
return :attr:`END` or ``-1``.
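    For example, a callback handling one state might return the constant for
    the next state to advance the conversation, return :attr:`END` to close
    it, or return ``None`` to stay put.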
Attributes:
entry_points (List[:class:`telegram.ext.Handler`]): A list of ``Handler`` objects that can
trigger the start of the conversation.
states (Dict[:obj:`object`, List[:class:`telegram.ext.Handler`]]): A :obj:`dict` that
defines the different states of conversation a user can be in and one or more
associated ``Handler`` objects that should be used in that state.
        fallbacks (List[:class:`telegram.ext.Handler`]): A list of handlers that might be used if
            the user is in a conversation, but every handler for their current state returned
            ``False`` on :attr:`check_update`.
allow_reentry (:obj:`bool`): Optional. Determines if a user can restart a conversation with
an entry point.
        run_async_timeout (:obj:`float`): Optional. The time-out for ``run_async`` decorated
Handlers.
timed_out_behavior (List[:class:`telegram.ext.Handler`]): Optional. A list of handlers that
might be used if the wait for ``run_async`` timed out.
        per_chat (:obj:`bool`): Optional. If the conversation key should contain the Chat's ID.
        per_user (:obj:`bool`): Optional. If the conversation key should contain the User's ID.
        per_message (:obj:`bool`): Optional. If the conversation key should contain the Message's
            ID.
conversation_timeout (:obj:`float`|:obj:`datetime.timedelta`): Optional. When this handler
is inactive more than this timeout (in seconds), it will be automatically ended. If
this value is 0 (default), there will be no timeout.
Args:
entry_points (List[:class:`telegram.ext.Handler`]): A list of ``Handler`` objects that can
trigger the start of the conversation. The first handler which :attr:`check_update`
method returns ``True`` will be used. If all return ``False``, the update is not
handled.
states (Dict[:obj:`object`, List[:class:`telegram.ext.Handler`]]): A :obj:`dict` that
defines the different states of conversation a user can be in and one or more
associated ``Handler`` objects that should be used in that state. The first handler
which :attr:`check_update` method returns ``True`` will be used.
fallbacks (List[:class:`telegram.ext.Handler`]): A list of handlers that might be used if
the user is in a conversation, but every handler for their current state returned
``False`` on :attr:`check_update`. The first handler which :attr:`check_update` method
returns ``True`` will be used. If all return ``False``, the update is not handled.
allow_reentry (:obj:`bool`, optional): If set to ``True``, a user that is currently in a
conversation can restart the conversation by triggering one of the entry points.
run_async_timeout (:obj:`float`, optional): If the previous handler for this user was
running asynchronously using the ``run_async`` decorator, it might not be finished when
the next message arrives. This timeout defines how long the conversation handler should
wait for the next state to be computed. The default is ``None`` which means it will
wait indefinitely.
timed_out_behavior (List[:class:`telegram.ext.Handler`], optional): A list of handlers that
might be used if the wait for ``run_async`` timed out. The first handler which
:attr:`check_update` method returns ``True`` will be used. If all return ``False``,
the update is not handled.
        per_chat (:obj:`bool`, optional): If the conversation key should contain the Chat's ID.
            Default is ``True``.
        per_user (:obj:`bool`, optional): If the conversation key should contain the User's ID.
            Default is ``True``.
        per_message (:obj:`bool`, optional): If the conversation key should contain the Message's
            ID. Default is ``False``.
conversation_timeout (:obj:`float`|:obj:`datetime.timedelta`, optional): When this handler
is inactive more than this timeout (in seconds), it will be automatically ended. If
this value is 0 or None (default), there will be no timeout.
Raises:
ValueError
"""
END = -1
""":obj:`int`: Used as a constant to return when a conversation is ended."""
def __init__(self,
entry_points,
states,
fallbacks,
allow_reentry=False,
run_async_timeout=None,
timed_out_behavior=None,
per_chat=True,
per_user=True,
per_message=False,
conversation_timeout=None,
collection=None):
self.logger = logging.getLogger(__name__)
self.entry_points = entry_points
self.states = states
self.fallbacks = fallbacks
self.allow_reentry = allow_reentry
self.run_async_timeout = run_async_timeout
self.timed_out_behavior = timed_out_behavi
|
supriti/DeepSea
|
srv/modules/runners/ui_rgw.py
|
Python
|
gpl-3.0
| 5,055
| 0.001583
|
# -*- coding: utf-8 -*-
"""
Consolidate any user interface rgw calls for Wolffish and openATTIC.
All operations will happen using the rest-api of RadosGW. The one execption
is getting the credentials for an administrative user which is implemented
here.
"""
import logging
import os
import json
import re
import glob
import salt.client
import salt.utils.minions
log = logging.getLogger(__name__)
class Radosgw(object):
"""
Return a structure containing S3 keys and urls
"""
def __init__(self, canned=None, cluster='ceph', pathname='/srv/salt/ceph/rgw/cache'):
"""
Initialize and call routines
"""
if canned:
self._canned(int(canned))
else:
self.cluster = cluster
self.credentials = {'access_key': None,
'secret_key': None,
'urls': [],
'success': False}
self.pathname = pathname
self._admin()
self._urls()
def _canned(self, canned):
"""
Return examples for debugging without a working Ceph cluster
"""
if canned == 1:
self.credentials = {'access_key': "ABCDEFGHIJKLMNOPQRST",
'secret_key': "0123456789012345678901234567890123456789",
'urls': ["http://rgw1"]}
elif canned == 2:
self.credentials = {'access_key': "ABCDEFGHIJKLMNOPQRST",
'secret_key': "0123456789012345678901234567890123456789",
'urls': ["http://red1",
"http://red2",
"http://blue1:8000",
"http://blue2:8000"]}
def _admin(self, filename="user.admin.json"):
"""
Expect admin user file; otherwise, search for first system user.
Update access_key, secret_key
"""
filepath = "{}/{}".format(self.pathname, filename)
if os.path.exists(filepath):
user = json.loads(open(filepath).read())
else:
user = None
for user_file in glob.glob("{}/user.*".format(self.pathname)):
user = json.loads(open(user_file).read())
if 'system' in user and user['system'] == "true":
break
user = None
if not user:
# No system user
log.error("No system user for radosgw found")
return
self.credentials['access_key'] = user['keys'][0]['access_key']
self.credentials['secret_key'] = user['keys'][0]['secret_key']
self.credentials['success'] = True
def _urls(self):
"""
Check for user defined endpoint; otherwise, return list of gateways as
urls.
"""
search = "I@cluster:{}".format(self.cluster)
__opts__ = salt.config.client_config('/etc/salt/master')
pillar_util = salt.utils.master.MasterPillarUtil(search, "compound",
use_cached_grains=True,
grains_fallback=False,
opts=__opts__)
cached = pillar_util.get_minion_pillar()
for minion in cached:
if 'rgw_endpoint' in cached[minion]:
self.credentials['urls'].append(cached[minion]['rgw_endpoint'])
return
port = '7480' # civetweb default port
ssl = ''
found = False
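        # Scan the staged ceph.conf fragments for an 'rgw frontends' line to
        # pick up a custom port; a trailing 's' on the port marks SSL.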
for rgw_conf_file_path in glob.glob("/srv/salt/ceph/configuration/files/ceph.conf.*"):
if os.path.exists(rgw_conf_file_path) and os.path.isfile(rgw_conf_file_path):
with open(rgw_conf_file_path) as rgw_conf_file:
for line in rgw_conf_file:
if line:
match = re.search(r'rgw.*frontends.*=.*port=(\d+)(s?)', line)
if match:
port = match.group(1)
ssl = match.group(2)
found = True
if found:
break
for client_file in glob.glob("{}/client.*".format(self.pathname)):
parts = client_file.split('.')
resource = ''
# dedicated keys - use host part
if len(parts) == 4:
resource = parts[2]
# shared keys - use role part
if len(parts) == 3:
resource = parts[1]
if resource and port:
resource += ":{}".format(port)
if resource:
self.credentials['urls'].append("http{}://{}".format(ssl, resource))
def credentials(canned=None, **kwargs):
"""
Return the administrative credentials for the RadosGW
"""
radosgw = Radosgw(canned)
return radosgw.credentials
|
SBT-community/Starbound_RU
|
tools/special_cases.py
|
Python
|
apache-2.0
| 2,754
| 0.017234
|
from re import compile as regex
def matches(patts, filename):
for p in patts:
if not p.match(filename) is None:
return True
return False
class SpecialSection():
def __init__(self, name, pathPatterns, filePatterns, all_conditions = False):
self.name = name
self.allcond = all_conditions
self.fpat = []
self.ppat = []
for pat in filePatterns:
self.fpat.append(regex(pat))
for pat in pathPatterns:
self.ppat.append(regex(pat))
def match(self, filename, path):
fmatch = matches(self.fpat, filename)
if fmatch and not self.allcond:
return True
pmatch = matches(self.ppat, path)
if pmatch and (fmatch or not self.allcond):
return True
return False
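# Usage sketch (hypothetical file and path): collect the grammar sections
# that apply to one translatable file.
#
#   applicable = [s.name for s in specialSections
#                 if s.match("monsterthreats.config", "pools/monsterthreats.config")]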
specialSections = [
SpecialSection("Прилагательное", [], ["^.*quests/generated/pools/guardthemes\.config$"]),
SpecialSection("Винительный падеж", [], ["^.*quests/generated/pools/weapon\.config$"]),
SpecialSection("Имена персонажей", [], ["^.*namegen\.config$", "^.*\.namesource$"]),
SpecialSection("Наречие", [], ["^.*pools/hatadjectives.config$"]),
SpecialSection("Регулярное выражение (не для перевода, а для поддержки названий на кирилице)", ["^.*/regex$"], ["^.*\.config$"], True),
SpecialSection("Привязанное к полу прилагательное",
["^.*generat
|
edText/fluff/2/.*$"],
["^.*quests/generated/templates/spread_rumors.questtemplate$"], True),
SpecialSection("Предложный падеж", ["^.*generatedText/fluff/3/.*$"],
["^.*quests/generated/templates/escort\.questtemplate$"], True),
SpecialSection("Предложный падеж", [".*generatedText/fluff/5/.*$"],
["^.*quests/generated/templates/kidnapping\.questtemplate$"], True),
SpecialSection("Множественное чи
|
сло", ["^.*generatedText/fluff/3/.*$"],
["^.*kill_monster_group\.questtemplate$"], True),
SpecialSection("Родительный падеж", ["^.+/name$"],
["^.*pools/monsterthreats\.config$"], True),
SpecialSection("Префикс названия банды", ["^.*Prefix/.*"], ["^.*quests/bounty/gang\.config"], True),
SpecialSection("Основная часть названия банды", ["^.*Mid/.*"], ["^.*quests/bounty/gang\.config"], True),
SpecialSection("Окончание названия банды", ["^.*suffix/.*"], ["^.*quests/bounty/gang\.config"], True),
SpecialSection("Префикс главаря банды", ["^.*prefix/.*"], ["^.*quests/bounty/bounty\.config"], True),
SpecialSection("Окончание главаря банды", ["^.*suffix/.*"], ["^.*quests/bounty/bounty\.config"], True),
]
|
cinp/python
|
cinp/common_test.py
|
Python
|
apache-2.0
| 7,291
| 0.055411
|
import pytest
from cinp.common import URI
# TODO: test multi-object setting
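# URI shape exercised below (informal):
#   /api/v1/<ns>/.../<model>:<id>:...:(<action>)
# where each id carries a trailing ':' and more than one id marks a
# multi-object URI.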
def test_splituri_builduri():  # TODO: test invalid URIs; maybe remove some tests from client_test that are just checking the URI
uri = URI( '/api/v1/' )
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/' )
assert ns == []
assert model is None
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/'
ns = None
assert uri.build( ns, model, action, id_list ) == '/api/v1/'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/' )
assert ns == [ 'ns' ]
assert model is None
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/'
ns = 'ns'
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model' )
assert ns == [ 'ns' ]
assert model == 'model'
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model'
id_list = []
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/ns2/' )
assert ns == [ 'ns', 'ns2' ]
assert model is None
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/ns2/'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/ns2/model' )
assert ns == [ 'ns', 'ns2' ]
assert model == 'model'
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/ns2/model'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model::' )
assert ns == [ 'ns' ]
assert model == 'model'
assert id_list == [ '' ]
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model::'
id_list = ''
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model::'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:ghj:' )
assert ns == [ 'ns' ]
assert model == 'model'
assert id_list == [ 'ghj' ]
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:ghj:'
id_list = 'ghj'
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:ghj:'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:ghj:dsf:sfe:' )
assert ns == [ 'ns' ]
assert model == 'model'
assert id_list == [ 'ghj', 'dsf', 'sfe' ]
assert action is None
assert multi is True
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:ghj:dsf:sfe:'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model(action)' )
assert ns == [ 'ns' ]
assert model == 'model'
assert id_list is None
assert action == 'action'
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model(action)'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:sdf:(action)' )
assert ns == [ 'ns' ]
assert model == 'model'
assert id_list == [ 'sdf' ]
assert action == 'action'
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:sdf:(action)'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:sdf:eed:(action)' )
assert ns == [ 'ns' ]
assert model == 'model'
assert id_list == [ 'sdf', 'eed' ]
assert action == 'action'
assert multi is True
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:sdf:eed:(action)'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/', root_optional=True )
assert ns == []
assert model is None
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/'
assert uri.build( ns, model, action, id_list, in_root=False ) == '/'
with pytest.raises( ValueError ):
( ns, model, action, id_list, multi ) = uri.split( '/', root_optional=False )
( ns, model, action, id_list, multi ) = uri.split( '/', root_optional=True )
assert ns == []
assert model is None
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/'
assert uri.build( ns, model, action, id_list, in_root=False ) == '/'
( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/', root_optional=True )
assert ns == [ 'ns' ]
assert model is None
assert id_list is None
assert action is None
assert multi is False
assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/'
assert uri.build( ns, model, action, id_list, in_root=False ) == '/ns/'
with pytest.raises( ValueError ):
    ( ns, model, action, id_list, multi ) = uri.split( '/ns/', root_optional=False )
( ns, model, action, id_list, multi ) = uri.split( '/ns/', root_optional=True )
assert ns == [ 'ns' ]
assert model is None
assert id_list is None
assert action is None
assert multi is False
  assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/'
assert uri.build( ns, model, action, id_list, in_root=False ) == '/ns/'
def test_extract_ids():
uri = URI( '/api/v1/' )
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:', '/api/v1/ns/model:rfv:' ]
assert uri.extractIds( id_list ) == [ 'sdf', '234', 'rfv' ]
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www:', '/api/v1/ns/model:rfv:' ]
assert uri.extractIds( id_list ) == [ 'sdf', '234', 'www', 'rfv' ]
id_list = [ '/api/v1/ns/model:234:www:' ]
assert uri.extractIds( id_list ) == [ '234', 'www' ]
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www', '/api/v1/ns/model:rfv:' ]
with pytest.raises( ValueError ):
uri.extractIds( id_list )
id_list = [ '/api/v1/ns/model:sdf' ]
with pytest.raises( ValueError ):
uri.extractIds( id_list )
id_list = [ '/api/v1/ns/model' ]
  assert uri.extractIds( id_list ) == []
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model', '/api/v1/ns/model:rfv:' ]
  assert uri.extractIds( id_list ) == [ 'sdf', 'rfv' ]
assert uri.extractIds( [] ) == []
def test_urilist_to_uri():
uri = URI( '/api/v1/' )
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:', '/api/v1/ns/model:rfv:' ]
assert uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:sdf:234:rfv:'
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www:', '/api/v1/ns/model:rfv:' ]
assert uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:sdf:234:www:rfv:'
id_list = [ '/api/v1/ns/model:234:www:' ]
assert uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:234:www:'
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www', '/api/v1/ns/model:rfv:' ]
with pytest.raises( ValueError ):
uri.uriListToMultiURI( id_list )
id_list = [ '/api/v1/ns/model' ]
  assert uri.uriListToMultiURI( id_list ) == []
id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model', '/api/v1/ns/model:rfv:' ]
  assert uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:sdf:rfv:'
assert uri.uriListToMultiURI( [] ) == []
|
wubr2000/googleads-python-lib
|
examples/dfp/v201411/team_service/create_teams.py
|
Python
|
apache-2.0
| 1,762
| 0.007946
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates new teams.
To determine which teams exist, run get_all_teams.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
team_service = client.GetService('TeamService', version='v201411')
# Create team objects.
teams = []
for i in xrange(5):
team = {
'name': 'Team %s' % uuid.uuid4(),
'hasAllCompanies': 'false',
'hasAllInventory': 'false',
'teamAccessType': 'READ_WRITE'
}
teams.append(team)
# Add Teams.
teams = team_service.createTeams(teams)
  # Display results.
for team in teams:
print ('Team with ID \'%s\' and name \'%s\' was created.'
% (team['id'], team['name']))
if __name__ == '__main__':
# Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
adamwwt/chvac
|
venv/lib/python2.7/site-packages/sqlalchemy/events.py
|
Python
|
mit
| 40,130
| 0.0001
|
# sqlalchemy/events.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine, Dialect
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`.Column` is associated
with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
is associated with a :class:`.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
        from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
See also:
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
"""Called before CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_create(self, target, connection, **kw):
"""Called after CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_drop(self, target, connection, **kw):
"""Called before DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_drop(self, target, connection, **kw):
"""Called after DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
        :param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
This because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
The methods here define the name of an event as well
as the n
|
jameskane05/final_helpstl
|
submit/urls.py
|
Python
|
gpl-2.0
| 177
| 0.00565
|
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.subform, name='subform'),
url(r'^submit', views.submit, name='submit'),
]
|
rogerscristo/BotFWD
|
env/lib/python3.6/site-packages/pytests/test_inlinequeryresultlocation.py
|
Python
|
mit
| 5,366
| 0.002795
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import json
import pytest
from telegram import (InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton,
InlineQueryResultVoice, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_location():
return InlineQueryResultLocation(TestInlineQueryResultLocation.id,
TestInlineQueryResultLocation.latitude,
TestInlineQueryResultLocation.longitude,
TestInlineQueryResultLocation.title,
thumb_url=TestInlineQueryResultLocation.thumb_url,
thumb_width=TestInlineQueryResultLocation.thumb_width,
thumb_height=TestInlineQueryResultLocation.thumb_height,
input_message_content=TestInlineQueryResultLocation.input_message_content,
reply_markup=TestInlineQueryResultLocation.reply_markup)
class TestInlineQueryResultLocation:
id = 'id'
type = 'location'
latitude = 0.0
longitude = 1.0
title = 'title'
thumb_url = 'thumb url'
thumb_width = 10
thumb_height = 15
input_message_content = InputTextMessageContent('input_message_content')
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
def test_expected_values(self, inline_query_result_location):
assert inline_query_result_location.id == self.id
assert inline_query_result_location.type == self.type
assert inline_query_result_location.latitude == self.latitude
assert inline_query_result_location.longitude == self.longitude
assert inline_query_result_location.title == self.title
assert inline_query_result_location.thumb_url == self.thumb_url
assert inline_query_result_location.thumb_width == self.thumb_width
assert inline_query_result_location.thumb_height == self.thumb_height
assert inline_query_result_location.input_message_content.to_dict() == \
self.input_message_content.to_dict()
assert inline_query_result_location.reply_markup.to_dict() == self.reply_markup.to_dict()
def test_to_json(self, inline_query_result_location):
json.loads(inline_query_result_location.to_json())
def test_to_dict(self, inline_query_result_location):
inline_query_result_location_dict = inline_query_result_location.to_dict()
assert isinstance(inline_query_result_location_dict, dict)
assert inline_query_result_location_dict['id'] == inline_query_result_location.id
assert inline_query_result_location_dict['type'] == inline_query_result_location.type
assert inline_query_result_location_dict['latitude'] == \
inline_query_result_location.latitude
assert inline_query_result_location_dict['longitude'] == \
inline_query_result_location.longitude
assert inline_query_result_location_dict['title'] == inline_query_result_location.title
assert inline_query_result_location_dict['thumb_url'] == \
inline_query_result_location.thumb_url
assert inline_query_result_location_dict['thumb_width'] == \
inline_query_result_location.thumb_width
assert inline_query_result_location_dict['thumb_height'] == \
inline_query_result_location.thumb_height
assert inline_query_result_location_dict['input_message_content'] == \
inline_query_result_location.input_message_content.to_dict()
assert inline_query_result_location_dict['reply_markup'] == \
inline_query_result_location.reply_markup.to_dict()
def test_equality(self):
a = InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title)
b = InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title)
c = InlineQueryResultLocation(self.id, 0, self.latitude, self.title)
d = InlineQueryResultLocation("", self.longitude, self.latitude, self.title)
e = InlineQueryResultVoice(self.id, "", "")
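        # Equality for inline query results is keyed on 'id' alone, so 'c'
        # (same id, different coordinates) still compares equal, while 'd'
        # (different id) and 'e' (different result type) do not.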
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/wearables/ithorian/shared_ith_backpack_s01.py
|
Python
|
mit
| 470
| 0.046809
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/ithorian/shared_ith_backpack_s01.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","ith_backpack_s01")
	#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
lucascudo/pytherisk
|
pytherisk.py
|
Python
|
gpl-3.0
| 4,654
| 0.010314
|
#!/usr/bin/python
# coding=utf-8
import hashlib
import os
import re
import subprocess
import sys
import tempfile
from datetime import datetime
from gtts import gTTS
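# Asterisk's AGI protocol opens with 'agi_*: value' header lines on stdin,
# terminated by a blank line; collect them into a dict before serving.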
env = {}
while 1:
line = sys.stdin.readline().strip()
if line == '':
break
key, data = line.split(':')
if key[:4] != 'agi_':
#skip input that doesn't begin with agi_
sys.stderr.write("Did not work!\n")
sys.stderr.flush()
continue
key = key.strip()
data = data.strip()
if key != '':
env[key] = data
def _speak_espeak(text):
    base_file_name = tempfile.NamedTemporaryFile().name
    raw_file_name = tempfile.NamedTemporaryFile().name + '-raw.wav'
subprocess.call(['espeak', text, '-vbrazil-mbrola-4', '-g0.5', '-p60', '-s130', '-w', raw_file_name])
subprocess.call(['sox', raw_file_name, base_file_name + '.wav', 'rate', '8k'])
os.remove(raw_file_name)
return base_file_name
def _speak_gtts(text):
try:
text.decode('utf-8')
except:
text = text.encode('utf-8')
digest = '/tmp/' + hashlib.sha224(text).hexdigest()
file_name = digest + '.mp3'
if os.path.isfile(file_name):
return file_name
raw_file_name = digest + '-raw.mp3'
tts = gTTS(text=text, lang='pt-br')
tts.save(raw_file_name)
subprocess.call(['lame', '--scale', '10', raw_file_name, file_name])
os.remove(raw_file_name)
return file_name
def busy(timeout):
sys.stdout.write("EXEC Busy %s\n %timeout ")
sys.stdout.flush()
sys.stderr.write("EXEC Busy %s\n %timeout ")
sys.stderr.flush()
line = sys.stdin.readline()
result = line.strip()
return int(checkresult(result)) - 48
def checkresult (params):
sys.stderr.write("checkresult: %s\n" % params)
params = params.rstrip()
if re.search('^200', params):
result = re.search('result=(\d+)', params)
if (not result):
sys.stderr.write("FAIL ('%s')\n" % params)
sys.stderr.flush()
return -1
else:
result = result.group(1)
sys.stderr.write("PASS (%s)\n" % result)
sys.stderr.flush()
return result
else:
        sys.stderr.write("FAIL (unexpected result '%s')\n" % params)
sys.stderr.flush()
return -2
def hangup():
sys.stdout.write("EXEC Hangup")
sys.stdout.flush()
    sys.stderr.write("EXEC Hangup")
sys.stderr.flush()
line = sys.stdin.readline()
result = line.strip()
return int(checkresult(result)) - 48
def read_digit(timeout):
sys.stdout.write("WAIT FOR DIGIT %s\n" %timeout )
sys.stdout.flush()
sys.stderr.write("WAIT FOR DIGIT %s\n" %timeout )
sys.stderr.flush()
line = sys.stdin.readline()
sys.stderr.write('wait_for_digit line: %s\n' % line)
result = line.strip()
return int(checkresult(result)) - 48
def record(filepath):
sys.stdout.write("EXEC MixMonitor " + filepath)
sys.stdout.flush()
sys.stderr.write("MixMonitor(wav, " + filepath +", mb)\n")
sys.stderr.flush()
line = sys.stdin.readline()
result = line.strip()
return int(checkresult(result)) - 48
def speak(text):
try:
file_name = _speak_gtts(text)
sys.stdout.write("EXEC MP3Player %s\n" % file_name)
except:
print(sys.exc_info())
file_name = _speak_espeak(text)
sys.stdout.write("EXEC PLAYBACK %s\n" % file_name)
sys.stdout.flush()
result = sys.stdin.readline().strip()
return checkresult(result)
def transfer(tech, dest):
sys.stdout.write("EXEC DIAL %s/%s\n" % (tech,dest))
sys.stdout.flush()
result = sys.stdin.readline().strip()
checkresult(result)
monitor()
def wait_exten(timeout):
sys.stdout.write("EXEC WaitExten %s\n %timeout ")
sys.stdout.flush()
sys.stderr.write("EXEC WaitExten %s\n %timeout ")
sys.stderr.flush()
line = sys.stdin.readline()
result = line.strip()
return int(checkresult(result)) - 48
def write_digit(digit, timeout, duration):
if timeout is None and duration is None:
sys.stdout.write("EXEC SendDTMF %s\n" % digit )
sys.stdout.flush()
elif duration is None:
sys.stdout.write("EXEC SendDTMF %s/%s\n" % (digit, timeout) )
sys.stdout.flush()
elif timeout is None:
sys.stdout.write("EXEC SendDTMF %s/%s\n" % (digit, duration) )
sys.stdout.flush()
else:
sys.stdout.write("EXEC SendDTMF %s %s %s\n" % (digit, timeout, duration) )
sys.stdout.flush()
sys.stderr.write("EXEC SendDTMF %s/%s\n" % (digit, duration))
sys.stderr.flush()
line = sys.stdin.readline()
result = line.strip()
return int(checkresult(result)) - 48
|
praekelt/vumi-go
|
go/apps/http_api/tests/test_vumi_app.py
|
Python
|
bsd-3-clause
| 31,622
| 0
|
import base64
import json
from twisted.internet.defer import inlineCallbacks, DeferredQueue, returnValue
from twisted.web.http_headers import Headers
from twisted.web import http
from twisted.web.server import NOT_DONE_YET
from vumi.config import ConfigContext
from vumi.message import TransportUserMessage, TransportEvent
from vumi.tests.helpers import VumiTestCase
from vumi.tests.utils import MockHttpServer, LogCatcher
from vumi.transports.vumi_bridge.client import StreamingClient
from vumi.utils import http_request_full
from go.apps.http_api.resource import (
StreamResourceMixin, StreamingConversationResource)
from go.apps.tests.helpers import AppWorkerHelper
from go.apps.http_api.vumi_app import StreamingHTTPWorker
class TestStreamingHTTPWorker(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.app_helper = self.add_helper(AppWorkerHelper(StreamingHTTPWorker))
self.config = {
'health_path': '/health/',
'web_path': '/foo',
'web_port': 0,
'metrics_prefix': 'metrics_prefix.',
'conversation_cache_ttl': 0,
}
self.app = yield self.app_helper.get_app_worker(self.config)
self.addr = self.app.webserver.getHost()
self.url = 'http://%s:%s%s' % (
self.addr.host, self.addr.port, self.config['web_path'])
conv_config = {
'http_api': {
'api_tokens': [
'token-1',
'token-2',
'token-3',
],
'metric_store': 'metric_store',
}
}
conversation = yield self.app_helper.create_conversation(
config=conv_config)
yield self.app_helper.start_conversation(conversation)
self.conversation = yield self.app_helper.get_conversation(
conversation.key)
self.auth_headers = {
'Authorization': ['Basic ' + base64.b64encode('%s:%s' % (
conversation.user_account.key, 'token-1'))],
}
self.client = StreamingClient()
# Mock server to test HTTP posting of inbound messages & events
self.mock_push_server = MockHttpServer(self.handle_request)
yield self.mock_push_server.start()
self.add_cleanup(self.mock_push_server.stop)
self.push_calls = DeferredQueue()
self._setup_wait_for_request()
self.add_cleanup(self._wait_for_requests)
def _setup_wait_for_request(self):
# Hackery to wait for the request to finish
self._req_state = {
'queue': DeferredQueue(),
'expected': 0,
}
orig_track = StreamingConversationResource.track_request
orig_release = StreamingConversationResource.release_request
def track_wrapper(*args, **kw):
self._req_state['expected'] += 1
return orig_track(*args, **kw)
def release_wrapper(*args, **kw):
return orig_release(*args, **kw).addCallback(
self._req_state['queue'].put)
self.patch(
StreamingConversationResource, 'track_request', track_wrapper)
self.patch(
StreamingConversationResource, 'release_request', release_wrapper)
@inlineCallbacks
def _wait_for_requests(self):
while self._req_state['expected'] > 0:
yield self._req_state['queue'].get()
self._req_state['expected'] -= 1
def handle_request(self, request):
self.push_calls.put(request)
return NOT_DONE_YET
@inlineCallbacks
def pull_message(self, count=1):
url = '%s/%s/messages.json' % (self.url, self.conversation.key)
messages = DeferredQueue()
errors = DeferredQueue()
receiver = self.client.stream(
TransportUserMessage, messages.put, errors.put, url,
Headers(self.auth_headers))
received_messages = []
for msg_id in range(count):
yield self.app_helper.make_dispatch_inbound(
'in %s' % (msg_id,), message_id=str(msg_id),
conv=self.conversation)
recv_msg = yield messages.get()
received_messages.append(recv_msg)
receiver.disconnect()
returnValue((receiver, received_messages))
def assert_bad_request(self, response, reason):
self.assertEqual(response.code, http.BAD_REQUEST)
self.assertEqual(
response.headers.getRawHeaders('content-type'),
['application/json; charset=utf-8'])
data = json.loads(response.delivered_body)
self.assertEqual(data, {
"success": False,
"reason": reason,
})
@inlineCallbacks
def test_proxy_buffering_headers_off(self):
# This is the default, but we patch it anyway to make sure we're
# testing the right thing should the default change.
self.patch(StreamResourceMixin, 'proxy_buffering', False)
receiver, received_messages = yield self.pull_message()
headers = receiver._response.headers
self.assertEqual(headers.getRawHeaders('x-accel-buffering'), ['no'])
@inlineCallbacks
def test_proxy_buffering_headers_on(self):
self.patch(StreamResourceMixin, 'proxy_buffering', True)
receiver, received_messages = yield self.pull_message()
headers = receiver._response.headers
self.assertEqual(headers.getRawHeaders('x-accel-buffering'), ['yes'])
@inlineCallbacks
def test_content_type(self):
receiver, received_messages = yield self.pull_message()
headers = receiver._response.headers
self.assertEqual(
headers.getRawHeaders('content-type'),
['application/json; charset=utf-8'])
@inlineCallbacks
def test_messages_stream(self):
url = '%s/%s/messages.json' % (self.url, self.conversation.key)
messages = DeferredQueue()
errors = DeferredQueue()
receiver = self.client.stream(
            TransportUserMessage, messages.put, errors.put, url,
Headers(self.auth_headers))
msg1 = yield self.app_helper.make_dispatch_inbound(
'in 1', message_id='1', conv=self.conversation)
msg2 = yield self.app_helper.make_dispatch_inbound(
'in 2', message_id='2', conv=self.conversation)
rm1 = yield messages.get()
rm2 = yield messages.get()
        receiver.disconnect()
# Sometimes messages arrive out of order if we're hitting real redis.
rm1, rm2 = sorted([rm1, rm2], key=lambda m: m['message_id'])
self.assertEqual(msg1['message_id'], rm1['message_id'])
self.assertEqual(msg2['message_id'], rm2['message_id'])
self.assertEqual(errors.size, None)
@inlineCallbacks
def test_events_stream(self):
url = '%s/%s/events.json' % (self.url, self.conversation.key)
events = DeferredQueue()
errors = DeferredQueue()
receiver = yield self.client.stream(TransportEvent, events.put,
events.put, url,
Headers(self.auth_headers))
msg1 = yield self.app_helper.make_stored_outbound(
self.conversation, 'out 1', message_id='1')
ack1 = yield self.app_helper.make_dispatch_ack(
msg1, conv=self.conversation)
msg2 = yield self.app_helper.make_stored_outbound(
self.conversation, 'out 2', message_id='2')
ack2 = yield self.app_helper.make_dispatch_ack(
msg2, conv=self.conversation)
ra1 = yield events.get()
ra2 = yield events.get()
receiver.disconnect()
# Sometimes messages arrive out of order if we're hitting real redis.
if ra1['event_id'] != ack1['event_id']:
ra1, ra2 = ra2, ra1
self.assertEqual(ack1['event_id'], ra1['event_id'])
self.assertEqual(ack2['event_id'], ra2['event_id'])
self.assertEqual(errors.size, None)
@inlineCallbacks
def test_missing_auth(self):
url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        queue = DeferredQueue()
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py
|
Python
|
apache-2.0
| 1,466
| 0.000682
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetModel
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_ModelService_GetModel_async]
from google.cloud import aiplatform_v1
async def sample_get_model():
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
    # Initialize request argument(s)
request = aiplatform_v1.GetModelRequest(
name="name_value",
)
# Make the request
response = await client.get_model(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_GetModel_async]
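# Illustrative driver only (not part of the generated snippet): the
# coroutine above has to be scheduled on an event loop, for example:
#
#     import asyncio
#     asyncio.run(sample_get_model())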
|
hachreak/invenio-oaiharvester
|
invenio_oaiharvester/upgrades/oaiharvester_2015_07_14_innodb.py
|
Python
|
gpl-2.0
| 1,535
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Fixes foreign key relationship."""
from invenio_ext.sqlalchemy import db
from invenio_upgrader.api import op
depends_on = ['invenio_2015_03_03_tag_value']
def info():
"""Return upgrade recipe information."""
return "Fixes foreign key relationship."
def do_upgrade():
"""Carry out the upgrade."""
op.alter_column(
table_name='oaiHARVESTLOG',
column_name='bibupload_task_id',
type_=db.MediumInteger(15, unsigned=True),
existing_nullable=False,
existing_server_default='0'
)
def estimate():
"""Estimate running time of upgrade in seconds (optional)."""
return 1
def pre_upgrade():
"""Pre-upgrade checks."""
pass
def post_upgrade():
"""Post-upgrade checks."""
pass
|
miguelgrinberg/heat
|
heat/tests/keystone/test_role_assignments.py
|
Python
|
apache-2.0
| 12,401
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import exception
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.keystone import role_assignments
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
RESOURCE_TYPE = 'OS::Keystone::DummyRoleAssignment'
keystone_role_assignment_template = {
'heat_template_version': '2013-05-23',
'resources': {
'test_role_assignment': {
'type': RESOURCE_TYPE,
'properties': {
'roles': [
{
'role': 'role_1',
'project': 'project_1',
},
{
'role': 'role_1',
'domain': 'domain_1'
}
]
}
}
}
}
class KeystoneRoleAssignmentTest(common.HeatTestCase):
def setUp(self):
super(KeystoneRoleAssignmentTest, self).setUp()
self.ctx = utils.dummy_context()
# For unit testing purpose. Register resource provider explicitly.
resource._register_class(RESOURCE_TYPE,
role_assignments.KeystoneRoleAssignment)
self.stack = stack.Stack(
self.ctx, 'test_stack_keystone',
template.Template(keystone_role_assignment_template)
)
self.test_role_assignment = self.stack['test_role_assignment']
# Mock client
self.keystoneclient = mock.MagicMock()
self.test_role_assignment.client = mock.MagicMock()
self.test_role_assignment.client.return_value = self.keystoneclient
self.roles = self.keystoneclient.client.roles
# Mock client plugin
def _side_effect(value):
return value
self.keystone_client_plugin = mock.MagicMock()
(self.keystone_client_plugin.get_domain_id.
side_effect) = _side_effect
(self.keystone_client_plugin.get_role_id.
side_effect) = _side_effect
(self.keystone_client_plugin.get_project_id.
side_effect) = _side_effect
self.test_role_assignment.client_plugin = mock.MagicMock()
(self.test_role_assignment.client_plugin.
return_value) = self.keystone_client_plugin
def test_resource_mapping_not_defined(self):
        # this resource is not planned to be supported in heat, so
        # resource_mapping is not to be defined in KeystoneRoleAssignment
try:
from ..resources.role_assignments import resource_mapping # noqa
self.fail("KeystoneRoleAssignment is designed to be exposed as"
"Heat resource")
except Exception:
pass
def test_properties_title(self):
property_title_map = {
role_assignments.KeystoneRoleAssignment.ROLES: 'roles'
}
for actual_title, expected_title in property_title_map.items():
self.assertEqual(
expected_title,
actual_title,
                'KeystoneRoleAssignment PROPERTIES(%s) title modified.' %
actual_title)
def test_property_roles_validate_schema(self):
schema = (role_assignments.KeystoneRoleAssignment.
properties_schema[
role_assignments.KeystoneRoleAssignment.ROLES])
self.assertEqual(
True,
schema.update_allowed,
'update_allowed for property %s is modified' %
            role_assignments.KeystoneRoleAssignment.ROLES)
self.assertEqual(properties.Schema.LIST,
schema.type,
'type for property %s is modified' %
role_assignments.KeystoneRoleAssignment.ROLES)
self.assertEqual('List of role assignments.',
schema.description,
'description for property %s is modified' %
role_assignments.KeystoneRoleAssignment.ROLES)
def test_role_assignment_handle_create_user(self):
# validate the properties
self.assertEqual([
{
'role': 'role_1',
'project': 'project_1',
'domain': None
},
{
'role': 'role_1',
'project': None,
'domain': 'domain_1'
}],
(self.test_role_assignment.properties.
get(role_assignments.KeystoneRoleAssignment.ROLES)))
self.test_role_assignment.handle_create(user_id='user_1',
group_id=None)
# validate role assignment creation
# role-user-domain
self.roles.grant.assert_any_call(
role='role_1',
user='user_1',
domain='domain_1')
# role-user-project
self.roles.grant.assert_any_call(
role='role_1',
user='user_1',
project='project_1')
def test_role_assignment_handle_create_group(self):
# validate the properties
self.assertEqual([
{
'role': 'role_1',
'project': 'project_1',
'domain': None
},
{
'role': 'role_1',
'project': None,
'domain': 'domain_1'
}],
(self.test_role_assignment.properties.
get(role_assignments.KeystoneRoleAssignment.ROLES)))
self.test_role_assignment.handle_create(user_id=None,
group_id='group_1')
# validate role assignment creation
# role-group-domain
self.roles.grant.assert_any_call(
role='role_1',
group='group_1',
domain='domain_1')
# role-group-project
self.roles.grant.assert_any_call(
role='role_1',
group='group_1',
project='project_1')
def test_role_assignment_handle_update_user(self):
self.test_role_assignment._stored_properties_data = {
'roles': [
{
'role': 'role_1',
'project': 'project_1'
},
{
'role': 'role_1',
'domain': 'domain_1'
}
]
}
prop_diff = {
role_assignments.KeystoneRoleAssignment.ROLES: [
{
'role': 'role_2',
'project': 'project_1'
},
{
'role': 'role_2',
'domain': 'domain_1'
}
]
}
self.test_role_assignment.handle_update(
user_id='user_1',
group_id=None,
prop_diff=prop_diff)
# Add role2-project1-domain1
# role-user-domain
self.roles.grant.assert_any_call(
role='role_2',
user='user_1',
domain='domain_1')
# role-user-project
self.roles.grant.assert_any_call(
role='role_2',
user='user_1',
project='project_1')
# Remove role1-project1-domain1
# role-user-domain
self.roles.revoke.assert_any_call(
role='role_1',
user='user_1',
domain='domain_1')
# role-user-project
self.roles.revoke.assert_any_call(
role='role_1',
            user='user_1',
            project='project_1')
|
tiffanyjaya/kai
|
vendors/pdfminer.six/pdfminer/settings.py
|
Python
|
mit
| 187
| 0
|
STRICT = False
try:
from django.conf import settings
STRICT = getattr(settings, 'PDF_MINER_IS_STRICT', STRICT)
except Exception:
# in case it's not a django project
pass
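# Hypothetical override (not part of this file): a Django project could set
# PDF_MINER_IS_STRICT = True in its settings.py and the getattr() above would
# pick it up; outside Django the module-level default is kept unchanged.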
| |
gencer/sentry
|
src/sentry/web/api.py
|
Python
|
bsd-3-clause
| 26,313
| 0.001254
|
from __future__ import absolute_import, print_function
import base64
import logging
import six
import traceback
from time import time
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed
from django.utils.encoding import force_bytes
from django.views.decorators.cache import never_cache, cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View as BaseView
from functools import wraps
from raven.contrib.django.models import client as Raven
from sentry import quotas, tsdb
from sentry.coreapi import (
APIError, APIForbidden, APIRateLimited, ClientApiHelper, CspApiHelper, LazyData,
MinidumpApiHelper,
)
from sentry.interfaces import schemas
from sentry.models import Project, OrganizationOption, Organization
from sentry.signals import (
event_accepted, event_dropped, event_filtered, event_received)
from sentry.quotas.base import RateLimit
from sentry.utils import json, metrics
from sentry.utils.data_filters import FILTER_STAT_KEYS_TO_VALUES
from sentry.utils.data_scrubber import SensitiveDataFilter
from sentry.utils.dates import to_datetime
from sentry.utils.http import (
is_valid_origin,
get_origins,
is_same_domain,
)
from sentry.utils.pubsub import QueuedPublisher, RedisPublisher
from sentry.utils.safe import safe_execute
from sentry.web.helpers import render_to_response
logger = logging.getLogger('sentry')
# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = base64.b64decode('R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=')
PROTOCOL_VERSIONS = frozenset(('2.0', '3', '4', '5', '6', '7'))
pubsub = QueuedPublisher(
RedisPublisher(getattr(settings, 'REQUESTS_PUBSUB_CONNECTION', None))
) if getattr(settings, 'REQUESTS_PUBSUB_ENABLED', False) else None
def api(func):
@wraps(func)
def wrapped(request, *args, **kwargs):
data = func(request, *args, **kwargs)
if request.is_ajax():
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
else:
ref = request.META.get('HTTP_REFERER')
if ref is None or not is_same_domain(ref, request.build_absolute_uri()):
ref = reverse('sentry')
return HttpResponseRedirect(ref)
return response
return wrapped
class APIView(BaseView):
helper_cls = ClientApiHelper
def _get_project_from_id(self, project_id):
if not project_id:
return
if not project_id.isdigit():
raise APIError('Invalid project_id: %r' % project_id)
try:
return Project.objects.get_from_cache(id=project_id)
except Project.DoesNotExist:
raise APIError('Invalid project_id: %r' % project_id)
def _parse_header(self, request, helper, project):
auth = helper.auth_from_request(request)
if auth.version not in PROTOCOL_VERSIONS:
raise APIError(
'Client using unsupported server protocol version (%r)' %
six.text_type(auth.version or '')
)
if not auth.client:
raise APIError("Client did not send 'client' identifier")
return auth
@csrf_exempt
@never_cache
def dispatch(self, request, project_id=None, *args, **kwargs):
helper = self.helper_cls(
agent=request.META.get('HTTP_USER_AGENT'),
project_id=project_id,
ip_address=request.META['REMOTE_ADDR'],
)
origin = None
try:
origin = helper.origin_from_request(request)
response = self._dispatch(
request, helper, project_id=project_id, origin=origin, *args, **kwargs
)
except APIError as e:
context = {
'error': force_bytes(e.msg, errors='replace'),
}
if e.name:
context['error_name'] = e.name
response = HttpResponse(
json.dumps(context), content_type='application/json', status=e.http_status
)
# Set X-Sentry-Error as in many cases it is easier to inspect the headers
response['X-Sentry-Error'] = context['error']
if isinstance(e, APIRateLimited) and e.retry_after is not None:
response['Retry-After'] = six.text_type(e.retry_after)
except Exception as e:
# TODO(dcramer): test failures are not outputting the log message
# here
if settings.DEBUG:
content = traceback.format_exc()
else:
content = ''
logger.exception(e)
response = HttpResponse(
content, content_type='text/plain', status=500)
# TODO(dcramer): it'd be nice if we had an incr_multi method so
# tsdb could optimize this
metrics.incr('client-api.all-versions.requests')
metrics.incr('client-api.all-versions.responses.%s' %
(response.status_code, ))
metrics.incr(
'client-api.all-versions.responses.%sxx' % (
six.text_type(response.status_code)[0], )
)
if helper.context.version:
metrics.incr('client-api.v%s.requests' %
(helper.context.version, ))
metrics.incr(
'client-api.v%s.responses.%s' % (
helper.context.version, response.status_code)
)
metrics.incr(
'client-api.v%s.responses.%sxx' %
(helper.context.version, six.text_type(
response.status_code)[0])
)
if response.status_code != 200 and origin:
# We allow all origins on errors
response['Access-Control-Allow-Origin'] = '*'
if origin:
response['Access-Control-Allow-Headers'] = \
'X-Sentry-Auth, X-Requested-With, Origin, Accept, ' \
'Content-Type, Authentication'
response['Access-Control-Allow-Methods'] = \
', '.join(self._allowed_methods())
response['Access-Control-Expose-Headers'] = \
'X-Sentry-Error, Retry-After'
return response
def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
request.user = AnonymousUser()
project = self._get_project_from_id(project_id)
if project:
helper.context.bind_project(project)
Raven.tags_context(helper.context.get_tags_context())
if origin is not None:
# This check is specific for clients who need CORS support
if not project:
raise APIError('Client must be upgraded for CORS support')
if not is_valid_origin(origin, project):
tsdb.incr(tsdb.models.project_total_received_cors,
project.id)
raise APIForbidden('Invalid origin: %s' % (origin, ))
# XXX: It seems that the OPTIONS call does not always include custom headers
if request.method == 'OPTIONS':
response = self.options(request, project)
else:
auth = self._parse_header(request, helper, project)
key = helper.project_key_from_auth(auth)
# Legacy API was /api/store/ and the project ID was only available elsewhere
if not project:
project = Project.objects.get_from_cache(id=key.project_id)
helper.context.bind_project(project)
elif key.project_id != project.id:
raise APIError('Two different projects were specified')
helper.context.bind_auth(auth)
Raven.tags_context(helper.context.get_tags_context())
# Explicitly bind Organization so we don't implicitly query it later
# this just allows us to comfortably assure that `project.organization` is safe.
#
|
krischer/wfdiff
|
src/wfdiff/tests/test_wfdiff.py
|
Python
|
gpl-3.0
| 452
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
wfdiff test suite.
Run with pytest.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2014-2015
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
import inspect
import os
# Most generic way to get the data folder path.
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "data")
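# For reference, an equivalent and simpler form (assuming __file__ is
# defined, which holds for normal imports):
# DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")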
|
tachylyte/HydroGeoPy
|
one_d_numerical.py
|
Python
|
bsd-2-clause
| 3,784
| 0.013214
|
### Implementation of the numerical Stehfest inversion inspired by J Barker https://www.uni-leipzig.de/diffusion/presentations_DFII/pdf/DFII_Barker_Reduced.pdf
import inversion
import math
def finiteConc(t, v, De, R, deg, x, c0, L, N):
''' t is time (T), v is velocity (L/T), De is effective hydrodynamic dispersion (including diffusion) (L^2/T),
R is retardation (-), deg is first order decay constant (1/T), x is position along path (L),
    c0 is source concentration (M/L^3), L is pathway length (L), N is input variable stehfestCoeff().
Return concentration (M/L^3) at position x'''
Vs = inversion.stehfestCoeff(N)
rt = math.log(2.0) / t
Sum = 0
for i in range(1, N+1):
s = i * rt
        # a1, a2 are the two roots of the characteristic equation in Laplace
        # space; ** 0.5 avoids Python 2 integer division in ** (1 / 2)
        a1 = (v - (v ** 2 + (4 * De * R * (s + deg))) ** 0.5) / (2 * De)
        a2 = (v + (v ** 2 + (4 * De * R * (s + deg))) ** 0.5) / (2 * De)
        Sum = Sum + Vs[i - 1] * (c0 / s) * ((math.exp((a2 * x) + (a1 * L)) - math.exp((a1 * x) + (a2 * L))) / (math.exp(a1 * L) - math.exp(a2 * L)))
return rt * Sum
def finiteFlux(t, v, De, R, deg, x, c0, L, n, N):
''' t is time (T), v is velocity (L/T), De is effective hydrodynamic dispersion (including diffusion) (L^2/T),
R is retardation (-), deg is first order decay constant (1/T), x is position along path (L),
c0 is source concentration (M/L^3), L is pathway length (L), n is effective porosity (-), N is input variable stehfestCoeff().
Return concentration (M/L^3) at position x'''
Vs = inversion.stehfestCoeff(N)
rt = math.log(2.0) / t
Sum = 0
for i in range(1, N+1):
s = i * rt
        a1 = (v - (v ** 2 + (4 * De * R * (s + deg))) ** 0.5) / (2 * De)
        a2 = (v + (v ** 2 + (4 * De * R * (s + deg))) ** 0.5) / (2 * De)
z1 = (v - (De * a2))
z2 = (v - (De * a1))
        Sum = Sum + Vs[i - 1] * ((c0 * n) / s) * ((((z1 * math.exp((a2 * x) + (a1 * L))) - (z2 * math.exp((a1 * x) + (a2 * L)))) / (math.exp(a1 * L) - math.exp(a2 * L))))
return rt * Sum
def infiniteConc(t, v, De, R, deg, x, c0, N):
''' t is time (T), v is velocity (L/T), De is effective hydrodynamic dispersion (including diffusion) (L^2/T),
R is retardation (-), deg is first order decay constant (1/T), x is position along path (L),
    c0 is source concentration (M/L^3), N is input variable stehfestCoeff().
Return concentration (M/L^3) at position x'''
Vs = inversion.stehfestCoeff(N)
rt = math.log(2.0) / t
Sum = 0
for i in range(1, N+1):
s = i * rt
Sum = Sum + Vs[i - 1] * (c0 / s) * math.exp(((v - (v ** 2 + (4 * De * R * (s + deg))) ** 0.5) / (2 * De)) * x)
return rt * Sum
def infiniteFlux(t, v, De, R, deg, x, c0, n, N):
''' t is time (T), v is velocity (L/T), De is effective hydrodynamic dispersion (including diffusion) (L^2/T),
R is retardation (-), deg is first order decay constant (1/T), x is position along path (L),
c0 is source concentration (M/L^3), n is effective porosity (-), N is input variable stehfestCoeff().
Return flux at position x'''
Vs = inversion.stehfestCoeff(N)
rt = math.log(2.0) / t
Sum = 0
for i in range(1, N+1):
s = i * rt
Sum = Sum + Vs[i - 1] * (((c0 * n) / s) * ((v - (De * ((v - ((v ** 2) + (4 * De * R * (s + deg))) ** 0.5) / (2 * De)))) * math.exp(((v - (v ** 2 + (4 * De * R * (s + deg))) ** 0.5) / (2 * De)) * x)))
return rt * Sum
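# A minimal, self-contained sketch of the weights that
# inversion.stehfestCoeff(N) is assumed to supply (N must be even); shown
# here only to illustrate the Gaver-Stehfest inversion driving the sums above.
def stehfest_coefficients(N):
    '''Return the N Stehfest weights V_1..V_N for even N.'''
    half = N // 2
    V = []
    for i in range(1, N + 1):
        total = 0.0
        for k in range((i + 1) // 2, min(i, half) + 1):
            total += (float(k ** half) * math.factorial(2 * k) /
                      (math.factorial(half - k) * math.factorial(k) *
                       math.factorial(k - 1) * math.factorial(i - k) *
                       math.factorial(2 * k - i)))
        V.append(((-1) ** (i + half)) * total)
    return V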
|
ikedumancas/ikequizgen
|
quizzes/migrations/0005_auto_20150813_0645.py
|
Python
|
mit
| 1,156
| 0.000865
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('quizzes', '0004_auto_20150811_1354'),
]
operations = [
migrations.AlterModelOptions(
name='choice',
options={'ordering': ['order']},
),
migrations.AlterModelOptions(
name='question',
options={'ordering': ['order']},
),
migrations.AlterModelOptions(
name='quiz',
options={'ordering': ['-timestamp'], 'verbose_name': 'Quiz', 'verbose_name_plural': 'Quizzes'},
),
migrations.AddField(
model_name='choice',
name='order',
            field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='question',
name='order',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='quiz',
name='is_active',
field=models.BooleanField(default=False, verbose_name=b'active'),
),
]
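    # For context, a hypothetical model matching the order fields added above
    # (not part of this migration):
    #
    #     class Question(models.Model):
    #         order = models.IntegerField(default=0)
    #
    #         class Meta:
    #             ordering = ['order']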
|
elsigh/browserscope
|
third_party/appengine_tools/devappserver2/endpoints/discovery_api_proxy.py
|
Python
|
apache-2.0
| 3,882
| 0.005667
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Proxy that dispatches Discovery requests to the prod Discovery service."""
import httplib
import json
import logging
class DiscoveryApiProxy(object):
"""Proxies discovery service requests to a known cloud endpoint."""
# The endpoint host we're using to proxy discovery and static requests.
# Using separate constants to make it easier to change the discovery service.
_DISCOVERY_PROXY_HOST = 'webapis-discovery.appspot.com'
_STATIC_PROXY_HOST = 'webapis-discovery.appspot.com'
_DISCOVERY_API_PATH_PREFIX = '/_ah/api/discovery/v1/'
def _dispatch_request(self, path, body):
"""Proxies GET request to discovery service API.
Args:
path: A string containing the URL path relative to discovery service.
body: A string containing the HTTP POST request body.
Returns:
HTTP response body or None if it failed.
"""
full_path = self._DISCOVERY_API_PATH_PREFIX + path
headers = {'Content-type': 'application/json'}
connection = httplib.HTTPSConnection(self._DISCOVERY_PROXY_HOST)
try:
connection.request('POST', full_path, body, headers)
response = connection.getresponse()
response_body = response.read()
if response.status != 200:
logging.error('Discovery API proxy failed on %s with %d.\r\n'
'Request: %s\r\nResponse: %s',
full_path, response.status, body, response_body)
return None
return response_body
finally:
connection.close()
def generate_discovery_doc(self, api_config, api_format):
"""Generates a discovery document from an API file.
Args:
api_config: A string containing the .api file contents.
      api_format: A string, either 'rest' or 'rpc' depending on which kind
        of discovery doc is requested.
Returns:
The discovery doc as JSON string.
Raises:
ValueError: When api_format is invalid.
"""
if api_format not in ['rest', 'rpc']:
raise ValueError('Invalid API format')
path = 'apis/generate/' + api_format
request_dict = {'config': json.dumps(api_config)}
request_body = json.dumps(request_dict)
return self._dispatch_request(path, request_body)
def generate_directory(self, api_configs):
"""Generates an API directory from a list of API files.
Args:
api_configs: A list of strings which are the .api file contents.
Returns:
The API directory as JSON string.
"""
request_dict = {'configs': api_configs}
request_body = json.dumps(request_dict)
return self._dispatch_request('apis/generate/directory', request_body)
def get_static_file(self, path):
"""Returns static content via a GET request.
Args:
path: A string containing the URL path after the domain.
Returns:
A tuple of (response, response_body):
response: A HTTPResponse object with the response from the static
proxy host.
response_body: A string containing the response body.
"""
connection = httplib.HTTPSConnection(self._STATIC_PROXY_HOST)
try:
connection.request('GET', path, None, {})
response = connection.getresponse()
response_body = response.read()
finally:
connection.close()
return response, response_body
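# Illustrative usage only; the config string below is a placeholder, not a
# real .api file, and the call performs a live HTTPS request.
def _example_usage():
    proxy = DiscoveryApiProxy()
    doc = proxy.generate_discovery_doc('{"name": "example"}', 'rest')
    if doc is None:
        logging.error('Discovery doc generation failed.')
    return doc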
|
wxgeo/geophar
|
wxgeometrie/modules/cryptographie/__init__.py
|
Python
|
gpl-2.0
| 12,975
| 0.006816
|
# -*- coding: utf-8 -*-
##--------------------------------------#######
# Cryptographie #
##--------------------------------------#######
# WxGeometrie
# Dynamic geometry, graph plotter, and more for french mathematic teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from string import ascii_uppercase as majuscules
from functools import partial
from random import shuffle
import re
from PyQt5.QtWidgets import QVBoxLayout, QInputDialog, QPushButton,\
QTextEdit, QGridLayout, QLabel, QLineEdit, QSpacerItem
from PyQt5.QtCore import Qt, QTimer
from ...GUI.menu import MenuBar
from ...GUI.panel import Panel_simple
from ...pylib import print_error
#~ from ... import param
dict_accents = {
"é": "E",
"É": "E",
"ê": "E",
"Ê": "E",
"è": "E",
"È": "E",
"à": "A",
"À": "A",
"â": "A",
"Â": "A",
"ô": "O",
"Ô": "O",
"î": "I",
"Î": "I",
"ù": "U",
"Ù": "U",
"û": "U",
"Û": "U",
"ç": "C",
"Ç": "C",
}
class CaseLettre(QLineEdit):
def __init__(self, parent):
self.parent = parent
QLineEdit.__init__(self, parent)
self.setAlignment(Qt.AlignCenter)
def keyPressEvent(self, evt):
self.parent.message('')
n = evt.key()
if 65 <= n <= 90 or 97 <= n <= 122:
c = chr(n).upper()
for case in self.parent.cases.values():
if case.text() == c:
self.parent.message('La lettre %s est déjà utilisée !' %c)
return
self.setText(c)
elif n in (Qt.Key_Backspace, Qt.Key_Delete):
self.clear()
##QLineEdit.keyPressEvent(self, evt)
class CryptographieMenuBar(MenuBar):
def __init__(self, panel):
MenuBar.__init__(self, panel)
self.ajouter("Fichier", ["quitter"])
self.ajouter("Affichage", ["onglet"], ["plein_ecran"])
self.ajouter("Outils",
["Coder un message", "Code le message par substitution mono-alphabétique.",
"Ctrl+K", panel.coder],
["Coder avec espaces", "Code le message en conservant les espaces (substitution mono-alphabétique).",
"Ctrl+Shift+K", partial(panel.coder, espaces=True)],
["Générer une nouvelle clé", "Générer une nouvelle permutation de l'alphabet.", None, panel.generer_cle],
["Modifier la clé", "Générer une nouvelle permutation de l'alphabet.", None, panel.DlgModifierCle],
None,
["Coder avec Vigenère", "Codage par la méthode de Vigenère (substitution poly-alphabétique).",
None, partial(panel.coder_vigenere, ask=True)],
None,
["options"])
self.ajouter("avance2")
self.ajouter("?")
class Cryptographie(Panel_simple):
titre = "Cryptographie" # Donner un titre à chaque module
def __init__(self, *args, **kw):
Panel_simple.__init__(self, *args, **kw)
self._freeze = False
self.widget_modifie = None
        # The key is the permutation of the alphabet currently used
        # for mono-alphabetic substitution encoding.
        self.generer_cle()
        # The encryption key for poly-alphabetic substitution encoding
        # (also known as the Vigenère cipher).
        self.cle_vigenere = 'EXEMPLE'
        # Symbol marking a character that has not been deciphered yet
        self.symbole = '-' # '.'
self.sizer = QVBoxLayout()
self.textes = QGridLayout()
self.textes.setSpacing(5)
size = (400, 300)
txt_clair = QLabel("<b>Texte en clair</b>")
self.clair = QTextEdit()
self.clair.setMinimumSize(*size)
formater_clair = partial(self.formater, widget=self.clair)
self.clair.textChanged.connect(formater_clair)
self.clair.cursorPositionChanged.connect(formater_clair)
self.copier_clair = QPushButton('Copier le texte en clair')
self.copier_clair.clicked.connect(partial(self.copier, widget=self.clair))
txt_code = QLabel("<b>Texte codé</b>")
self.code = QTextEdit()
self.code.setMinimumSize(*size)
self.code.textChanged.connect(self.code_modifie)
self.code.cursorPositionChanged.connect(partial(self.formater, widget=self.code))
self.copier_code = QPushButton('Copier le texte codé')
self.copier_code.clicked.connect(partial(self.copier, widget=self.code))
self.textes.addWidget(txt_clair, 0, 0)
self.textes.addItem(QSpacerItem(50, 1), 0, 1)
self.textes.addWidget(txt_code, 0, 2)
self.textes.addWidget(self.clair, 1, 0)
self.textes.addWidget(self.code, 1, 2)
self.textes.addWidget(self.copier_code, 2, 2)
self.textes.addWidget(self.copier_clair, 2, 0)
self.table = QGridLayout()
self.table.setSpacing(3)
self.cases = {}
self.table.addWidget(QLabel("Codé : ", self), 0, 0)
self.table.addWidget(QLabel("Clair : ", self), 1, 0)
##self.table.setColumnStretch(0, 100)
for i, l in enumerate(majuscules):
lettre = QLineEdit(l, self)
lettre.setAlignment(Qt.AlignCenter)
lettre.setReadOnly(True)
lettre.setEnabled(False)
self.table.addWidget(lettre, 0, i + 1)
##self.table.setColumnStretch(i + 1, 1)
for i, l in enumerate(majuscules):
c = self.cases[l] = CaseLettre(self)
c.setMaxLength(1)
self.table.addWidget(c, 1, i + 1)
c.textChanged.connect(self.decoder)
self.sizer.addLayout(self.textes)
self.sizer.addLayout(self.table)
self.setLayout(self.sizer)
##self.adjustSize()
self.couleur1 = "5A28BE" # sky blue
self.couleur2 = "C86400" # Lime Green
self.couleur_position = "FFCDB3"
self.reg = re.compile("([-A-Za-z]|<##>|</##>)+")
##couleur_position = wx.Color(255, 205, 179) # FFCDB3
##couleur1 = wx.Color(90, 40, 190) # 5A28BE
##couleur2 = wx.Color(200, 100, 0) # C86400
##black = wx.Color(0, 0, 0) # 000000
##white = wx.Color(255, 255, 255) # FFFFFF
##self.special = wx.TextAttr(wx.NullColour, couleur_position)
##self.fond = wx.TextAttr(couleur1, wx.NullColour) #"sky blue"
##self.fond2 = wx.TextAttr(couleur2, wx.NullColour) # "Lime Green"
##self.defaut = wx.TextAttr(black, white)
##
##self.Bind(wx.EVT_IDLE, self.OnIdle)
timer = QTimer(self)
timer.timeout.connect(self.OnIdle)
timer.start(100)
# DEBUG:
        ##self.code.setPlainText('WR IRAMXPZRHRDZ IK HRYYOVR AL IRYYBKY RYZ NOALWLZR POM WR NOLZ FKR W BD O VOMIR WRY YLVDRY IR PBDAZKOZLBD RZ WRY RYPOARY RDZMR WRY HBZY OWBMY FKR I QOELZKIR BD VMBKPR WRY WRZZMRY ALDF POM ALDF')
def copier(self, evt=None, widget=None):
self.vers_presse_papier(widget.toPlainText())
def DlgModifierCle(self, evt=None):
while True:
text, ok = QInputDialog.getText(self, "Modifier la clé",
"La clé doit être une permutation de l'alphabet,\n"
"ou un chiffre qui indique de combien l'alphabet est décalé.",
text=str(self.cle))
if ok:
try:
self.modifier_cle(text)
|
denz/swarm-crawler
|
swarm_crawler/dataset/datasource.py
|
Python
|
bsd-3-clause
| 7,406
| 0.009317
|
from string import printable
import re
from urlparse import urlunparse
from itertools import chain, ifilter
from fnmatch import fnmatch
from werkzeug import cached_property
from swarm import transport, swarm
from swarm.ext.http.helpers import parser, URL
from ..text import PageText
from .tree import TrieTree as Tree
class DescribedMixin(object):
@classmethod
def info(cls):
if cls.__doc__:
yield cls.__doc__
for base in cls.__bases__:
if issubclass(base, DescribedMixin):
for info in base.info():
yield info
@classmethod
def describe(cls):
return 'Extracts ' + ' and '.join(cls.info())
class Datasource(object):
__OMIT = ('dataset',)
tags = None
def __getstate__(self):
return dict((k, v) for (k, v) in self.__dict__.items() \
if not k in self.__class__.__OMIT)
RANGE = printable
def __init__(self, dataset_path, **kwargs):
self.dataset_path = dataset_path
self.__dict__.update(kwargs)
@cached_property
def dataset(self):
if self.dataset_path is not None:
return Tree.load(self.dataset_path)
def items(self):
if False:
yield
def links(self):
if False:
yield
class LinksMixin(DescribedMixin):
"""links"""
tests = ('deny_scheme',
'allow_scheme',
'deny_domain',
'allow_domain',
'deny_url',
'allow_url',)
modifiers = ('drop_fragment',)
allow_schemas = None
deny_schemas = ['javascript', 'mailto',]
allow_domains = None
deny_domains = None
allow_urls = None
deny_urls = [ '*.gif',
'*.jpeg',
'*.jpg',
'*.css',
'*.js',
'*.png',
'*.ico',
'*.xml'
]
unique = True
cmdopts = { 'allow_schemas':{'default':allow_schemas,
'nargs':'+',
'help':'Allow only listed schemas'},
'deny_schemas':{'default':deny_schemas,
'nargs':'+',
'help':'Deny listed schemas'},
'allow_domains':{'default':allow_domains,
'nargs':'+',
'help':'Allow only listed domains (dot started treated as suffixes)'},
'deny_domains':{'default':deny_domains,
'nargs':'+',
'help':'Deny listed domains (dot started treated as suffixes)'},
'allow_urls':{'default':allow_urls,
'nargs':'+',
'help':'Regexps for allowed urls'},
'deny_urls':{'default':deny_urls,
'nargs':'+',
'help':'Regexps for denied urls'},
'no_unique':{'dest':'unique',
'default':unique,
'action':'store_false',
'help':'Disable following unique urls only'},
}
def fnmatch(self, value, matchers, ret):
if any((fnmatch(value, matcher) for matcher in matchers)):
return ret
return not ret
def deny_scheme(self, url):
if not self.deny_schemas:
return True
return self.fnmatch(url.parsed.scheme, self.deny_schemas, False)
def allow_scheme(self, url):
if not self.allow_schemas:
return True
return self.fnmatch(url.parsed.scheme, self.allow_schemas, True)
def deny_domain(self, url):
if not self.deny_domains:
return True
return self.fnmatch(url.parsed.hostname, self.deny_domains, False)
def allow_domain(self, url):
if not self.allow_domains:
return True
return self.fnmatch(url.parsed.hostname, self.allow_domains, True)
def allow_url(self, url):
if not self.allow_urls:
return True
return self.fnmatch(urlunparse((None, None) + url.parsed[2:]),
self.allow_urls,
True)
def deny_url(self, url):
if not self.deny_urls:
return True
return self.fnmatch(urlunparse((None, None) + url.parsed[2:]),
self.deny_urls,
False)
def drop_fragment(self, url):
if not url.parsed.fragment:
return url
else:
return URL(urlunparse(url.parsed[:5] + ('',)))
def allowed(self, url):
return all(getattr(self, test)(url) for test in self.tests)
def modified(self, url):
for modifier in (getattr(self, modifier) for modifier in self.modifiers):
url = modifier(url)
return url
@parser
def links(self, html):
from swarm import swarm
if html is None:
return
html.make_links_absolute(transport.url)
for element, attribute, link, pos in html.iterlinks():
url = URL(link)
if not self.allowed(url):
continue
url = self.modified(url)
if self.unique and self.is_unique(url):
yield url
def is_unique(self, url):
if url in getattr(self, '_urls', []):
return False
elif not hasattr(self, '_urls'):
self._urls = []
self._urls.append(url)
return True
class XpathParserMixin(DescribedMixin):
"""xpath selected content"""
@parser
def items(html):
if False:
yield
class ReadableMixin(DescribedMixin):
"""textual content"""
greed = 1
def items(self):
yield PageText(transport.content, url=transport.url)\
.winner(greed=self.greed)
class CmdlineArgsMixin(object):
@classmethod
def get_opts(cls):
containers = cls.__bases__ + (cls,)
        return dict(ifilter(bool, chain(*(getattr(c, 'cmdopts', {}).items()
                                          for c in containers))))
@classmethod
def populate_parser(cls, parser):
for optname, kwargs in cls.get_opts().items():
parser.add_argument('--%s'%optname.replace('_', '-'), **kwargs)
def __unicode__(self):
descr = 'Extract ' + ' and '.join(self.info())
opts = []
for optname in self.get_opts().keys():
optvalue = getattr(self, optname, None)
if optvalue and not optvalue == getattr(self.__class__, optname, None):
opts += '%s:%s'%(optname, optvalue),
return descr + (' (%s)'%(', '.join(opts)) if opts else '')
class NoContentDatasource(CmdlineArgsMixin, LinksMixin, Datasource):
pass
class XpathContentOnlyDatasource(CmdlineArgsMixin, XpathParserMixin, Datasource):
pass
class XpathDatasource(CmdlineArgsMixin, LinksMixin, XpathParserMixin, Datasource):
pass
class ReadableContentOnlyDatasource(CmdlineArgsMixin, ReadableMixin, Datasource):
pass
class ReadableDatasource(CmdlineArgsMixin, LinksMixin, ReadableMixin, Datasource):
pass
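# Standalone sketch of the allow/deny matching used by LinksMixin above:
# plain fnmatch patterns, where any hit on a deny list vetoes the URL.
def _url_allowed_sketch(path, deny_patterns):
    return not any(fnmatch(path, pattern) for pattern in deny_patterns)

assert _url_allowed_sketch('/index.html', ['*.gif', '*.css'])
assert not _url_allowed_sketch('/logo.gif', ['*.gif', '*.css'])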
|
openlabs/pyes
|
pyes/rivers.py
|
Python
|
bsd-3-clause
| 4,849
| 0.000619
|
class River(object):
def __init__(self, index_name=None, index_type=None, bulk_size=100, bulk_timeout=None):
self.name = index_name
self.index_name = index_name
self.index_type = index_type
self.bulk_size = bulk_size
        self.bulk_timeout = bulk_timeout
def serialize(self):
res = self._serialize()
index = {}
if self.name:
index['name'] = self.name
        if self.index_name:
index['index'] = self.index_name
if self.index_type:
index['type'] = self.index_type
if self.bulk_size:
index['bulk_size'] = self.bulk_size
if self.bulk_timeout:
index['bulk_timeout'] = self.bulk_timeout
if index:
res['index'] = index
return res
def __repr__(self):
return str(self.serialize())
def _serialize(self):
raise NotImplementedError
class RabbitMQRiver(River):
type = "rabbitmq"
def __init__(self, host="localhost", port=5672, user="guest",
password="guest", vhost="/", queue="es", exchange="es",
routing_key="es", **kwargs):
super(RabbitMQRiver, self).__init__(**kwargs)
self.host = host
self.port = port
self.user = user
self.password = password
self.vhost = vhost
self.queue = queue
self.exchange = exchange
self.routing_key = routing_key
def _serialize(self):
return {
"type": self.type,
self.type: {
"host": self.host,
"port": self.port,
"user": self.user,
"pass": self.password,
"vhost": self.vhost,
"queue": self.queue,
"exchange": self.exchange,
"routing_key": self.routing_key
}
}
class TwitterRiver(River):
type = "twitter"
def __init__(self, user=None, password=None, **kwargs):
self.user = user
self.password = password
self.consumer_key = kwargs.pop('consumer_key', None)
self.consumer_secret = kwargs.pop('consumer_secret', None)
self.access_token = kwargs.pop('access_token', None)
self.access_token_secret = kwargs.pop('access_token_secret', None)
# These filters may be lists or comma-separated strings of values
self.tracks = kwargs.pop('tracks', None)
self.follow = kwargs.pop('follow', None)
self.locations = kwargs.pop('locations', None)
super(TwitterRiver, self).__init__(**kwargs)
def _serialize(self):
result = {"type": self.type}
if self.user and self.password:
result[self.type] = {"user": self.user,
"password": self.password}
elif (self.consumer_key and self.consumer_secret and self.access_token
and self.access_token_secret):
result[self.type] = {"oauth": {
"consumer_key": self.consumer_key,
"consumer_secret": self.consumer_secret,
"access_token": self.access_token,
"access_token_secret": self.access_token_secret,
}
}
else:
raise ValueError("Twitter river requires authentication by username/password or OAuth")
filter = {}
if self.tracks:
filter['tracks'] = self.tracks
if self.follow:
filter['follow'] = self.follow
if self.locations:
filter['locations'] = self.locations
if filter:
result[self.type]['filter'] = filter
return result
class CouchDBRiver(River):
type = "couchdb"
def __init__(self, host="localhost", port=5984, db="mydb", filter=None,
filter_params=None, script=None, user=None, password=None,
**kwargs):
super(CouchDBRiver, self).__init__(**kwargs)
self.host = host
self.port = port
self.db = db
self.filter = filter
self.filter_params = filter_params
self.script = script
self.user = user
self.password = password
def serialize(self):
result = {
"type": self.type,
self.type: {
"host": self.host,
"port": self.port,
"db": self.db,
"filter": self.filter,
}
}
if self.filter_params is not None:
result[self.type]["filter_params"] = self.filter_params
if self.script is not None:
result[self.type]["script"] = self.script
if self.user is not None:
result[self.type]["user"] = self.user
if self.password is not None:
result[self.type]["password"] = self.password
return result
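# Illustrative usage only (names and values invented): build a river and
# inspect the dict that would be posted to elasticsearch.
if __name__ == '__main__':
    river = RabbitMQRiver(queue='events', index_name='logs', bulk_size=500)
    # _serialize() supplies the rabbitmq block; serialize() merges in the
    # shared index settings.
    print(river.serialize())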
|
toastedcornflakes/scikit-learn
|
sklearn/feature_extraction/tests/test_image.py
|
Python
|
bsd-3-clause
| 11,187
| 0.000089
|
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
    mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))
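    # Quick numeric check of the patch-count formula used throughout these
    # tests (illustrative only): a 4x4 image with 2x2 patches yields
    # (4 - 2 + 1) ** 2 == 9 patches:
    # patches = extract_patches_2d(np.arange(16, dtype=np.float64).reshape(4, 4), (2, 2))
    # assert patches.shape == (9, 2, 2)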
|
userzimmermann/robotframework-python3
|
src/robot/output/listeners.py
|
Python
|
apache-2.0
| 10,498
| 0.001048
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import add_metaclass, text_type as unicode
import inspect
import os.path
from robot import utils
from robot.errors import DataError
from robot.model import Tags
from .loggerhelper import AbstractLoggerProxy
from .logger import LOGGER
if utils.is_jython:
from java.lang import Object
from java.util import HashMap
class _RecursionAvoidingMetaclass(type):
"""Metaclass to wrap listener methods so that they cannot cause recursion.
Recursion would otherwise happen if one listener logs something and that
message is received and logged again by log_message or message method.
"""
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
if not attr.startswith('_') and inspect.isroutine(value):
dct[attr] = cls._wrap_listener_method(value)
dct['_calling_method'] = False
return type.__new__(cls, name, bases, dct)
@staticmethod
def _wrap_listener_method(method):
def wrapped(self, *args):
if not self._calling_method:
self._calling_method = True
method(self, *args)
self._calling_method = False
return wrapped
@add_metaclass(_RecursionAvoidingMetaclass)
class Listeners(object):
_start_attrs = ('id', 'doc', 'starttime', 'longname')
_end_attrs = _start_attrs + ('endtime', 'elapsedtime', 'status', 'message')
_kw_extra_attrs = ('args', '-id', '-longname', '-message')
def __init__(self, listeners):
self._listeners = self._import_listeners(listeners)
self._running_test = False
self._setup_or_teardown_type = None
def __bool__(self):
return bool(self._listeners)
#PY2
def __nonzero__(self):
return self.__bool__()
def _import_listeners(self, listener_data):
listeners = []
for name, args in listener_data:
try:
listeners.append(ListenerProxy(name, args))
except DataError as err:
if args:
name += ':' + ':'.join(args)
LOGGER.error("Taking listener '%s' into use failed: %s"
% (name, unicode(err)))
return listeners
def start_suite(self, suite):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_suite, suite.name, suite.doc)
else:
attrs = self._get_start_attrs(suite, 'metadata')
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.start_suite, suite.name, attrs)
def _get_suite_attrs(self, suite):
return {
'tests' : [t.name for t in suite.tests],
'suites': [s.name for s in suite.suites],
'totaltests': suite.test_count,
'source': suite.source or ''
}
def end_suite(self, suite):
for listener in self._listeners:
self._notify_end_suite(listener, suite)
def _notify_end_suite(self, listener, suite):
if listener.version == 1:
listener.call_method(listener.end_suite, suite.status,
suite.full_message)
else:
attrs = self._get_end_attrs(suite, 'metadata')
attrs['statistics'] = suite.stat_message
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.end_suite, suite.name, attrs)
def start_test(self, test):
self._running_test = True
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_test, test.name, test.doc,
list(test.tags))
else:
attrs = self._get_start_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.start_test, test.name, attrs)
def end_test(self, test):
self._running_test = False
for listener in self._listeners:
self._notify_end_test(listener, test)
def _notify_end_test(self, listener, test):
if listener.version == 1:
listener.call_method(listener.end_test, test.status, test.message)
else:
attrs = self._get_end_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.end_test, test.name, attrs)
def start_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_keyword, kw.name, kw.args)
else:
attrs = self._get_start_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=True)
listener.call_method(listener.start_keyword, kw.name, attrs)
def end_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.end_keyword, kw.status)
else:
attrs = self._get_end_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=False)
listener.call_method(listener.end_keyword, kw.name, attrs)
def _get_keyword_type(self, kw, start=True):
# When running setup or teardown, only the top level keyword has type
# set to setup/teardown but we want to pass that type also to all
# start/end_keyword listener methods called below that keyword.
if kw.type == 'kw':
return self._setup_or_teardown_type or 'Keyword'
kw_type = self._get_setup_or_teardown_type(kw)
self._setup_or_teardown_type = kw_type if start else None
return kw_type
def _get_setup_or_teardown_type(self, kw):
return '%s %s' % (('Test' if self._running_test else 'Suite'),
kw.type.title())
def log_message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.log_message, self._create_msg_dict(msg))
def message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.message, self._create_msg_dict(msg))
def _create_msg_dict(self, msg):
return {'timestamp': msg.timestamp, 'message': msg.message,
'level': msg.level, 'html': 'yes' if msg.html else 'no'}
    def output_file(self, name, path):
for listener in self._listeners:
listener.call_method(getattr(listener, '%s_file' % name.lower()), path)
def close(self):
for listener in self._listeners:
listener.call_method(listener.close)
def _get_start_attrs(self, item, *extra):
return self._get_attrs(item, self._start_attrs, extra)
def _get_end_attrs(self, item, *extra):
return self._get_attrs(item, self._end_attrs, extra)
def _get_attrs(self, item, default, extra):
names = self._get_attr_names(default, extra)
return dict((n, self._get_attr_value(item, n)) for n in names)
def _get_attr_names(self, default, extra):
names = list(default)
for name in extra:
if not name.startswith('-'):
names.append(name)
elif name[1:]
|
rvanharen/SitC
|
knmi_getdata.py
|
Python
|
apache-2.0
| 5,962
| 0.003187
|
#!/usr/bin/env python2
'''
Description:
Author: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl)
Created: -
Last Modified: -
License: Apache 2.0
Notes: -
'''
from lxml.html import parse
import csv
import urllib2
from lxml import html
import numbers
import json
import os
import utils
from numpy import vstack
import argparse
class get_knmi_reference_data:
'''
description
'''
def __init__(self, opts):
#self.outputdir = opts.outputdir
self.csvfile = opts.csvfile
self.outputdir = opts.outputdir
self.keep = opts.keep
self.check_output_dir()
        self.url = 'http://www.knmi.nl/klimatologie/uurgegevens/'
        if len(opts.stationid) == 0:
            self.get_station_ids()
        else:
            self.stationids = [opts.stationid]
self.download_station_data()
self.get_station_locations()
def get_station_ids(self):
'''
get all stationids from the KNMI website
'''
self.url = 'http://www.knmi.nl/klimatologie/uurgegevens/'
page = parse(self.url)
# get list of ids
rows = page.xpath(".//tbody/@id")
#self.stationids = [int(stationid[3:]) for stationid in rows]
self.stationids = [str(stationid) for stationid in rows]
def download_station_data(self):
page = parse(self.url)
for stationid in self.stationids:
print stationid
relpaths = page.xpath(".//tbody[@id='" + stationid + "']/tr/td/span/a/@href")
for path in relpaths:
fullpath = os.path.join(self.url, path)
request = urllib2.urlopen(fullpath)
filename = os.path.basename(path)
outputfile = os.path.join(self.outputdir, filename)
if self.keep:
if os.path.exists(outputfile):
# check if filesize is not null
if os.path.getsize(outputfile) > 0:
# file exists and is not null, continue next iteration
continue
else:
# file exists but is null, so remove and redownload
os.remove(outputfile)
elif os.path.exists(outputfile):
os.remove(outputfile)
#save
output = open(outputfile, "w")
output.write(request.read())
output.close()
def get_station_locations(self):
# get station names for stationids
url = 'http://www.knmi.nl/klimatologie/metadata/stationslijst.html'
page = parse(url)
url_metadata = page.xpath(".//table/tr/td/a/@href")
station_name_id = [c.text for c in page.xpath(".//table/tr/td/a")]
station_id = [s.split()[0] for s in station_name_id]
station_names = [" ".join(s.split()[1:]) for s in station_name_id]
for idx, stationid in enumerate(station_id):
station_url = os.path.join(os.path.split(url)[0],
url_metadata[idx])
page = parse(station_url)
rows = [c.text for c in page.xpath(".//table/tr/td")]
idx_position = rows.index('Positie:') + 1
idx_startdate = rows.index('Startdatum:') + 1
lat, lon = rows[idx_position].encode('UTF-8').replace(
'\xc2\xb0','').replace(' N.B. ', ',').replace(
'O.L.','').strip().split(',')
lat,lon = self.latlon_conversion(lat,lon)
try:
dataout = vstack((dataout,
[station_id[idx], station_names[idx],
lat, lon, station_url]))
except NameError:
dataout = [station_id[idx], station_names[idx],
lat, lon, station_url]
header = ['station_id', 'station_name','latitude', 'longitude', 'url']
dataout = vstack((header, dataout))
# write to csv file
utils.write_csvfile(self.csvfile, dataout)
# get station locations
pass
def latlon_conversion(self, lat, lon):
'''
conversion of GPS position to lat/lon decimals
example string for lat and lon input: "52 11'"
'''
# latitude conversion
latd = lat.replace("'","").split()
lat = float(latd[0]) + float(latd[1])/60
# longitude conversion
lond = lon.replace("'","").split()
lon = float(lond[0]) + float(lond[1])/60
return lat,lon
def check_output_dir(self):
'''
check if outputdir exists and create if not
'''
if not os.path.exists(self.outputdir):
os.makedirs(self.outputdir)
if __name__ == "__main__":
# define argument menu
description = 'Get data KNMI reference stations'
parser = argparse.ArgumentParser(description=description)
# fill argument groups
    parser.add_argument('-o', '--outputdir', help='Data output directory',
default=os.path.join(os.getcwd(),'KNMI'),
required=False)
parser.add_argument('-s', '--stationid', help='Station id',
default='', required=False, action='store')
parser.add_argument('-c', '--csvfile', help='CSV data file',
required=True, action='store')
parser.add_argument('-k', '--keep', help='Keep downloaded files',
                        required=False, action='store_true')
parser.add_argument('-l', '--log', help='Log level',
choices=utils.LOG_LEVELS_LIST,
default=utils.DEFAULT_LOG_LEVEL)
# extract user entered arguments
opts = parser.parse_args()
# define logger
logname = os.path.basename(__file__) + '.log'
logger = utils.start_logging(filename=logname, level=opts.log)
# process data
get_knmi_reference_data(opts)
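# A minimal standalone sketch (assumed equivalent to latlon_conversion
# above): KNMI degree/minute strings such as "52 11'" become decimal
# degrees via degrees + minutes / 60.
def dm_to_decimal(value):
    degrees, minutes = value.replace("'", "").split()
    return float(degrees) + float(minutes) / 60.0

assert abs(dm_to_decimal("52 11'") - (52 + 11 / 60.0)) < 1e-9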
|