repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Fillll/reddit2telegram | reddit2telegram/channels/~inactive/r_jacksepticeye/app.py | Python | mit | 149 | 0.006711 | #encoding:utf-8
subreddit = 'jacksepticeye'
t_channel = '@r_jackseptice | ye'
def send_post(subm | ission, r2t):
return r2t.send_simple(submission)
|
ininex/geofire-python | resource/lib/python2.7/site-packages/gcloud/streaming/buffered_stream.py | Python | mit | 2,444 | 0 | """Small helper class to provide a small slice of a stream.
This class reads ahead to detect if we are at the end of the stream.
"""
class BufferedStream(object):
"""Buffers a stream, reading ahead to determine if we're at the end.
:type stream: readable file-like object
:param stream: the stream to be buffered
:type start: integer
:param start: the starting point in the stream
:type size: integer
:param size: the size of the buffer
"""
def __init__(self, stream, start, size):
self._stream = stream
self._start_pos = start
self._buffer_pos = 0
self._buffered_data = self._stream.read(size)
self._stream_at_end = len(self._buffered_data) < size
self._end_pos = self._start_pos + len(self._buffered_data)
def __repr__(self):
return ('Buffered stream %s from position %s-%s with %s '
'bytes remaining' % (self._stream, self._start_pos,
self._end_pos, self._bytes_remaining))
def __len__(self):
return len(self._buffered_data)
@property
def stream_exhausted(self):
"""Does the stream have bytes remaining beyond the buffer
:rtype: boolean
"""
return self._stream_at_end
@property
def stream_end_position(self):
"""Point to which stream was | read into the buffer
:rtype: integer
"""
return self._end_pos
@property
def _bytes_remaining(self):
"""Bytes remaining to be read from the buffer
:rtype: integer
"""
return len(self._buffered_data) - self._buffer_pos
def read(self, size=None):
"""Read bytes from the buffer.
:type size: integer or None
:param size: How many | bytes to read (defaults to all remaining bytes).
"""
if size is None or size < 0:
raise ValueError(
'Illegal read of size %s requested on BufferedStream. '
'Wrapped stream %s is at position %s-%s, '
'%s bytes remaining.' %
(size, self._stream, self._start_pos, self._end_pos,
self._bytes_remaining))
if not self._bytes_remaining:
return b''
size = min(size, self._bytes_remaining)
data = self._buffered_data[self._buffer_pos:self._buffer_pos + size]
self._buffer_pos += size
return data
|
lsaffre/timtools | timtools/sdoc/barcodes.py | Python | bsd-2-clause | 4,217 | 0.030828 | ## Copyright 2003-2009 Luc Saffre
## This file is part of the TimTools project.
## TimTools is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## TimTools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with TimTools; if not, see <http://www.gnu.org/licenses/>.
"""
these classes are for use by PdfRenderer (defined in pdf.py)
"""
from reportlab.platypus.flowables import Flowable
from reportlab.lib import colors
from reportlab.lib.units import inch, mm
import bookland
"""
barCodeSymbol.moduleHeight
"""
class Bar:
def __init__(self,bit,long=False):
self.bit = bit
self.long = long
self.width = 1 # thickness in modules
def inc(self):
self.width += 1
def draw(self,canvas,barCodeSymbol,x):
if self.bit == "0":
return
canvas.setLineWidth(self.width*barCodeSymbol.moduleWidth*inch)
#canvas.setFillColor(self.fillcolor)
canvas.setStrokeColor(colors.black)
top = barCodeSymbol.moduleHeight * inch
bottom = 12
if self.bit == "L":
bottom -= 6
# x = x * barCodeSymbol.moduleWidth
canvas | .line(x,bottom,x,top)
class BarcodeFlowable(Flowable):
'''A barcode symbol as flowable. Works only with EAN13.'''
def __init__(self, barCodeSymbol):
Flowable.__init__(self)
assert hasattr(barCodeSymbol,'font')
"name of font to use for printing human-readable text"
assert hasattr(barCo | deSymbol,'bits')
"""string of 0,1 and L characters. each character stands for one
"bit" or "module". 0 means white, 1 means black, L means long
black"""
assert hasattr(barCodeSymbol,'moduleHeight')
"module height in points"
assert hasattr(barCodeSymbol,'patternWidth')
"width in inch"
self.lightMargin = 9 * barCodeSymbol.moduleWidth * inch
self.barCodeSymbol = barCodeSymbol
self.width = self.barCodeSymbol.patternWidth * inch \
+ self.lightMargin * 2
self.height = self.barCodeSymbol.moduleHeight * inch
#self.vAlign = "TOP"
#self.hAlign="LEFT"
#self.leftIndent=0
## def wrap(self, *args):
## # print "x = ", self.size[0] / mm
## # print "y = ", self.size[1] / mm
## # print self.barCodeSymbol.bits
## return self.size
def draw(self):
canvas = self.canv
# print canvas.pagesize
#canvas.setLineWidth(6)
#canvas.setFillColor(self.fillcolor)
#canvas.setStrokeColor(self.strokecolor)
#canvas.translate(self.xoffset+self.size,0)
#canvas.rotate(90)
#canvas.scale(self.scale, self.scale)
#hand(canvas, debug=0, fill=1)
bars = []
bar=Bar(self.barCodeSymbol.bits[0])
for bit in self.barCodeSymbol.bits[1:]:
if bit==bar.bit:
bar.inc()
else:
bars.append(bar)
bar=Bar(bit)
bars.append(bar)
textHeight = 6
textMargin = 5
x = self.lightMargin + 1
for bar in bars:
bar.draw(canvas,self.barCodeSymbol,x)
x += bar.width * self.barCodeSymbol.moduleWidth * inch
# canvas.setFont(self.barCodeSymbol.font,12)
# canvas.setFont("OCR-B",12)
canvas.setFont("Helvetica",11)
canvas.drawString(0,0 ,
self.barCodeSymbol.ean13.n[0])
canvas.drawString(self.lightMargin+textMargin,0 ,
self.barCodeSymbol.ean13.n[1:7])
canvas.drawString(self.lightMargin+textMargin
+6*7*self.barCodeSymbol.moduleWidth*inch
+3,0 ,
self.barCodeSymbol.ean13.n[7:])
canvas.setLineWidth(0.0001)
# canvas.rect(0,0,self.width,self.height)
|
wavesoft/LiveQ | liveq-common/liveq/utils/FLAT.py | Python | gpl-2.0 | 3,333 | 0.031203 | ################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
import numpy
import re
def parseFLATBuffer(buf, index=True):
"""
Parse FLAT buffer and return the structured data
"""
sections_list = []
section = None
activesection = None
# Pick appropriate return format
sections = None
if index:
sections = {}
else:
sections = []
# Start processing the buffer line-by-line
for line in buf.splitlines():
# Process lines
if not | line:
# Empty line
pass
elif "# BEGIN " in line:
# Ignore labels found some times in AIDA files
dat = line.split(" ")
section = dat[2]
sectiontype = 0
# Get additional section title
title = ""
if len(dat) > 3:
title = " ".join(dat[3:])
# Allocate section record
activesection = { "d": { }, "v": [ ], "t": title }
elif ("# END " in line) and (section != None):
# Section end
if index | :
sections[section] = activesection
else:
activesection['n'] = section
sections.append(activesection)
section = None
elif line.startswith("#") or line.startswith(";"):
# Comment
pass
elif section:
# Data inside section
# "SPECIAL" section is not parsable here
if section == "SPECIAL":
continue
# Try to split
data = line.split("=",1)
# Could not split : They are histogram data
if len(data) == 1:
# Split data values
data = FLATParser.WHITESPACE.split(line.strip())
# Check for faulty values
if len(data) < 3:
continue
# Otherwise collect
activesection['v'].append( numpy.array(data, dtype=numpy.float64) )
else:
# Store value
activesection['d'][data[0]] = data[1]
# Return sections
return sections
class FLATParser:
"""
Simple function to parser histograms in FLAT format
"""
# Precompiled regex entry
WHITESPACE = re.compile("\s+")
@staticmethod
def parseFileObject(fileobject, index=True):
"""
Function to read a FLAT file (by the file object descriptor) into python structures
"""
# Read entire file and use parseBuffer
return parseFLATBuffer(fileobject.read(), index)
@staticmethod
def parse(filename, index=True):
"""
Function to read a FLAT file into python structures
"""
# Open file
with open(filename, 'r') as f:
# Use FileObject parser to read the file
return parseFLATBuffer(f.read(), index)
def parseBuffer(buf, index=True):
"""
Parse FLAT file from buffer
"""
return parseFLATBuffer(buf, index) |
8l/beri | cheritest/trunk/tests/mt/test_ipc.py | Python | apache-2.0 | 1,772 | 0.004515 | #-
# Copyright (c) 2014 Robert M. Norton
# All rights reserved.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICE | NSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
@attr('mt')
class test_ipc(BaseBERITestCase):
def test_cause_t0(self):
self.assertRegisterEqual(s | elf.MIPS.threads[0].s0 & 0xffff, 0x800, "Thread 0 cause register not interrupt on IP3")
def test_epc_t0(self):
expected_epc=self.MIPS.threads[0].s2
self.assertRegisterInRange(self.MIPS.threads[0].s1, expected_epc, expected_epc + 4, "Thread 0 epc register not expected_epc")
def test_cause_t1(self):
self.assertRegisterEqual(self.MIPS.threads[1].s0 & 0xffff, 0x400, "Thread 1 cause register not interrupt on IP2")
def test_epc_t1(self):
expected_epc=self.MIPS.threads[0].s3
self.assertRegisterInRange(self.MIPS.threads[1].s1, expected_epc, expected_epc + 4, "Thread 1 epc register not expected_epc")
|
erickpeirson/django-ncbi | ncbi/ncbi/wsgi.py | Python | gpl-3.0 | 385 | 0 | """
WSGI config for ncbi project.
It exposes the WSG | I callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ncbi.settings")
application = get_wsgi_applicat | ion()
|
espdev/readthedocs.org | readthedocs/rtd_tests/tests/test_footer.py | Python | mit | 3,568 | 0.003363 | import json
import mock
from django.test import TestCase
from django.test.client import RequestFactory
from readthedocs.core.middleware import FooterNoSessionMiddleware
from readthedocs.rtd_tests.mocks.paths import fake_paths_by_regex
from readthedocs.projects.models import Project
class Testmaker(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
self.latest = self.pip.versions.create_latest()
def test_footer(self):
r = self.client.get('/api/v2/footer_html/?project=pip&version=latest&page=index', {})
resp = json.loads(r.content)
self.assertEqual(resp['version_active'], True)
self.assertEqual(resp['version_compare']['is_highest'], True)
self. | assertE | qual(resp['version_supported'], True)
self.assertEqual(r.context['main_project'], self.pip)
self.assertEqual(r.status_code, 200)
self.latest.active = False
self.latest.save()
r = self.client.get('/api/v2/footer_html/?project=pip&version=latest&page=index', {})
resp = json.loads(r.content)
self.assertEqual(resp['version_active'], False)
self.assertEqual(r.status_code, 200)
def test_footer_uses_version_compare(self):
version_compare = 'readthedocs.restapi.views.footer_views.get_version_compare_data'
with mock.patch(version_compare) as get_version_compare_data:
get_version_compare_data.return_value = {
'MOCKED': True
}
r = self.client.get('/api/v2/footer_html/?project=pip&version=latest&page=index', {})
self.assertEqual(r.status_code, 200)
resp = json.loads(r.content)
self.assertEqual(resp['version_compare'], {'MOCKED': True})
def test_pdf_build_mentioned_in_footer(self):
with fake_paths_by_regex('\.pdf$'):
response = self.client.get(
'/api/v2/footer_html/?project=pip&version=latest&page=index', {})
self.assertContains(response, 'pdf')
def test_pdf_not_mentioned_in_footer_when_build_is_disabled(self):
self.pip.enable_pdf_build = False
self.pip.save()
with fake_paths_by_regex('\.pdf$'):
response = self.client.get(
'/api/v2/footer_html/?project=pip&version=latest&page=index', {})
self.assertNotContains(response, 'pdf')
def test_epub_build_mentioned_in_footer(self):
with fake_paths_by_regex('\.epub$'):
response = self.client.get(
'/api/v2/footer_html/?project=pip&version=latest&page=index', {})
self.assertContains(response, 'epub')
def test_epub_not_mentioned_in_footer_when_build_is_disabled(self):
self.pip.enable_epub_build = False
self.pip.save()
with fake_paths_by_regex('\.epub$'):
response = self.client.get(
'/api/v2/footer_html/?project=pip&version=latest&page=index', {})
self.assertNotContains(response, 'epub')
def test_no_session_logged_out(self):
mid = FooterNoSessionMiddleware()
factory = RequestFactory()
# Null session here
request = factory.get('/api/v2/footer_html/')
mid.process_request(request)
self.assertEqual(request.session, {})
# Proper session here
home_request = factory.get('/')
mid.process_request(home_request)
self.assertTrue(home_request.session.TEST_COOKIE_NAME, 'testcookie')
|
ChristianSch/crime-stats-nlp | conf.py | Python | mit | 8,233 | 0.006195 | # -*- coding: utf-8 -*-
#
# crime-nlp-cfgi documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 19 17:51:24 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'crime-nlp-cfgi'
copyright = u'2015, Christian Schulze'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, rel | ative to source directory, that | match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'crime-nlp-cfgidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'crime-nlp-cfgi.tex', u'crime-nlp-cfgi Documentation',
u'Christian Schulze', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'crime-nlp-cfgi', u'crime-nlp-cfgi Documentation',
[u'Christian Schulze'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'crime-nlp-cfgi', u'crime-nlp-cfgi Documentation',
u'Christian Schulze', 'crime-nlp-cfgi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in t |
zhinaonet/sqlmap-z | lib/core/agent.py | Python | gpl-3.0 | 50,858 | 0.004031 | #!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import getSQLSnippet
from lib.core.common import getUnicode
from lib.core.common import isDBMSVersionAtLeast
from lib.core.common import isNumber
from lib.core.common import isTechniqueAvailable
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import safeStringFormat
from lib.core.common import singleTimeWarnMessage
from lib.core.common import splitFields
from lib.core.common import unArrayizeValue
from lib.core.common import urlencode
from lib.core.common import zeroDepthSearch
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import queries
from lib.core.dicts import DUMP_DATA_PREPROCESS
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.enums import DBMS
from lib.core.enums import HTTP_HEADER
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.enums import POST_HINT
from lib.core.exception import SqlmapNoneDataException
from lib.core.settings import BOUNDARY_BACKSLASH_MARKER
from lib.core.settings import BOUNDED_INJECTION_MARKER
from lib.core.settings import DEFAULT_COOKIE_DELIMITER
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import GENERIC_SQL_COMMENT
from lib.core.settings import NULL
from lib.core.settings import PAYLOAD_DELIMITER
from lib.core.settings import REPLACEMENT_MARKER
from lib.core.settings import SLEEP_TIME_MARKER
from lib.core.unescaper import unescaper
class Agent(object):
"""
This class defines the SQL agent methods.
"""
def payloadDirect(self, query):
query = self.cleanupPayload(query)
if query.upper().startswith("AND "):
query = re.sub(r"(?i)AND ", "SELECT ", query, 1)
elif query.upper().startswith(" UNION ALL "):
query = re.sub(r"(?i) UNION ALL ", "", query, 1)
elif query.startswith("; "):
query = query.replace("; ", "", 1)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE,): # non-standard object(s) make problems to a database connector while returned (e.g. XMLTYPE)
_, _, _, _, _, _, fieldsToCastStr, _ = self.getFields(query)
for field in fieldsToCastStr.split(','):
query = query.replace(field, self.nullAndCastField(field))
if kb.tamperFunctions:
for function in kb.tamperFunctions:
query = function(payload=query)
return query
def payload(self, place=None, parameter=None, value=None, newValue=None, where=None):
"""
This method replaces the affected parameter with the SQL
injection statement to request
"""
if conf.direct:
return self.payloadDirect(newValue)
retVal = ""
if kb.forceWhere:
where = kb.forceWhere
elif where is None and isTechniqueAvailable(kb.technique):
where = kb.injection.data[kb.technique].where
if kb.injection.place is not None:
place = kb.injection.place
if kb.injection.parameter is not None:
parameter = kb.injection.parameter
paramString = conf.parameters[place]
paramDict = conf.paramDict[place]
origValue = getUnicode(paramDict[parameter])
if place == PLACE.URI or BOUNDED_INJECTION_MARKER in origValue:
paramString = origValue
if place == PLACE.URI:
origValue = origValue.split(kb.customInjectionMark)[0]
else:
origValue = filter(None, (re.search(_, origValue.split(BOUNDED_INJECTION_MARKER)[0]) for _ in (r"\w+\Z", r"[^\"'><]+\Z", r"[^ ]+\Z")))[0].group(0)
origValue = origValue[origValue.rfind('/') + 1:]
for char in ('?', '=', ':'):
if char in origValue:
origValue = origValue[origValue.rfind(char) + 1:]
elif place == PLACE.CUSTOM_POST:
paramString = origValue
origValue = origValue.split(kb.customInjectionMark)[0]
if kb.postHint in (POST_HINT.SOAP, POST_HINT.XML):
origValue = origValue.split('>')[-1]
elif kb.postHint in (POST_HINT.JSON, POST_HINT.JSON_LIKE):
origValue = extractRegexResult(r"(?s)\"\s*:\s*(?P<result>\d+\Z)", origValue) or extractRegexResult(r'(?s)\s*(?P<result>[^"\[,]+\Z)', origValue)
else:
_ = extractRegexResult(r"(?s)(?P<result>[^\s<>{}();'\"&]+\Z)", origValue) or ""
origValue = _.split('=', 1)[1] if '=' in _ else ""
elif place == PLACE.CUSTOM_HEADER:
paramString = origValue
origValue = origValue.split(kb.customInjectionMark)[0]
origValue = origValue[origValue.find(',') + 1:]
match = re.search(r"([^;]+)=(?P<value>[^;]+);?\Z", origValue)
if match:
origValue = match.group("value")
elif ',' in paramString:
header = paramString.split(',')[0]
if header.upper() == HTTP_HEADER.AUTHORIZATION.upper():
origValue = origValue.split(' ')[-1].split(':')[-1]
origValue = origValue or ""
if value is None:
if where == PAYLOAD.WHERE.ORIGINAL:
value = origValue
elif where == PAYLOAD.WHERE.NEGATIVE:
if conf.invalidLogical:
match = re.search(r'\A[^ ]+', newValue)
newValue = newValue[len(match.group() if match else ""):]
_ = randomInt(2)
value = "%s%s AND %s=%s" % (origValue, match.group() if match else "", _, _ + 1)
elif conf.invalidBignum:
value = randomInt(6)
elif conf.invalidString:
value = randomStr(6)
else:
if newValue.startswith("-"):
value = ""
else:
value = "-%s" % randomInt()
elif where == PAYLOAD.WHERE.REPLACE:
value = ""
else:
value = origValue
newValue = "%s%s" % (value, newValue)
newValue = self.cleanupPayload(newValue, origValue)
if place in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER):
| _ = "%s%s" % (origValue, kb.customInjectionMark)
if kb.postHint == POST_HINT.JSON and not isNumber(newValue) and not '"%s"' % _ in paramString:
newValue = '"%s"' % newValue
elif kb.postHint == POST_HINT.JSON_LIKE and not isNumber(newValue) and not "'%s'" % _ in paramString:
newValue = "'%s'" % newValue
newValue = newValue.replace(kb.customInjectionMark, REPLACEMENT_MARKER)
| retVal = paramString.replace(_, self.addPayloadDelimiters(newValue))
retVal = retVal.replace(kb.customInjectionMark, "").replace(REPLACEMENT_MARKER, kb.customInjectionMark)
elif BOUNDED_INJECTION_MARKER in paramDict[parameter]:
_ = "%s%s" % (origValue, BOUNDED_INJECTION_MARKER)
retVal = "%s=%s" % (re.sub(r" (\#\d\*|\(.+\))\Z", "", parameter), paramString.replace(_, self.addPayloadDelimiters(newValue)))
elif place in (PLACE.USER_AGENT, PLACE.REFERER, PLACE.HOST):
retVal = paramString.replace(origValue, self.addPayloadDelimiters(newValue))
else:
def _(pattern, repl, string):
retVal = string
match = None
for match in re.finditer(pattern, string):
pass
if match:
while True:
_ = re.search(r"\\g<([^>]+)>", repl)
if _:
try:
repl = repl.replace(_.group(0), match.group(int(_.group(1)) if _.group(1).isdigit() else _.group(1)))
|
jopamer/swift | utils/incrparse/incr_transfer_tree.py | Python | apache-2.0 | 3,699 | 0.002974 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from test_util import TestFailedError, run_command, \
serializeIncrParseMarkupFile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Utility for testing incremental syntax tree transfer',
epilog='''
Based of a single template the utility generates a pre-edit and a post-edit
file. It then verifies that the incrementally transferred syntax tree
matches the syntax tree passed as --expected-incremental-syntax-tree.
To generate the pre-edit and the post-edit file from the template, it
operates on markers of the form:
<<test_case<pre|||post>>>
These placeholders are replaced by:
- 'pre' if a different test case than 'test_case' is run
- 'pre' for the pre-edit version of 'test_case'
- 'post' for the post-edit version of 'test_case'
''')
parser.add_argument(
'file', type=argparse.FileType(),
help='The template file to test')
parser.add_argument(
'--test-case', default='',
help='The test case to execute. If no test case is specified all '
'unnamed substitutions are applied')
parser.add_argument(
'--temp-dir', required=True,
help='A temporary directory where pre-edit and post-edit files can be '
'saved')
parser.add_argument(
'--swift-syntax-test', required=True,
help='The path to swift-syntax-test')
parser.add_argument(
'--expected-incremental-syntax-tree', required=True,
help='The path to a file | that contains the expected incrementally '
'transferred syntax tree')
args = parser.parse_args(sys.argv[1:])
test_file = args.file.name
test_file_name = os.path.basename(test_file)
test_case = ar | gs.test_case
temp_dir = args.temp_dir
swift_syntax_test = args.swift_syntax_test
expected_syntax_tree_file = args.expected_incremental_syntax_tree
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
incremental_serialized_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.incr.json'
try:
serializeIncrParseMarkupFile(test_file=test_file,
test_case=test_case,
mode='incremental',
serialization_mode='incremental',
omit_node_ids=False,
output_file=incremental_serialized_file,
temp_dir=temp_dir + '/temp',
swift_syntax_test=swift_syntax_test,
print_visual_reuse_info=False)
except TestFailedError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print(e.message, file=sys.stderr)
sys.exit(1)
# Check if the two syntax trees are the same
try:
run_command(
[
'diff', '-u',
incremental_serialized_file,
expected_syntax_tree_file
])
except subprocess.CalledProcessError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print('Syntax tree of incremental parsing does not match expected '
'incrementally transfer syntax tree:\n\n', file=sys.stderr)
print(e.output, file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
mkuiack/tkp | tkp/telescope/lofar/beam.py | Python | bsd-2-clause | 1,207 | 0.004143 | """
Beam characterization calculations.
For more information and the math behind this code go to the `LOFAR imaging
capabilities page
<http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/lofar-imaging-capabilities/lofa>`_.
"""
import math
def fwhm(lambda_, d, alpha1=1.3):
"""
The nominal Full Width Half Maximum (FWHM) of a LOFAR Station beam.
:param lambda_: wavelength in meters
:param d: station diameter.
:param alpha1: depends on the tapering intrinsic to the layout of the station,
| and any additional tapering which may be used to form the
station beam. No electronic tapering is presently applied to
LOFAR station beamforming. For a uniformly illuminated circular
aperture, alpha1 takes the value of 1.02, and the value increases
with tapering (Napier 1999).
:returns: the nominal Full Width Half Maximum (FWHM)
"""
return alpha1 * lambda_ / d
def fov(fwhm):
| """
The Field of View (FoV) of a LOFAR station
:param fwhm: nominal Full Width Half Maximum, caulculated with :func:`fwhm`.
"""
return math.pi * ((fwhm / 2) ** 2) |
guaix-ucm/pyemir | emirdrp/tools/merge_bounddict_files.py | Python | gpl-3.0 | 3,275 | 0.000611 | #
# Copyright 2008-2018 Universidad Complutense de Madrid
#
# This file is part of PyEmir
#
# PyEmir is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyEmir is distri | buted in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyEmir. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import division
from __future__ import print_function
import argparse
from copy import deepcopy
from | datetime import datetime
import json
import os
import sys
from uuid import uuid4
from numina.tools.arg_file_is_new import arg_file_is_new
from emirdrp.core import EMIR_NBARS
def main(args=None):
# parse command-line options
parser = argparse.ArgumentParser()
# positional arguments
parser.add_argument("filename",
help="TXT with list of bounddict files",
type=argparse.FileType('rt'))
parser.add_argument("--outfile", required=True,
help="Output merged JSON file",
type=lambda x: arg_file_is_new(parser, x))
# optional arguments
parser.add_argument("--echo",
help="Display full command line",
action="store_true")
args = parser.parse_args()
if args.echo:
print('\033[1m\033[31mExecuting: ' + ' '.join(sys.argv) + '\033[0m\n')
# initialize empty output
bounddict = {}
# read list of JSON files to be merged
file_content = args.filename.read().splitlines()
next_file_is_first = True
for line in file_content:
if len(line) > 0:
if line[0] != '#':
tmpfile = line.split()[0]
if not os.path.isfile(tmpfile):
raise ValueError("File " + tmpfile + " not found!")
tmpbounddict = json.loads(open(tmpfile).read())
if next_file_is_first:
bounddict = deepcopy(tmpbounddict)
# update some values
bounddict['meta_info']['creation_date'] = \
datetime.now().isoformat()
bounddict['uuid'] = str(uuid4())
next_file_is_first = False
else:
for islitlet in range(EMIR_NBARS):
cslitlet = "slitlet" + str(islitlet).zfill(2)
if cslitlet in tmpbounddict['contents']:
for dateobs in tmpbounddict['contents'][cslitlet]:
bounddict['contents'][cslitlet][dateobs] = \
tmpbounddict['contents'][cslitlet][dateobs]
# save merged JSON file
with open(args.outfile.name, 'w') as fstream:
json.dump(bounddict, fstream, indent=2, sort_keys=True)
print('>>> Saving file ' + args.outfile.name)
if __name__ == "__main__":
main() |
sbuss/voteswap | users/migrations/0014_signuplog.py | Python | mit | 928 | 0.002155 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-03 02:38
from __future__ | import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(m | igrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0013_auto_20161002_0504'),
]
operations = [
migrations.CreateModel(
name='SignUpLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('referer', models.CharField(max_length=255, null=True)),
('ip', models.CharField(max_length=255, null=True)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
zmap/ztag | ztag/annotations/FtpLacie.py | Python | apache-2.0 | 3,497 | 0.002574 | import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag.annotation import Type
from ztag.annotation import Manufacturer
from ztag import protocols
import ztag.test
class FtpLacie(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
product_re_dict = {
"2Big": re.compile("^220 LaCie-2big(-NAS)? FTP Server", re.IGNORECASE),
"5Big": re.compile(
"^220 LaCie-5big(-NAS)?(-Pro)? FTP Server",
re.IGNORECASE
),
"D2": re.compile("^220 LaCie-d2 FTP Server", re.IGNORECASE),
"CloudBox": re.compile(
"^220 LaCie-CloudBox FTP Server",
re.IGNORECASE
),
"Network Space 2": re.compile(
"^220 NetworkSpace2 FTP Server",
re.IGNORECASE
),
"": re.compile(
"^220 LaCie(-NAS)? FTP Server \[\d+\.\d+\.\d+\.\d+\]",
re.IGNORECASE
),
}
tests = {
"FtpLacie_1": {
"global_metadata": {
"device_type": Type.NAS,
"manufacturer": Manufacturer.LACIE,
"product": "Network Space 2"
}
},
"FtpLacie_2": {
"global_metadata": {
"device_type": Type.NAS,
"manufacturer": Manufacturer.LACIE,
"product": "CloudBox"
}
},
"FtpLacie_3": {
"global_metadata": {
"device_type": Type.NAS,
"manufacturer": Manufacturer.LACIE,
"product": "5Big"
}
}
}
def process(self, obj, meta):
banner = obj["banner"]
found = ""
for product, regex in self.product_re_dict.items():
if regex.search(banner):
meta.global_metadata.device_type = Type.NAS
m | eta.global_metadata.manufacturer = Manufacturer.LACIE
meta.global_metadata.product = product
return meta
""" Tests
"220 NetworkSpace2 FTP Server [10.0.1.50]\r\n"
"220 LaCie-CloudBox FTP Serve | r [192.168.0.16]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.0.21]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.1.197]\r\n"
"220 NetworkSpace2 FTP Server [192.168.100.17]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.1.11]\r\n"
"220 NetworkSpace2 FTP Server [192.168.0.26]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.1.93]\r\n"
"220 LaCie-2big FTP Server [192.168.1.206]\r\n"
"220 NetworkSpace2 FTP Server [192.168.0.13]\r\n"
"220 NetworkSpace2 FTP Server [192.168.0.19]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.0.100]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.1.59]\r\n"
"220 NetworkSpace2 FTP Server [192.168.0.100]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.1.3]\r\n"
"220 NetworkSpace2 FTP Server [192.168.1.24]\r\n"
"220 LaCie-2big FTP Server [192.168.178.31]\r\n"
"220 LaCie-CloudBox FTP Server [192.168.1.12]\r\n"
"220 NetworkSpace2 FTP Server [192.168.0.128]\r\n"
"220 LaCie-5big FTP Server [192.168.0.5]\r\n"
"220 LaCie-d2 FTP Server [192.168.10.152]\r\n"
"220 LaCie-5big-Pro FTP Server [::ffff:192.168.1.24]\r\n"
"220 LaCie-2big-NAS FTP Server [192.168.1.250]\r\n"
"220 NetworkSpace2 FTP Server [192.168.0.103]\r\n"
"220 NetworkSpace2 FTP Server [192.168.1.39]\r\n"
"220 LaCie FTP Server [192.168.1.34]\r\n"
"""
|
tropp/acq4 | acq4/devices/SutterMP285/SutterMP285.py | Python | mit | 18,485 | 0.011577 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from acq4.devices.Device import *
from acq4.devices.OptomechDevice import *
#import serial, struct
from acq4.drivers.SutterMP285 import *
from acq4.drivers.SutterMP285 import SutterMP285 as SutterMP285Driver ## name collision with device class
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
import acq4.util.debug as debug
import os, time
#import pdb
import devTemplate
#import functions as fn
import acq4.pyqtgraph as pg
import numpy as np
from copy import deepcopy
class SutterMP285(Device, OptomechDevice):
sigPositionChanged = QtCore.Signal(object)
sigLimitsChanged = QtCore.Signal(object)
def __init__(self, dm, config, name):
Device.__init__(self, dm, config, name)
OptomechDevice.__init__(self, dm, config, name)
self.config = config
self.configFile = os.path.join('devices', name + '_config.cfg')
self.lock = Mutex(QtCore.QMutex.Recursive)
self.port = config['port'] ## windows com ports start at COM1, pyserial ports start at 0
self.scale = config.pop('scale', (1, 1, 1))
# Interpret "COM1" as port 0
if isinstance(self.port, basestring) and self.port.lower()[:3] == 'com':
self.port = int(self.port[3:]) - 1
self.baud = config.get('baud', 9600) ## 9600 is probably factory default
self.pos = [0, 0, 0]
self.limits = [
[[0,False], [0,False]],
[[0,False], [0,False]],
[[0,False], [0,False]]
]
self.maxSpeed = 1e-3
self.loadConfig()
self.mp285 = SutterMP285Driver(self.port, self.baud)
self.driverLock = Mutex(QtCore.QMutex.Recursive)
self.mThread = SutterMP285Thread(self, self.mp285, self.driverLock, self.scale, self.limits, self.maxSpeed)
self.mThread.sigPositionChanged.connect(self.posChanged)
self.mThread.start()
dm.declareInterface(name, ['stage'], self)
def loadConfig(self):
cfg = self.dm.readConfigFile(self.configFile)
if 'limits' in cfg:
self.limits = cfg['limits']
if 'maxSpeed' in cfg:
self.maxSpeed = cfg['maxSpeed']
def storeConfig(self):
cfg = {
'limits': self.limits,
'maxSpeed': self.maxSpeed,
}
self.dm.writeConfigFile(cfg, self.configFile)
def setLimit(self, axis, limit, val=None, enabled=None):
if val is not None:
self.limits[axis][limit][0] = val
elif enabled is not None:
self.limits[axis][limit][1] = enabled
self.mThread.setLimits(self.limits)
self.storeConfig()
def getLimit(self):
return(self.limits)
def setMaxSpeed(self, val):
self.mThread.setMaxSpeed(val)
self.maxSpeed = val
self.storeConfig()
def setResolution(self, res):
self.mThread.setResolution(res)
def quit(self):
#print "serial SutterMP285 requesting thread exit.."
self.mThread.stop(block=True)
def posChanged(self, data):
with self.lock:
rel = [0] * len(self.pos)
if 'rel' in data:
rel[:len(data['rel'])] = data['rel']
else:
rel[:len(data['abs'])] = [data['abs'][i] - self.pos[i] for i in range(len(data['abs']))]
self.pos[:len(data['abs'])] = data['abs']
self.sigPositionChanged.emit({'rel': rel, 'abs': self.pos[:]})
tr = pg.SRTTransform3D()
tr.translate(*self.pos)
self.setDeviceTransform(tr) ## this informs rigidly-connected devices that they have moved
def getPosition(self, refresh=False):
"""
Return the position of the stage.
If refresh==False, the last known position is returned. Otherwise, the current position is requested from the controller.
"""
if refresh:
with self.driverLock:
pos = np.array(self.mp285.getPos()) * self.scale
with self.lock:
if refresh and not np.all(pos == self.pos):
self.posChanged({'abs': pos})
return self.pos[:]
def getState(self):
with self.lock:
return (self.pos[:],)
def deviceInterface(self, win):
return SMP285Interface(self, win)
def moveBy(self, pos, speed=400, fine=True, block=True, timeout = 10.):
"""Move by the specified amounts.
pos must be a sequence (dx, dy, dz) with values in meters.
speed will be set before moving unless speed=None
"""
with self.driverLock:
if speed is not None:
self.mp285.setSpeed(speed, fine)
self.mp285.moveBy(pos, block=block, timeout = timeout)
self.getPosition(refresh=True)
def moveTo(self, pos, speed=400, fine=True, block=True, timeout = 10.):
"""Move by the absolute position.
pos must be a sequence (dx, dy, dz) w | ith values in meters.
speed will be set before moving unless speed=None
"""
with self.driverLock:
if speed is not None:
self.mp285.setSpeed(speed, fine)
self.mp285 | .setPos(pos, block=block, timeout = timeout)
self.getPosition(refresh=True)
class SMP285Interface(QtGui.QWidget):
def __init__(self, dev, win):
QtGui.QWidget.__init__(self)
self.ui = devTemplate.Ui_Form()
self.ui.setupUi(self)
self.win = win
self.dev = dev
#QtCore.QObject.connect(self.dev, QtCore.SIGNAL('positionChanged'), self.update)
self.dev.sigPositionChanged.connect(self.update)
self.update()
self.limitBtns = [
[self.ui.xMinBtn, self.ui.xMaxBtn],
[self.ui.yMinBtn, self.ui.yMaxBtn],
[self.ui.zMinBtn, self.ui.zMaxBtn],
]
self.limitSpins = [
[self.ui.xMinSpin, self.ui.xMaxSpin],
[self.ui.yMinSpin, self.ui.yMaxSpin],
[self.ui.zMinSpin, self.ui.zMaxSpin],
]
self.limitChecks = [
[self.ui.xMinCheck, self.ui.xMaxCheck],
[self.ui.yMinCheck, self.ui.yMaxCheck],
[self.ui.zMinCheck, self.ui.zMaxCheck],
]
def mkLimitCallback(fn, *args):
return lambda: fn(*args)
for axis in range(3):
for limit in range(2):
self.limitBtns[axis][limit].clicked.connect(mkLimitCallback(self.getLimit, axis, limit))
self.limitSpins[axis][limit].valueChanged.connect(mkLimitCallback(self.updateLimit, axis, limit))
self.limitChecks[axis][limit].toggled.connect(mkLimitCallback(self.enableLimit, axis, limit))
pos, enabled = self.dev.limits[axis][limit]
#self.limitLabels[axis][limit].setText(pg.siFormat(pos, suffix='m', precision=5))
self.limitSpins[axis][limit].setValue(pos)
self.limitChecks[axis][limit].setChecked(enabled)
self.ui.maxSpeedSpin.setOpts(value=self.dev.maxSpeed, siPrefix=True, dec=True, suffix='m/s', step=0.1, minStep=1e-6)
self.ui.maxSpeedSpin.valueChanged.connect(self.maxSpeedChanged)
self.ui.updatePosBtn.clicked.connect(self.updateClicked)
self.ui.joyBtn.sigStateChanged.connect(self.joyStateChanged)
self.ui.coarseStepRadio.toggled.connect(self.resolutionChanged)
self.ui.fineStepRadio.toggled.connect(self.resolutionChanged)
def getLimit(self, axis, limit):
## called when the limit buttons are pressed in the GUI - gets limit and stores in the spin box
pos = self.dev.getPosition()[axis]
self.limitSpins[axis][limit].setValue(pos)
self.updateLimit(axis, limit)
#self.dev.setLimit(axis, limit, val=pos)
#self.limitChecks[axis][limit].setChecked(True)
def updateLimit(self, axis, limit):
## called when the limit buttons are pressed in the GUI
pos = self.limitSpins[axis][limit].value()
#self.dev.getPosition()[axis]
self.dev. |
BlessedAndy/Programming-Foundations-with-Python | Programming Foundations with Python/src/cn/careerwinner/sap/report_scheduler.py | Python | apache-2.0 | 55 | 0 | '''
C | reated on Jul 18, 2017
@author: I310003
'''
| |
mddenton/Projects-my | Games/blackjack.py | Python | mit | 1,541 | 0.009734 | # python v. 3.3.2
import random
def genSuit():
suit = random.randint(1, 4)
if suit == 1:
return "Clubs"
if suit == 2:
return "Diamonds"
if suit == 3:
return "Hearts"
if suit == 4:
return "Spades"
return "Whoops!"
def genRank():
rank = random.randint(1, 13)
if rank == 1:
return "Ace"
if rank == 11:
return "Jack"
if rank == 12:
return "Queen"
if rank == 13:
return "King"
return str(rank)
def drawCard():
return genRank() + " of " + genSuit()
# def test ():
# n = 0
# while n < 10:
# print (drawCard())
# n+= 1
def value(card):
posit | ion = card.find(" ")
rank = card[0: position]
| # print(rank)
if rank == "Ace":
return 11
if rank == "King" or rank == "Queen" or rank == "Jack":
return 10
return int(rank)
def hand():
card1 = drawCard()
card2 = drawCard()
drawTwo = value(card1) + value(card2)
print ("first card: ", card1, ", second card: ", card2, sep='')
if drawTwo == 21:
print ("Blackjack...a Natural!")
elif drawTwo <= 16:
card3 = drawCard()
drawThree = value(card1) + value(card2) + value(card3)
print ("third card: ", card3, sep='')
if drawThree == 21:
print ("Blackjack on third card!")
else:
print ("sorry, ", drawThree, " is not 21")
else:
print ("sorry, ", drawTwo, " is not 21")
def theGame():
hand()
hand()
theGame()
|
tethysplatform/tethys | tests/unit_tests/test_tethys_gizmos/test_gizmo_options/test_base.py | Python | bsd-2-clause | 1,994 | 0 | """
********************************************************************************
* Name: base.py
* Author: nswain
* Created On: July 23, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
import unittest
import tethys_gizmos.gizmo_options.base as basetest
class TestTethysGizmosBase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_TethysGizmoOptions(self):
test_dict = 'key1="value with spaces" key2="value_with_no_spaces"'
test_class = 'Map Type'
result = basetest.TethysGizmoOptions(test_dict, test_class)
self.assertIsInstance(result['attributes'], dict)
self.assertEqual('value with spaces', result['attributes']['key1'])
self.assertEqual('value_with_no_spaces', result['attributes']['key2'])
self.assertEqual('Map Type', result['classes'])
def test_get_tethys_gizmos_js(self):
result = basetest.TethysGizmoOptions.get_tethys_gizmos_js()
self.assertIn('tethys_gizmos.js', result[0])
self.assertNotIn('.css', result[0])
def tes | t_get_tethys_gizmos_css(self):
result = basetest.TethysGizmoOptions.get_tethys_gizmos_css()
self.assertIn('tethys_gizmos.css', result[0])
| self.assertNotIn('.js', result[0])
def test_get_vendor_js(self):
result = basetest.TethysGizmoOptions.get_vendor_js()
self.assertFalse(result)
def test_get_gizmo_js(self):
result = basetest.TethysGizmoOptions.get_gizmo_js()
self.assertFalse(result)
def test_get_vendor_css(self):
result = basetest.TethysGizmoOptions.get_vendor_css()
self.assertFalse(result)
def test_get_gizmo_css(self):
result = basetest.TethysGizmoOptions.get_gizmo_css()
self.assertFalse(result)
def test_SecondaryGizmoOptions(self):
result = basetest.SecondaryGizmoOptions()
self.assertFalse(result)
|
azafred/fredlibs | tests/test_advanced.py | Python | bsd-2-clause | 271 | 0 | # -*- coding: utf-8 -*-
from .context import fredlibs
import unittest
class | AdvancedTestSuite(unittest.TestCase):
"""Advanced test cases."""
def test_thoughts(self):
self.assertIsNone(fredlibs.main( | ))
if __name__ == '__main__':
unittest.main()
|
ttm/sonhos | scripts/exwn.py | Python | cc0-1.0 | 203 | 0.019704 | import nltk as k
#ss=k.corpus.wordnet.synset("thumb")
aa=k.corp | us.wordnet.synsets("word")
print("hypernyms: ", aa[0].hypernyms(),"\nhyponyms: | ", aa[0].hyponyms())
print("\n\nall methods:", dir(aa[0]))
|
Tendrl/commons | tendrl/commons/tests/utils/test_event_utils.py | Python | lgpl-2.1 | 676 | 0 | import __builtin__
import maps
from tendrl.commons.utils.event_utils import emit_event
def test_emit_event():
setattr(__builtin__, "NS", maps.NamedDict())
NS.publisher_id = 0
NS.node_context = maps.NamedDict(fqdn="test",
| node_id="0")
NS.tendrl_context = maps.NamedDict(integration_id="",
cluster_name="",
sds_name="")
emit_event("test", "test", "test", "test", "test",
tags=maps.NamedDict(entity_type="brick"))
emit_event("test", "test", "test", "test", "test",
tags=maps.NamedD | ict(entity_type="volume"))
|
teoreteetik/api-snippets | monitor/events/instance-get-example-phone-number/instance-get-example-phone-number.6.x.py | Python | mit | 370 | 0 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token | from twi | lio.com/user/account
account_sid = "ACCOUNT_SID"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
event = client.monitor.events("AE21f24380625e4aa4abec76e39b14458d").fetch()
print(event.description)
|
cyncyncyn/evette | languagefiles/language_us_1.3.2.py | Python | gpl-2.0 | 68,624 | 0.041691 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#Copyright (C) 2007 Adam Spencer - Free Veterinary | Management Suite
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public | License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##Contact: evetteproject@dsl.pipex.com
####US English####
def GetDictionary():
dictionary = {}
##Misc
dictionary["usernamelabel"] = (
"Username",
"Username"
)
dictionary["passwordlabel"] = (
"Password",
"Password"
)
dictionary["submitlabel"] = (
"Submit",
"Submit"
)
dictionary["totallabel"] = (
"Total",
"Total"
)
dictionary["fromlabel"] = (
"From",
"From"
)
dictionary["tolabel"] = (
"To",
"To"
)
dictionary["pricelabel"] = (
"Price",
"Price"
)
dictionary["descriptionlabel"] = (
"Description",
"Decsription"
)
dictionary["yeslabel"] = (
"Yes",
"Yes"
)
dictionary["nolabel"] = (
"No",
"No"
)
dictionary["editlabel"] = (
"Edit",
"Edit"
)
dictionary["deletelabel"] = (
"Delete",
"Delete"
)
dictionary["searchlabel"] = (
"Search",
"Search"
)
dictionary["resetlabel"] = (
"Reset",
"Reset"
)
dictionary["movelabel"] = (
"Move",
"Move"
)
dictionary["unitlabel"] = (
"Unit",
"Unit"
)
dictionary["onlabel"] = (
"on",
"on"
)
dictionary["namelabel"] = (
"Name",
"Name"
)
dictionary["headertext1"] = (
"The complete FREE veterinary practice management package",
"The complete open-source veterinary practice management package"
)
dictionary["headertext2"] = (
"You can change this header to anything you like by editing",
"You can change this header to anything you like by editing"
)
dictionary["generatedbylabel"] = (
"Generated by",
"Generated by"
)
dictionary["timelabel"] = (
"Time",
"Time"
)
dictionary["operationslabel"] = (
"Operations",
"Operations"
)
dictionary["operatinglabel"] = (
"Operating",
"Operating"
)
dictionary["consultinglabel"] = (
"Consulting",
"Consulting"
)
dictionary["vetlabel"] = (
"Vet",
"Vet"
)
dictionary["animaldetailslabel"] = (
"Animal Details",
"Animal Details"
)
dictionary["ownerdetailslabel"] = (
"Owner Details",
"Owner Details"
)
dictionary["receiptlabel"] = (
"Receipt",
"Receipt"
)
dictionary["problemlabel"] = (
"Problem",
"Problem"
)
dictionary["noteslabel"] = (
"Notes",
"Notes"
)
dictionary["planlabel"] = (
"Plan",
"Plan"
)
dictionary["userdeleted"] = (
"User deleted",
"User Deleted"
)
dictionary["changelog"] = (
"Change Log",
"Change Log"
)
dictionary["positionlabel"] = (
"Position",
"Position"
)
dictionary["datelabel"] = (
"Date",
"Date"
)
dictionary["invalidtimemessage"] = (
"Invalid Time",
"Invalid Time"
)
dictionary["containslabel"] = (
"Contains",
"Contains"
)
dictionary["nextduelabel"] = (
"Next Due",
"Next Due"
)
dictionary["nonelabel"] = (
"None",
"None"
)
##Menus
dictionary["clientmenu"] = (
"&Clients",
"&Clients"
)
dictionary["appointmentsmenu"] = (
"&Appointments",
"&Appointments"
)
dictionary["medicationmenu"] = (
"&Medication",
"&Medication"
)
dictionary["proceduresmenu"] = (
"&Procedures",
"&Procedures"
)
dictionary["lookupsmenu"] = (
"&Lookups",
"&Lookups"
)
dictionary["formsmenu"] = (
"&Forms",
"&Forms"
)
dictionary["staffmenu"] = (
"&Staff",
"&Staff"
)
dictionary["settingsmenu"] = (
"Se&ttings",
"Se&ttings"
)
dictionary["helpmenu"] = (
"&Help",
"&Help"
)
dictionary["entirelabel"] = (
"Entire",
"Entire"
)
dictionary["neuteredlabel"] = (
"Neutered",
"Neutered"
)
##Menu items
dictionary["addclientmenu"] = (
("Add Client", "Create a new client record"),
("Add Client", "Create a new client record")
)
dictionary["findclientmenu"] = (
("Find Clients", "Find client and animal records"),
("Find Clients", "Find client and animal records")
)
dictionary["viewappointmentsmenu"] = (
("Todays Appointments", "View todays appointments"),
("Todays Appointments", "View todays appointments")
)
dictionary["viewoperationsmenu"] = (
("Todays Operations", "View todays operations"),
("Todays Operations", "View todays operations")
)
dictionary["editusersmenu"] = (
("Edit Users", "Add and edit Evette users"),
("Edit Users", "Add and edit Evette users")
)
dictionary["editrotamenu"] = (
("Edit Rota", "Edit the rota"),
("Edit Rota", "Edit the rota")
)
dictionary["editmedicationmenu"] = (
("Edit Medication", "Edit Medication"),
("Edit Medication", "Edit Medication")
)
dictionary["editvaccinationsmenu"] = (
("Edit Vaccinations", "Edit Vaccinations"),
("Edit Vaccinations", "Edit Vaccinations")
)
dictionary["editproceduresmenu"] = (
("Edit Procedures", "Edit Procedures"),
("Edit Procedures", "Edit Procedures")
)
dictionary["editcoloursmenu"] = (
("Edit Colours", "Edit Colours"),
("Edit Colors", "Edit Colors")
)
dictionary["editbreedsmenu"] = (
("Edit Breeds", "Edit Breeds"),
("Edit Breeds", "Edit Breeds")
)
dictionary["editspeciesmenu"] = (
("Edit Species", "Edit Species"),
("Edit Species", "Edit Species")
)
dictionary["editformsmenu"] = (
("Edit Forms", "Edit Forms"),
("Edit Forms", "Edit Forms")
)
dictionary["editsettingsmenu"] = (
("Edit Settings", "Edit settings unique to this practice"),
("Edit Settings", "Edit settings unique to this practice")
)
dictionary["randomdatamenu"] = (
("Random Data", "Generate random sample data to experiment with"),
("Random Data", "Generate random sample data to experiment with")
)
dictionary["resettablesmenu"] = (
("Reset Database", "Completely reset the evette database"),
("Reset Database", "Completely reset the Evette database. Be careful!")
)
dictionary["gethelpmenu"] = (
("Help", "Get help on using Evette"),
("Help", "Get help on using Evette")
)
dictionary["aboutmenu"] = (
("About", "Information about this program"),
("About", "Information about Evette")
)
##Toolbar
dictionary["addclienttoolbar"] = (
(" Add Client ", "Create a new client record"),
(" Add Client ", "Create a new client record")
)
dictionary["findclienttoolbar"] = (
(" Client Search ", "Find clients and animals"),
(" Client Search ", "Find clients and their animals")
)
dictionary["viewappointmentstoolbar"] = (
(" Todays Appointments ", "View todays appointments"),
(" Todays Appointments ", "View todays appointments")
)
dictionary["viewoperationstoolbar"] = (
(" Todays Operations ", "View todays operations"),
(" Todays Operations ", "View todays operations")
)
##Client Panel
dictionary["newclientpagetitle"] = (
"New Client",
"New Client"
)
dictionary["clienttitlelabel"] = (
"Title",
"Title"
)
dictionary["clientforenameslabel"] = (
"First Name",
"First Names"
)
dictionary["clientsurnamelabel"] = (
"Last Name",
"Last Name"
)
dictionary["clientaddresslabel"] = (
"Address",
"Address"
)
dictionary["clientpostcodelabel"] = (
"Post Code",
"Zip Code"
)
dictionary["clienthomephonelabel"] = (
"Home Phone",
"Home Phone"
)
dictionary["clientmobilephonelabel"] = (
"Mobile Phone",
"Mobile Phone"
)
dictionary["clientworkphonelabel"] = (
"Work Phone",
"Work Phone"
)
dictionary["clientemailaddresslabel"] = (
"Email",
"Email"
)
dictionary["clientcommentslabel"] = (
"Comments",
"Comments"
)
dictionary["clientanimalslabel"] = (
"Animals",
"Animals"
)
dictionary["clientaddanimaltooltip"] = (
"Create a new animal",
"Create a new animal"
)
dicti |
eduNEXT/edx-platform | lms/djangoapps/coursewarehistoryextended/tests.py | Python | agpl-3.0 | 4,269 | 0.002577 | """
Tests for coursewarehistoryextended
Many aspects of this app are covered by the courseware tests,
but these are specific to the new storage model with multiple
backend tables.
"""
import json
from unittest import skipUnless
from unittest.mock import patch
from django.conf import settings
from django.db import connections
from django.test import TestCase
from lms.djangoapps.courseware.models import BaseStudentModuleHistor | y, StudentModule, StudentModuleHistory
from lms.djangoapps.courseware.tests.factories import COURSE_KEY
from lms.djangoapps.courseware.tests.factories import LOCATION
from lms.djangoapps.courseware.tests.factories import StudentModuleFactory
@skipUnless(settings.FEATURES["ENABLE_CSMH_EXTENDED"], "CSMH Extended needs to be enabled")
clas | s TestStudentModuleHistoryBackends(TestCase):
""" Tests of data in CSMH and CSMHE """
# Tell Django to clean out all databases, not just default
databases = set(connections)
def setUp(self):
super().setUp()
for record in (1, 2, 3):
# This will store into CSMHE via the post_save signal
csm = StudentModuleFactory.create(
module_state_key=LOCATION('usage_id'),
course_id=COURSE_KEY,
state=json.dumps({'type': 'csmhe', 'order': record}),
)
# This manually gets us a CSMH record to compare
csmh = StudentModuleHistory(student_module=csm,
version=None,
created=csm.modified,
state=json.dumps({'type': 'csmh', 'order': record}),
grade=csm.grade,
max_grade=csm.max_grade)
csmh.save()
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": True})
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": True})
def test_get_history_true_true(self):
student_module = StudentModule.objects.all()
history = BaseStudentModuleHistory.get_history(student_module)
assert len(history) == 6
assert {'type': 'csmhe', 'order': 3} == json.loads(history[0].state)
assert {'type': 'csmhe', 'order': 2} == json.loads(history[1].state)
assert {'type': 'csmhe', 'order': 1} == json.loads(history[2].state)
assert {'type': 'csmh', 'order': 3} == json.loads(history[3].state)
assert {'type': 'csmh', 'order': 2} == json.loads(history[4].state)
assert {'type': 'csmh', 'order': 1} == json.loads(history[5].state)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": True})
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": False})
def test_get_history_true_false(self):
student_module = StudentModule.objects.all()
history = BaseStudentModuleHistory.get_history(student_module)
assert len(history) == 3
assert {'type': 'csmhe', 'order': 3} == json.loads(history[0].state)
assert {'type': 'csmhe', 'order': 2} == json.loads(history[1].state)
assert {'type': 'csmhe', 'order': 1} == json.loads(history[2].state)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": False})
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": True})
def test_get_history_false_true(self):
student_module = StudentModule.objects.all()
history = BaseStudentModuleHistory.get_history(student_module)
assert len(history) == 3
assert {'type': 'csmh', 'order': 3} == json.loads(history[0].state)
assert {'type': 'csmh', 'order': 2} == json.loads(history[1].state)
assert {'type': 'csmh', 'order': 1} == json.loads(history[2].state)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": False})
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": False})
def test_get_history_false_false(self):
student_module = StudentModule.objects.all()
history = BaseStudentModuleHistory.get_history(student_module)
assert len(history) == 0
|
lento/cortex | test/IECoreMaya/PluginLoadUnload.py | Python | bsd-3-clause | 2,870 | 0.032404 | ##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source | and binary forms, with or without
# modification, ar | e permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreMaya
class TestPluginLoadUnload( IECoreMaya.TestCase ) :
def test( self ):
""" Test loading/unloading of plugin """
# Plugin should be loaded by MayaUnitTest.TestProgram when we get here
self.assert_( maya.cmds.pluginInfo( "ieCore", query = True, loaded = True ) )
for i in range( 0, 20 ) :
self.failIf( maya.cmds.pluginInfo( "ieCore", query = True, serviceDescriptions = True ) )
maya.cmds.unloadPlugin( "ieCore" )
self.failIf( maya.cmds.pluginInfo( "ieCore", query = True, loaded = True ) )
maya.cmds.loadPlugin( "ieCore" )
self.assert_( maya.cmds.pluginInfo( "ieCore", query = True, loaded = True ) )
self.assert_( maya.cmds.pluginInfo( "ieCore", query = True, loaded = True ) )
def tearDown( self ):
if not maya.cmds.pluginInfo( "ieCore", query = True, loaded = True ) :
maya.cmds.loadPlugin( "ieCore" )
# Make sure plugin is definitely loaded when we exit tests
assert( maya.cmds.pluginInfo( "ieCore", query = True, loaded = True ) )
if __name__ == "__main__":
IECoreMaya.TestProgram()
|
src053/PythonComputerScience | chap8/syracuse.py | Python | cc0-1.0 | 606 | 0.049505 | #a Program that finds the syracuse sequence
#function to call the even calculation
def even(n):
return n/2
#function to call the odd calculation
def odd(n):
return 3 * n + 1
def main():
#get | the natural number from user
n = eval(input("Please enter a natural number: "))
#iterate until the n var is 1
while n > 1:
#if n var is evenly divisible by two then run the even function
if n % 2 == 0:
#for trouble shooting
print("even function will be ran")
n = even(n)
#print the value of n
print(n)
#if n var is odd then run the odd function
else:
n = odd(n)
print(n)
|
main() |
supasate/word_prediction | Chapter4/4-7-even-solution.py | Python | gpl-2.0 | 193 | 0.00885 | # จงเขียนโปรแกรมแสดงเลขคู่ในช่วง 0 ถึง 10 (รวม 10 ด้วย)
for i in range(11):
if (i % 2 == 0) | :
print( | i) |
drussell393/ConfidenceMon | modules/zabbix.py | Python | mit | 5,988 | 0.009185 | import json
import logging
import re
import urllib2
import base64
class _NullHandler(logging.Handler):
"""
Null Handler class for Logger
"""
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(_NullHandler())
class ZabbixAPIException(Exception):
"""
ZabbixAPI exception class
code list:
-32602 - Invalid params (eg already exists)
-32500 - no permissions
"""
pass
class ZabbixAPIObjectClass(object):
"""
ZabbixAPI Object class
"""
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __getattr__(self, attr):
"""
Dynamically create a method (ie: get)
"""
def fn(*args, **kwargs):
if args and kwargs:
raise TypeError("Found both args and kwargs")
logger.debug(attr)
return self.parent.do_request(
'{0}.{1}'.format(self.name, attr),
args or kwargs
)['result']
return fn
class ZabbixAPI(object):
"""
ZabbixAPI class, implement interface to zabbix api
Examples:
z = ZabbixAPI('https://zabbix.server', user='admin', password='zabbix')
# Get API Version
z.api_info.version()
>> u'2.2.1'
# ir
z.do_request('apiinfo.version')
>> {u'jsonrpc': u'2.0', u'result': u'2.2.1', u'id': u'1'}
# Get all disabled hosts
z.host.getobjects(status=1)
# or
z.do_request('host.getobjects', {'status':1})
"""
def __init__(self, url='https://localhost/zabbix',
use_authenticate=False, user='admin', password='zabbix'):
self.use_authenticate = use_authenticate
self.auth = None
s | elf.url = url + '/api_jsonrpc.php'
self.__log | in(user, password)
logger.debug("JSON-PRC Server: %s", self.url)
def __getattr__(self, attr):
"""
Dynamically create an object class (ie: host)
"""
return ZabbixAPIObjectClass(attr, self)
def __login(self, user='', password=''):
"""
Do login to zabbix server
Attributes:
user (str): Zabbix user
password (str): Zabbix user password
"""
logger.debug("ZabbixAPI.login({0},{1})".format(user, password))
self.auth = None
if self.use_authenticate:
self.auth = self.user.authenticate(user=user, password=password)
else:
self.auth = self.user.login(user=user, password=password)
def api_version(self):
"""
Return version of Zabbix API
"""
return self.apiinfo.version()
def do_request(self, method, params=None):
"""
Make request to Zabbix API
Attributes:
method (str): Any of ZabbixAPI method, like: apiinfo.version
params (str): Methods parameters
Examples:
z = ZabbixAPI()
apiinfo = z.do_request('apiinfo.version')
"""
request_json = {
'jsonrpc':'2.0',
'method': method,
'params': params or {},
'auth': self.auth,
'id': '1',
}
logger.debug('urllib2.Request({0}, {1})'.format(self.url,json.dumps(request_json)))
base64string = base64.encodestring('%s:%s' % ('API', 'e27ef659f1def022b35a7edb3239472e')).replace('\n', '')
req = urllib2.Request(self.url, json.dumps(request_json))
req.add_header("Authorization", "Basic %s" % base64string)
req.get_method = lambda: 'POST'
req.add_header('Content-Type', 'application/json-rpc')
try:
res = urllib2.urlopen(req)
response_json = json.load(res)
except ValueError:
raise ZabbixAPIException("Unable to parse json: %" % res)
logger.debug("Response Body: %s" % json.dumps(response_json, indent=4,
separators=(',', ': ')))
if 'error' in response_json:
msg = "Error {code}: {message}, {data} while sending {json}".format(
code=response_json['error']['code'],
message=response_json['error']['message'],
data=response_json['error']['data'],
json=str(request_json)
)
raise ZabbixAPIException(msg, response_json['error']['code'])
return response_json
def get_id(self, item_type, item=None, with_id=False, hostid=None, **args):
"""
Return id or ids of zabbix objects.
Attributes:
item_type (str): Type of zabbix object
item (str): Name of zabbix object.
If None - will return list of all object in the scope.
with_id (bool): Return values will be in zabbix format.
Examlpe: {'itemid: 128}
hostid (int): Specify id of host for special cases
templateid (int): Specify scope to specific template
app_name (str): Specify scope to specific template
"""
result = None
type_ = '{item_type}.get'.format(item_type=item_type)
item_filter_name = {
'trigger': 'description',
'triggerprototype': 'description',
'mediatype': 'description',
'user': 'alias',
'usermacro': 'macro',
}
item_id_name = {
'usermacro': 'hostmacro',
'usergroup': 'usrgrp',
'hostgroup': 'group',
'discoveryrule': 'item',
'graphprototype': 'graph',
'itemprototype': 'item',
'triggerprototype': 'trigger',
}
filter_ = { 'filter': { item_filter_name.get(item_type, 'name'): item }, 'output':'extend' }
if hostid:
filter_['filter'].update({ 'hostid': hostid })
if args.get('templateid'):
filter_['templateids'] = args['templateid']
if args.get('app_name'):
filter_['application'] = args['app_name']
logger.debug('do_request( "{type}", {filter} )'.format(type=type_, filter=filter_))
response = self.do_request(type_, filter_)['result']
if response:
item_id = '{item}id'.format(item=item_id_name.get(item_type, item_type))
result = []
for obj in response:
if with_id:
result.append({ item_id: int(obj.get(item_id)) })
else:
result.append(int(obj.get(item_id)))
list_types = (list, type(None))
if not isinstance(item, list_types):
result = result[0]
return result
|
sfu-discourse-lab/SFU_Comment_Extractor | Source_Code/data_cleanup/repair_gnm_article_text.py | Python | mit | 1,779 | 0.033165 | from bs4 import BeautifulSoup
import requests
import os
import re
import time
import dbm
import csv
import pandas as pd
def _count_words(s):
"""count the number of words in a paragraph
Args:
[String] s
Return:
[INT] number of words in the string
"""
return len(s.split(" "))
def get_page(url):
"" | "get the page information
Args:
[String] URL: the URL to the page we | try to parse
Return:
[Data Structure] page: the request response from the page
"""
print(url)
page = ''
while page == '':
try:
time.sleep(1)
page = requests.get(url)
except:
print("Connection refused by the server..")
print("Let me sleep for 5 seconds")
print("ZZzzzz...")
time.sleep(5)
print("Was a nice sleep, now let me continue...")
continue
return page
def main(input_file):
bound = 200
df = pd.read_csv(input_file)
# get the article with the text less than 200 words
short_article_index = df.index[df.article_text.apply(_count_words) < bound]
short_article_url = df.article_url[df.article_text.apply(_count_words) < bound]
# reparsing the article of those URL with to the articles
for i in short_article_index:
page = get_page(df.article_url.iloc[i])
soup = BeautifulSoup(page.content, 'html.parser')
article_text = [p.get_text().encode('utf-8').strip() \
for p in soup.find_all('p', class_="c-article-body__text")]
if article_text:
df.article_text.iloc[i] = "/n".join(article_text)
print("NOTE: the new file named GNM_ARTICLES_NEW.CSV")
df.to_csv("gnm_articles_new.csv",index=False)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='the path to the gnm_articles.csv')
parser.add_argument('input_file', type=str, help='the input file')
args = parser.parse_args()
main(args.input_file)
|
locaweb/leela | try/src/try_leela/helpers.py | Python | apache-2.0 | 1,592 | 0.010678 | # -*- coding: utf-8 -*-
import sys
import time
def make(session):
session.execute("make (%(rnd_name.0)s)")
a_guid = session.message()[1][-1]
assert(session.message() is None)
return(a_guid)
def link(session, a, l, b):
session.execute("make %s -[%s]> %s" % (a, l, b))
assert(s | ession.message() is None)
def kill(session, a, l, b=None):
if (b is None):
session.execute("kill %s -[%s]> ()" % (a, l))
else:
session.execute("kill %s -[%s]> %s" % (a, l, b))
assert(session.message() is None)
def kattr_put(session, a, name, value, ttl=None):
if (ttl is None):
ttl = ""
else:
ttl = " with ttl:%d" % (ttl,)
session.execute("attr put % | s \"%s\" %s%s" % (a, name, value, ttl))
assert(session.message() is None)
def tattr_put(session, a, name, time, value, ttl=None):
if (ttl is None):
ttl = ""
else:
ttl = " with ttl:%d" % (ttl,)
session.execute("attr put %s \"%s\" [%s] %s%s" % (a, name, time, value, ttl))
assert(session.message() is None)
def kattr_del(session, a, name):
session.execute("attr del %s \"%s\"" % (a, name))
assert(session.message() is None)
def string_value(a):
return("\"%s\"" % a)
def int32_value(a):
return("(int32 %d)" % a)
def uint32_value(a):
return("(uint32 %d)" % a)
def int64_value(a):
return("(int64 %d)" % a)
def uint64_value(a):
return("(uint64 %d)" % a)
def double_value(a):
return("(double %s)" % repr(a))
def sleep(t):
sys.stdout.write("(time.sleep %d)" % t)
sys.stdout.flush()
time.sleep(t * 2)
|
alexm92/sentry | src/sentry/models/commit.py | Python | bsd-3-clause | 1,338 | 0 | from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.utils.cache import memoize
class Commit(Model):
__core__ = False
organization_id = BoundedPositiveIntegerField(db_index=True)
repository_id = BoundedPositiveIntegerField()
key = models.CharField(max_length=64)
date_added = models.DateTimeField(default=timezone.now)
# all commit metadata must be optional, as it may not be available
# when the initial commit obje | ct is referenced (and thus created)
author = FlexibleForeignKey('sentry.CommitAuthor', null=True)
message = models.TextField | (null=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_commit'
index_together = (
('repository_id', 'date_added'),
)
unique_together = (
('repository_id', 'key'),
)
__repr__ = sane_repr('organization_id', 'repository_id', 'key')
@memoize
def title(self):
if not self.message:
return ''
return self.message.splitlines()[0]
@memoize
def short_id(self):
if len(self.key) == 40:
return self.key[:12]
return self.key
|
gaftech/fmanalyser | fmanalyser/commands/fmlogger.py | Python | mit | 4,946 | 0.00647 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ..client import MODE_CHOICES
from ..client.tasks import ReadChannelValues, WriteChannelValue
from ..client.worker import Worker
from ..utils.command import BaseCommand
from ..values.channel import Channel
from ..values.signals import ValueChangeEvent
from optparse import OptionGroup, OptionConflictError
import logging
import sys
import time
class Command(BaseCommand):
def make_option_groups(self, parser):
groups = super(Command, self).make_option_groups(parser)
# Runtime options
runtime_group = OptionGroup(parser, 'runtime options')
runtime_group.add_option('--sleep', default=1, type='float',
help='time to sleep (s) between two probing loops')
# Channel options
channel_group = OptionGroup(parser, 'channel options')
self._append_channel_options(channel_group)
groups.append(channel_group)
# Data output options
data_group = OptionGroup(parser, 'data logging options')
data_group.add_option('--file',dest='datafile',
help="File to store data in")
groups.append(data_group)
return groups
def _append_channel_options(self, group):
# initial frequency
group.add_option('--set-freq', '-F',
help='set initial frequency (MHz)')
group.add_option('--mode', '-m', type='choice',
choices = MODE_CHOICES,
help='set device mode {mes|rds|stereo}')
group.add_option('--mes',
action='store_const', dest='mode', const='mes',
help='set measurement mode')
group.add_option('--rdsm', '--rds-mode',
action='store_const', dest='mode', const='rds',
help='set rds mode')
group.add_option('--stereo',
action='store_const', dest='mode', const='stereo',
help='set stereo mode')
# measurements
for descriptor in Channel.iter_descriptors():
if not descriptor.readable:
continue
longopt = '--%s' % descriptor.key
shortopt = None
if descriptor.short_key is not None:
shortopt = '-%s' % descriptor.short_key
kwargs= {
'dest': descriptor.key,
'action': 'store_true',
'help': 'enable %s measurement' % descriptor,
}
try:
group.add_option(longopt, shortopt, **kwargs)
except OptionConflictError:
group.add_option(longopt, **kwargs)
return group
def configure_logging(self):
super(Command, self).configure_logging()
datalogger_name = '%s.data' % self.get_logger_name()
datalogger = logging.getLogger(datalogger_name)
datalogger.propagate = False
datalogger.setLevel(logging.DEBUG)
data_streamhandler = logging.StreamHandler(sys.stdout)
datalogger.addHandler(data_streamhandler)
if self.options.datafile:
data_filehandler = logging.FileHandler(self.options.datafile)
datalogger.addHandler(data_filehandler)
self.datalogger = datalogger
def stop(self, signal, frame):
self.logger.info(u"stopping on signal %s..." % signal)
if hasattr(self, 'worker'):
self.worker.stop()
def execute(self):
ValueChangeEvent.connect(self.on_value_changed)
channel = self.make_channel()
self.worker.run()
mode = self.options.mode
if mode is not None:
mode_variable = channel.get_variable('mode')
mode_variable.set_command(mode)
self.worker.enqueue(WriteChannelValue, variable=mode_variable)
freq = self.options.set_freq
if freq is not None:
freq_variable = channel.get_variable('frequency')
freq_variable.set_command(freq)
self.worker.enqueue(WriteChannelValue, variable=freq_variable)
while self.worker.is_alive():
task = self.worker.enqueue(ReadChannelValues, channel=channel)
task | .wait(blocking=False, timeout=2)
time.sleep(self.options.sleep)
def make_channel(self):
channel = Channel()
for variable in channel.get_variables():
enabled = getattr(self.options, variable.descriptor.key)
variable.enabled = enabled
return channel
def on_value_changed(se | lf, sender, event):
message = self.format_event(event)
self.log_data(message)
def format_event(self, event):
descriptor = event.sender.descriptor
return '%s: %s' % (descriptor.key, descriptor.format_value(event.new_value))
def log_data(self, message):
self.datalogger.info(message)
def main():
sys.exit(Command().run())
if __name__ == '__main__':
main()
|
antonyr/django-haystack | haystack/management/commands/rebuild_index.py | Python | bsd-3-clause | 1,324 | 0.003776 | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.core.management import ca | ll_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Completely rebuilds the search index by removing th | e old data and then updating."
def add_arguments(self, parser):
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='If provided, no prompts will be issued to the user and the data will be wiped out.'
)
parser.add_argument(
'-u', '--using', action='append', default=[],
help='Update only the named backend (can be used multiple times). '
'By default all backends will be updated.'
)
parser.add_argument(
'-k', '--workers', default=0, type=int,
help='Allows for the use multiple workers to parallelize indexing. Requires multiprocessing.'
)
parser.add_argument(
'--nocommit', action='store_false', dest='commit',
default=True, help='Will pass commit=False to the backend.'
)
def handle(self, **options):
call_command('clear_index', **options)
call_command('update_index', **options)
|
apache/incubator-airflow | docs/exts/docs_build/docs_builder.py | Python | apache-2.0 | 12,386 | 0.002018 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import shlex
import shutil
from glob import glob
from subprocess import run
from typing import List
from rich.console import Console
from docs.exts.docs_build.code_utils import (
AIRFLOW_SITE_DIR,
ALL_PROVIDER_YAMLS,
CONSOLE_WIDTH,
DOCS_DIR,
PROCESS_TIMEOUT,
ROOT_PROJECT_DIR,
pretty_format_path,
)
from docs.exts.docs_build.errors import DocBuildError, parse_sphinx_warnings
from docs.exts.docs_build.helm_chart_utils import chart_version
from docs.exts.docs_build.spelling_checks import SpellingError, parse_spelling_warnings
console = Console(force_terminal=True, color_system="standard", width=CONSOLE_WIDTH)
class AirflowDocsBuilder:
"""Documentation builder for Airflow."""
def __init__(self, package_name: str, for_production: bool):
self.package_name = package_name
self.for_production = for_production
@property
def _doctree_dir(self) -> str:
return f"{DOCS_D | IR}/_doctrees/docs/{self.package_name}"
@property
def _inventory_cache_dir(self) -> str:
return f"{DOCS_DIR}/_inventory_cache"
@property
def is_versioned(self):
"""Is current documentation package versioned?"""
# Disable versioning. This documentation does not apply to any released product and we can upd | ate
# it as needed, i.e. with each new package of providers.
return self.package_name not in ('apache-airflow-providers', 'docker-stack')
@property
def _build_dir(self) -> str:
if self.is_versioned:
version = "stable" if self.for_production else "latest"
return f"{DOCS_DIR}/_build/docs/{self.package_name}/{version}"
else:
return f"{DOCS_DIR}/_build/docs/{self.package_name}"
@property
def log_spelling_filename(self) -> str:
"""Log from spelling job."""
return os.path.join(self._build_dir, f"output-spelling-{self.package_name}.log")
@property
def log_spelling_output_dir(self) -> str:
"""Results from spelling job."""
return os.path.join(self._build_dir, f"output-spelling-results-{self.package_name}")
@property
def log_build_filename(self) -> str:
"""Log from build job."""
return os.path.join(self._build_dir, f"output-build-{self.package_name}.log")
@property
def log_build_warning_filename(self) -> str:
"""Warnings from build job."""
return os.path.join(self._build_dir, f"warning-build-{self.package_name}.log")
@property
def _current_version(self):
if not self.is_versioned:
raise Exception("This documentation package is not versioned")
if self.package_name == 'apache-airflow':
from airflow.version import version as airflow_version
return airflow_version
if self.package_name.startswith('apache-airflow-providers-'):
provider = next(p for p in ALL_PROVIDER_YAMLS if p['package-name'] == self.package_name)
return provider['versions'][0]
if self.package_name == 'helm-chart':
return chart_version()
return Exception(f"Unsupported package: {self.package_name}")
@property
def _publish_dir(self) -> str:
if self.is_versioned:
return f"docs-archive/{self.package_name}/{self._current_version}"
else:
return f"docs-archive/{self.package_name}"
@property
def _src_dir(self) -> str:
return f"{DOCS_DIR}/{self.package_name}"
def clean_files(self) -> None:
"""Cleanup all artifacts generated by previous builds."""
api_dir = os.path.join(self._src_dir, "_api")
shutil.rmtree(api_dir, ignore_errors=True)
shutil.rmtree(self._build_dir, ignore_errors=True)
os.makedirs(api_dir, exist_ok=True)
os.makedirs(self._build_dir, exist_ok=True)
def check_spelling(self, verbose: bool) -> List[SpellingError]:
"""
Checks spelling
:param verbose: whether to show output while running
:return: list of errors
"""
spelling_errors = []
os.makedirs(self._build_dir, exist_ok=True)
shutil.rmtree(self.log_spelling_output_dir, ignore_errors=True)
os.makedirs(self.log_spelling_output_dir, exist_ok=True)
build_cmd = [
os.path.join(ROOT_PROJECT_DIR, "docs", "exts", "docs_build", "run_patched_sphinx.py"),
"-W", # turn warnings into errors
"--color", # do emit colored output
"-T", # show full traceback on exception
"-b", # builder to use
"spelling",
"-c",
DOCS_DIR,
"-d", # path for the cached environment and doctree files
self._doctree_dir,
self._src_dir, # path to documentation source files
self.log_spelling_output_dir,
]
env = os.environ.copy()
env['AIRFLOW_PACKAGE_NAME'] = self.package_name
if self.for_production:
env['AIRFLOW_FOR_PRODUCTION'] = 'true'
if verbose:
console.print(
f"[blue]{self.package_name:60}:[/] Executing cmd: ",
" ".join(shlex.quote(c) for c in build_cmd),
)
console.print(f"[blue]{self.package_name:60}:[/] The output is hidden until an error occurs.")
with open(self.log_spelling_filename, "wt") as output:
completed_proc = run(
build_cmd,
cwd=self._src_dir,
env=env,
stdout=output if not verbose else None,
stderr=output if not verbose else None,
timeout=PROCESS_TIMEOUT,
)
if completed_proc.returncode != 0:
spelling_errors.append(
SpellingError(
file_path=None,
line_no=None,
spelling=None,
suggestion=None,
context_line=None,
message=(
f"Sphinx spellcheck returned non-zero exit status: {completed_proc.returncode}."
),
)
)
warning_text = ""
for filepath in glob(f"{self.log_spelling_output_dir}/**/*.spelling", recursive=True):
with open(filepath) as spelling_file:
warning_text += spelling_file.read()
spelling_errors.extend(parse_spelling_warnings(warning_text, self._src_dir))
console.print(f"[blue]{self.package_name:60}:[/] [red]Finished spell-checking with errors[/]")
else:
if spelling_errors:
console.print(
f"[blue]{self.package_name:60}:[/] [yellow]Finished spell-checking with warnings[/]"
)
else:
console.print(
f"[blue]{self.package_name:60}:[/] [green]Finished spell-checking successfully[/]"
)
return spelling_errors
def build_sphinx_docs(self, verbose: bool) -> List[DocBuildError]:
"""
Build Sphinx documentation.
:param verbose: whether to show output while running
:return: list of errors
"""
build_errors = []
os.makedirs(self._build_dir, exist_ok=True)
build_cmd = [
os.path.join(ROOT_PROJECT_ |
mangaki/mangaki | mangaki/mangaki/management/commands/anilist_tags_to_json.py | Python | agpl-3.0 | 3,990 | 0.002514 | # SPDX-FileCopyrightText: 2014, Mangaki Authors
# SPDX-License-Identifier: AGPL-3.0-only
from time import sleep
import json
from django.core.management.base import BaseCommand
from mangaki.wrappers.anilist import AniList
from mangaki.models import Work
MAX_ATTEMPTS = 5
BACKOFF_DELAY = 2
class Command(BaseCommand):
"""
Recherche par titre chaque Work sur AniList afin de finalement récupérer les
tags. Si un titre ne match pas sur AniList, le log l'affiche et un fichier
stocke l'ensemble des Work non récupérés (ID + Titre sur la BDD).
Enfin, sort un fichier JSON contenant, pour chaque Work la liste des tags
et le poids associé (valeur de 0 à 1) récupéré grâce au système de votes
d'AniList.
Le format du JSON est le même que pour illustration2vec !
"""
help = 'AniList tags to JSON'
def add_arguments(self, parser):
parser.add_argument('work_id', nargs='*', type=int)
def handle(self, *args, **options):
client = AniList()
if options['work_id']:
works = Work.objects.filter(pk__in=options['work_id']).order_by('pk')
else:
works = Work.objects.all().order_by('pk')
if works.count() == 0:
self.stdout.write(self.style.WARNING('No works to process ...'))
return
final_tags = {}
all_tags = set()
missed_titles = {}
count = works.count()
| self.stdout.write('Number of works : '+str(count)+'\n\n')
for work in works:
title_display = work.title.encode('utf8')
if self.stdout.encoding is not None:
title_display = title_display.decode(self.stdout.encoding)
anilist_result = None
# Try to fe | tch data from AniList with an exponential backoff
for tries in range(MAX_ATTEMPTS):
try:
# Search the work by title on AniList
anilist_result = client.get_work(search_title=work.title)
break
except Exception as err:
print(err)
delay = BACKOFF_DELAY ** tries
self.stdout.write(self.style.WARNING('Sleep : Retrying {} in {} seconds ...'.format(title_display, delay)))
sleep(delay)
continue
# Couldn't fetch data even after retrying : exit
if tries >= MAX_ATTEMPTS - 1:
self.stderr.write(self.style.ERROR('\nBanned from AniList ...'))
self.stderr.write(self.style.ERROR('--- Latest Work ID : '+str(work.pk)+' ---'))
break
# Work couldn't be found on Anilist
if not anilist_result:
missed_titles[work.id] = work.title
self.stdout.write(self.style.WARNING('Could not match "'+str(title_display)+'" on AniList'))
continue
self.stdout.write('> Working on : '+str(title_display))
dict_key = '{}.jpg'.format(work.pk)
tags_list = []
if not anilist_result.tags:
continue
for tag in anilist_result.tags:
tag_name = tag['name']
tag_weight = tag['votes']/100
if tag_weight != 0:
tags_list.append([tag_name, tag_weight])
all_tags.add(tag_name)
final_tags[dict_key] = tags_list
self.stdout.write(self.style.SUCCESS('\n--- Writing tags to anilist_tags.json ---'))
with open('anilist_tags.json', 'w', encoding='utf-8') as f:
json.dump(final_tags, f)
self.stdout.write(self.style.SUCCESS('--- Writing missed titles to missed_anilist_titles.json ---'))
with open('missed_anilist_titles.json', 'w', encoding='utf-8') as f:
json.dump(missed_titles, f)
self.stdout.write(self.style.SUCCESS('--- Number of different tags : '+str(len(all_tags))+' ---'))
|
jmartinm/invenio | modules/miscutil/lib/plotextractor_converter.py | Python | gpl-2.0 | 8,021 | 0.003117 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
import re
from invenio.shellutils import run_shell_command, run_process_with_timeout, Timeout
from invenio.plotextractor_output_utils import get_converted_image_name, \
write_message
def untar(original_tarball, sdir):
"""
Here we decide if our file is actually a tarball (sometimes the
'tarballs' gotten from arXiv aren't actually tarballs. If they
'contain' only the TeX file, then they are just that file.), then
we untar it if so and decide which of its constituents are the
TeX file and which are the images.
@param: tarball (string): the name of the tar file from arXiv
@param: dir (string): the directory where we would like it untarred to
@return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball.
"""
tarball = check_for_gzip(original_tarball)
dummy1, cmd_out, cmd_err = run_shell_command('file %s', (tarball,))
tarball_output = 'tar archive'
if re.search(tarball_output, cmd_out) == None:
run_shell_command('rm %s', (tarball,))
return ([], [], None)
cmd_list = ['tar', 'xvf', tarball, '-C', sdir]
dummy1, cmd_out, cmd_err = run_process_with_timeout(cmd_list)
if cmd_err != '':
return ([], [], None)
if original_tarball != tarball:
run_shell_command('rm %s', (tarball,))
cmd_out = cmd_out.split('\n')
tex_output_contains = 'TeX'
tex_file_extension = 'tex'
image_output_contains = 'image'
eps_output_contains = '- type eps'
ps_output_contains = 'Postscript'
file_list = []
image_list = []
might_be_tex = []
for extracted_file in cmd_out:
if extracted_file == '':
break
if extracted_file.startswith('./'):
extracted_file = extracted_file[2:]
# ensure we are actually looking at the right file
extracte | d_file = os.path.join(sdir, extracted_file)
# Add to full list of ext | racted files
file_list.append(extracted_file)
dummy1, cmd_out, dummy2 = run_shell_command('file %s', (extracted_file,))
# is it TeX?
if cmd_out.find(tex_output_contains) > -1:
might_be_tex.append(extracted_file)
# is it an image?
elif cmd_out.lower().find(image_output_contains) > cmd_out.find(':') \
or \
cmd_out.lower().find(eps_output_contains) > cmd_out.find(':')\
or \
cmd_out.find(ps_output_contains) > cmd_out.find(':'):
# we have "image" in the output, and it is not in the filename
# i.e. filename.ext: blah blah image blah blah
image_list.append(extracted_file)
# if neither, maybe it is TeX or an image anyway, otherwise,
# we don't care
else:
if extracted_file.split('.')[-1].lower() == tex_file_extension:
# we might have tex source!
might_be_tex.append(extracted_file)
elif extracted_file.split('.')[-1] in ['eps', 'png', \
'ps', 'jpg', 'pdf']:
# we might have an image!
image_list.append(extracted_file)
if might_be_tex == []:
# well, that's tragic
# could not find TeX file in tar archive
return ([], [], [])
return (file_list, image_list, might_be_tex)
def check_for_gzip(tfile):
"""
Was that tarball also gzipped? Let's find out!
@param: file (string): the name of the object (so we can gunzip, if
that's necessary)
@output: a gunzipped file in the directory of choice, if that's necessary
@return new_file (string): The name of the file after gunzipping or the
original name of the file if that wasn't necessary
"""
gzip_contains = 'gzip compressed data'
dummy1, cmd_out, dummy2 = run_shell_command('file %s', (tfile,))
if cmd_out.find(gzip_contains) > -1:
# we have a gzip!
# so gzip is retarded and won't accept any file that doesn't end
# with .gz. sad.
run_shell_command('cp %s %s', (tfile, tfile + '.tar.gz'))
new_dest = os.path.join(os.path.split(tfile)[0], 'tmp.tar')
run_shell_command('touch %s', (new_dest,))
dummy1, cmd_out, cmd_err = run_shell_command('gunzip -c %s',
(tfile + '.tar.gz',))
if cmd_err != '':
write_message('Error while gunzipping ' + tfile)
return tfile
tarfile = open(new_dest, 'w')
tarfile.write(cmd_out)
tarfile.close()
run_shell_command('rm %s', (tfile + '.tar.gz',))
return new_dest
return tfile
def convert_images(image_list):
    """
    Here we figure out the types of the images that were extracted from
    the tarball and determine how to convert them into PNG.
    @param: image_list ([string, string, ...]): the list of image files
        extracted from the tarball in step 1
    @return: image_list ([str, str, ...]): The list of image files when all
        have been converted to PNG format.
    """
    png_output_contains = 'PNG image'
    ret_list = []
    for image_file in image_list:
        # Directories can end up in the extraction list; skip them.
        if os.path.isdir(image_file):
            continue
        # FIXME: here and everywhere else in the plot extractor
        # library the run shell command statements should be (1)
        # called with timeout in order to prevent runaway imagemagick
        # conversions; (2) the arguments should be passed properly so
        # that they are escaped.
        dummy1, cmd_out, dummy2 = run_shell_command('file %s', (image_file,))
        # Already a PNG: keep as-is, no conversion needed.
        if cmd_out.find(png_output_contains) > -1:
            ret_list.append(image_file)
        else:
            # we're just going to assume that ImageMagick can convert all
            # the image types that we may be faced with
            # for sure it can do EPS->PNG and JPG->PNG and PS->PNG
            # and PSTEX->PNG
            converted_image_file = get_converted_image_name(image_file)
            cmd_list = ['convert', image_file, converted_image_file]
            try:
                dummy1, cmd_out, cmd_err = run_process_with_timeout(cmd_list)
                if cmd_err == '':
                    ret_list.append(converted_image_file)
                else:
                    # Conversion failed: drop the image from the result set.
                    write_message('convert failed on ' + image_file)
            except Timeout:
                write_message('convert timed out on ' + image_file)
    return ret_list
def extract_text(tarball):
    """
    We check to see if there's a file called tarball.pdf, and, if there is,
    we run pdftotext on it.  Simple as that.
    @param: tarball (string): the raw name of the tarball
    @return: None on success or missing PDF, -1 if pdftotext reported errors
    """
    try:
        # Raises OSError when no companion PDF exists.
        os.stat(tarball + '.pdf')
        # BUGFIX: the PDF argument used to carry a trailing space
        # ('.pdf '), so pdftotext was pointed at a non-existent filename.
        cmd_list = ['pdftotext', tarball + '.pdf', tarball + '.txt']
        dummy1, dummy2, cmd_err = run_process_with_timeout(cmd_list)
        if cmd_err != '':
            return -1
        write_message('generated ' + tarball + '.txt from ' + tarball + '.pdf')
    except (OSError, Timeout):
        # Narrowed from a bare `except:`: either the PDF is absent
        # (os.stat) or pdftotext exceeded its time budget.
        write_message('no text from ' + tarball + '.pdf')
|
lakshmisampath/searchlight | searchlight/elasticsearch/plugins/glance/metadefs.py | Python | apache-2.0 | 5,700 | 0 | # Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from searchlight.elasticsearch.plugins import base
from searchlight.elasticsearch.plugins.glance \
import metadefs_notification_handler
from searchlight.elasticsearch.plugins.glance \
import serialize_glance_metadef_ns
class MetadefIndex(base.IndexBase):
    """Searchlight index plugin for Glance metadata definition namespaces."""
    def __init__(self):
        super(MetadefIndex, self).__init__()
    def get_index_name(self):
        # All Glance-sourced documents share the 'glance' index.
        return 'glance'
    def get_document_type(self):
        return 'metadef'
    def get_document_id_field(self):
        # Namespace names are unique, so the namespace doubles as the doc id.
        return 'namespace'
    def get_mapping(self):
        """Return the Elasticsearch mapping for metadef documents."""
        # Shared sub-mapping reused for namespace- and object-level properties.
        property_mapping = {
            'dynamic': True,
            'type': 'nested',
            'properties': {
                'name': {'type': 'string', 'index': 'not_analyzed'},
                'type': {'type': 'string'},
                'title': {'type': 'string'},
                'description': {'type': 'string'},
            }
        }
        mapping = {
            '_id': {
                'path': 'namespace',
            },
            'properties': {
                'display_name': {'type': 'string'},
                'description': {'type': 'string'},
                'namespace': {'type': 'string', 'index': 'not_analyzed'},
                'owner': {'type': 'string', 'index': 'not_analyzed'},
                'visibility': {'type': 'string', 'index': 'not_analyzed'},
                'resource_types': {
                    'type': 'nested',
                    'properties': {
                        'name': {'type': 'string'},
                        # TODO(sjmc7): add these back in? They don't seem
                        # to be accessible via the API
                        # 'prefix': {'type': 'string'},
                        # 'properties_target': {'type': 'string'},
                    },
                },
                'objects': {
                    'type': 'nested',
                    'properties': {
                        'id': {'type': 'string', 'index': 'not_analyzed'},
                        'name': {'type': 'string'},
                        'description': {'type': 'string'},
                        'properties': property_mapping,
                    }
                },
                'properties': property_mapping,
                'tags': {
                    'type': 'nested',
                    'properties': {
                        'name': {'type': 'string'},
                    }
                }
            },
        }
        return mapping
    def get_rbac_filter(self, request_context):
        """Return query clauses restricting results to the caller's tenant
        (or public namespaces) within this plugin's index/type."""
        # TODO(krykowski): Define base get_rbac_filter in IndexBase class
        # which will provide some common subset of query pieces.
        # Something like:
        # def get_common_context_pieces(self, request_context):
        #     return [{'term': {'owner': request_context.owner,
        #                       'type': {'value': self.get_document_type()}}]
        return [
            {
                "and": [
                    {
                        'or': [
                            {
                                'term': {
                                    'owner': request_context.owner
                                }
                            },
                            {
                                'term': {
                                    'visibility': 'public'
                                }
                            }
                        ]
                    },
                    {
                        'type': {
                            'value': self.get_document_type()
                        }
                    },
                    {
                        'index': {
                            'value': self.get_index_name()
                        }
                    }
                ]
            }
        ]
    def get_objects(self):
        """Fetch every metadef namespace from the Glance API."""
        from searchlight.elasticsearch.plugins import openstack_clients
        gc = openstack_clients.get_glanceclient()
        return list(gc.metadefs_namespace.list())
    def serialize(self, metadef_obj):
        return serialize_glance_metadef_ns(metadef_obj)
    def get_notification_handler(self):
        return metadefs_notification_handler.MetadefHandler(
            self.engine,
            self.get_index_name(),
            self.get_document_type()
        )
    def get_notification_supported_events(self):
        # Notification topics consumed to keep the index current.
        # BUGFIX: two event names were garbled in the source
        # ("metadef_namespace.dele | te", "m | etadef_namespace.delete_objects").
        return [
            "metadef_namespace.create",
            "metadef_namespace.update",
            "metadef_namespace.delete",
            "metadef_object.create",
            "metadef_object.update",
            "metadef_object.delete",
            "metadef_property.create",
            "metadef_property.update",
            "metadef_property.delete",
            "metadef_tag.create",
            "metadef_tag.update",
            "metadef_tag.delete",
            "metadef_resource_type.create",
            "metadef_resource_type.delete",
            "metadef_namespace.delete_properties",
            "metadef_namespace.delete_objects",
            "metadef_namespace.delete_tags"
        ]
|
nibrahim/PlasTeX | plasTeX/Base/LaTeX/Bibliography.py | Python | mit | 4,545 | 0.00418 | #!/usr/bin/env python
"""
C.11.3 Bibliography and Citation (p208)
"""
import plasTeX, codecs
from plasTeX.Base.LaTeX.Sectioning import chapter, section
from plasTeX import Command, Environment
from Lists import List
log = plasTeX.Logging.getLogger()
class bibliography(chapter):
    # \bibliography{files}: a chapter-level node that pulls in the BibTeX
    # output (.bbl) generated for this job.
    args = 'files:str'
    linkType = 'bibliography'
    def invoke(self, tex):
        res = chapter.invoke(self, tex)
        # Title text comes from the current \bibname definition.
        self.title = self.ownerDocument.createElement('bibname').expand(tex)
        self.loadBibliographyFile(tex)
        return res
    def loadBibliographyFile(self, tex):
        # Load bibtex file
        try:
            file = tex.kpsewhich(tex.jobname+'.bbl')
            tex.input(codecs.open(file, 'r', self.ownerDocument.config['files']['input-encoding']))
        except OSError, msg:
            # Missing .bbl is not fatal; the bibliography is just empty.
            log.warning(msg)
class bibliographystyle(Command):
    # \bibliographystyle{style}: recorded but needs no processing here.
    args = 'style'
class thebibliography(List):
    # \begin{thebibliography}{widelabel}: list of \bibitem entries.
    args = 'widelabel'
    linkType = 'bibliography'
    class bibitem(List.item):
        args = '[ label ] key:str'
        def invoke(self, tex):
            res = List.item.invoke(self, tex)
            a = self.attributes
            # Put the entry into the global bibliography
            doc = self.ownerDocument
            bibitems = doc.userdata.getPath('bibliography/bibitems', {})
            bibitems[a['key']] = self
            doc.userdata.setPath('bibliography/bibitems', bibitems)
            # Sequential number among unlabeled items, used as fallback label.
            self.ref = str(len([x for x in bibitems.values()
                                if not x.attributes['label']]))
            key = a['key']
            label = a.get('label')
            bibcites = doc.userdata.getPath('bibliography/bibcites', {})
            if not bibcites.has_key(key):
                if label is None:
                    label = doc.createDocumentFragment()
                    label.extend(self.ref)
                # BUGFIX: the assignment was garbled ('label |') in source.
                bibcites[key] = label
                doc.userdata.setPath('bibliography/bibcites', bibcites)
            return res
        @property
        def id(self):
            return self.attributes['key']
        @property
        def bibcite(self):
            # Render this item's citation label as a document fragment.
            # BUGFIX: 'createDocumentFr | agment' was garbled in source.
            doc = self.ownerDocument
            res = doc.createDocumentFragment()
            bibcites = doc.userdata.getPath('bibliography/bibcites', {})
            res.extend(bibcites.get(self.attributes['key']))
            return res
    def digest(self, tokens):
        if self.macroMode == Command.MODE_END:
            return
        # Skip everything up to the first \bibitem, then let List digest.
        for tok in tokens:
            if not isinstance(tok, thebibliography.bibitem):
                continue
            tokens.push(tok)
            break
        return List.digest(self, tokens)
class cite(Command):
    # \cite[text]{key1,key2,...}
    args = '[ text ] bibkeys:list:str'
    @property
    def bibitems(self):
        # Get all referenced items
        output = []
        doc = self.ownerDocument
        for x in self.attributes['bibkeys']:
            item = doc.userdata.getPath('bibliography/bibitems', {}).get(x)
            if item is None:
                log.warning('Bibliography item "%s" has no entry', x)
            else:
                output.append(item)
        return output
    @property
    def postnote(self):
        # Optional trailing note from the bracketed argument ('' if absent).
        a = self.attributes
        if a['text'] is not None:
            return a['text']
        return ''
    def citation(self):
        """ (Jones et al., 1990) """
        res = self.ownerDocument.createDocumentFragment()
        i = 0
        res.append('[')
        for i, item in enumerate(self.bibitems):
            # Each citation label becomes a group linked back to its bibitem.
            node = self.ownerDocument.createElement('bgroup')
            node.extend(item.bibcite)
            node.idref['bibitem'] = item
            res.append(node)
            if i < (len(self.bibitems)-1):
                res.append(', ')
            else:
                # The postnote is appended after the last key only.
                if self.postnote:
                    res.append(', ')
                    res.append(self.postnote)
        res.append(']')
        return res
class nocite(Command):
    # \nocite{keys}: includes entries in the bibliography without citing them.
    args = 'bibkeys:str'
class bibcite(Command):
    # \bibcite{key}{info}: emitted in the .aux file to record citation labels.
    args = 'key:str info'
    def invoke(self, tex):
        Command.invoke(self, tex)
        # Store the label under 'bibliography/bibcites' so \cite can render it.
        value = self.attributes['info'].firstChild
        doc = self.ownerDocument
        bibcites = doc.userdata.getPath('bibliography/bibcites', {})
        bibcites[self.attributes['key']] = value
        doc.userdata.setPath('bibliography/bibcites', bibcites)
# The remaining macros are bookkeeping commands emitted by BibTeX/LaTeX;
# they need no processing beyond being recognized during parsing.
class citation(Command):
    pass
class bibstyle(Command):
    pass
class bibdata(Command):
    pass
class newblock(Command):
    pass
class bibliographyref(Command):
    pass
iPlantCollaborativeOpenSource/libcloud | libcloud/loadbalancer/drivers/elb.py | Python | apache-2.0 | 12,957 | 0 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ElasticLBDriver'
]
from libcloud.utils.py3 import httplib
from libcloud.utils.xml import findtext, findall
from libcloud.loadbalancer.types import State
from libcloud.loadbalancer.base import Driver, LoadBalancer, Member
from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection
VERSION = '2012-06-01'
# Host template; the region is interpolated in ElasticLBDriver.__init__.
HOST = 'elasticloadbalancing.%s.amazonaws.com'
ROOT = '/%s/' % (VERSION)
# XML namespace used when parsing API responses.
NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, )
class ELBResponse(AWSGenericResponse):
    """
    Amazon ELB response class.
    """
    namespace = NS
    exceptions = {}  # no driver-specific exception mapping
    xpath = 'Error'  # where AWS puts error details in the response body
class ELBConnection(SignedAWSConnection):
    # Signature-v4 signed connection bound to the ELB service endpoint.
    version = VERSION
    host = HOST
    responseCls = ELBResponse
    service_name = 'elasticloadbalancing'
class ElasticLBDriver(Driver):
name = 'Amazon Elastic Load Balancing'
website = 'http://aws.amazon.com/elasticloadbalancing/'
connectionCls = ELBConnection
signature_version = '4'
    def __init__(self, access_id, secret, region, token=None):
        # Stash the token before the base constructor runs, since
        # connection setup may consult it.
        self.token = token
        super(ElasticLBDriver, self).__init__(access_id, secret, token=token)
        self.region = region
        self.region_name = region
        # Bind the connection to the region-specific API host.
        self.connection.host = HOST % (region)
def list_protocols(self):
return ['tcp', 'ssl', 'http', 'https']
    def list_balancers(self):
        """List all load balancers in the configured region."""
        params = {'Action': 'DescribeLoadBalancers'}
        data = self.connection.request(ROOT, params=params).object
        return self._to_balancers(data)
    def create_balancer(self, name, port, protocol, algorithm, members,
                        ex_members_availability_zones=None):
        """Create a load balancer with a single listener.

        NOTE(review): ``algorithm`` and ``members`` are accepted for base-API
        compatibility but are not used by this driver.
        """
        if ex_members_availability_zones is None:
            ex_members_availability_zones = ['a']
        params = {
            'Action': 'CreateLoadBalancer',
            'LoadBalancerName': name,
            'Listeners.member.1.InstancePort': str(port),
            'Listeners.member.1.InstanceProtocol': protocol.upper(),
            'Listeners.member.1.LoadBalancerPort': str(port),
            'Listeners.member.1.Protocol': protocol.upper(),
        }
        # Zone letters are appended to the region, e.g. 'us-east-1' + 'a'.
        for i, z in enumerate(ex_members_availability_zones):
            zone = ''.join((self.region, z))
            params['AvailabilityZones.member.%d' % (i + 1)] = zone
        data = self.connection.request(ROOT, params=params).object
        balancer = LoadBalancer(
            id=name,
            name=name,
            state=State.PENDING,
            ip=findtext(element=data, xpath='DNSName', namespace=NS),
            port=port,
            driver=self.connection.driver
        )
        # Members are tracked locally; the new balancer starts with none.
        balancer._members = []
        return balancer
    def destroy_balancer(self, balancer):
        """Delete the given load balancer; returns True on success."""
        params = {
            'Action': 'DeleteLoadBalancer',
            'LoadBalancerName': balancer.id
        }
        self.connection.request(ROOT, params=params)
        return True
    def get_balancer(self, balancer_id):
        """Fetch a single balancer by name via DescribeLoadBalancers."""
        params = {
            'Action': 'DescribeLoadBalancers',
            'LoadBalancerNames.member.1': balancer_id
        }
        data = self.connection.request(ROOT, params=params).object
        # The API returns a list; a single-name query yields one entry.
        return self._to_balancers(data)[0]
    def balancer_attach_compute_node(self, balancer, node):
        """Register a compute node with the balancer and track it locally."""
        params = {
            'Action': 'RegisterInstancesWithLoadBalancer',
            'LoadBalancerName': balancer.id,
            'Instances.member.1.InstanceId': node.id
        }
        self.connection.request(ROOT, params=params)
        # Mirror the registration in the locally cached member list.
        balancer._members.append(Member(node.id, None, None, balancer=self))
    def balancer_detach_member(self, balancer, member):
        """Deregister a member instance; returns True on success."""
        params = {
            'Action': 'DeregisterInstancesFromLoadBalancer',
            'LoadBalancerName': balancer.id,
            'Instances.member.1.InstanceId': member.id
        }
        self.connection.request(ROOT, params=params)
        # Drop the member from the locally cached list as well.
        balancer._members = [m for m in balancer._members if m.id != member.id]
        return True
def balancer_list_members(self, balancer):
re | turn balancer._members
def ex_list_balancer_policies(self, balancer):
"""
Return a list of policy description string.
:rtype: ``list`` of ``str``
"""
params = {
'Action': 'DescribeLoadBalancerPolicies',
| 'LoadBalancerName': balancer.id
}
data = self.connection.request(ROOT, params=params).object
return self._to_policies(data)
    def ex_list_balancer_policy_types(self):
        """
        Return a list of policy type description string.

        :rtype: ``list`` of ``str``
        """
        params = {'Action': 'DescribeLoadBalancerPolicyTypes'}
        data = self.connection.request(ROOT, params=params).object
        return self._to_policy_types(data)
def ex_create_balancer_policy(self, name, policy_name, policy_type,
policy_attributes=None):
"""
Create a new load balancer policy
:param name: Balancer name to create the policy for
:type name: ``str``
:param policy_name: policy to be created
:type policy_name: ``str``
:param policy_type: policy type being used to create policy.
:type policy_type: ``str``
:param policy_attributes: Each list contain values, ['AttributeName',
'value']
:type policy_attributes: ``PolicyAttribute list``
"""
params = {
'Action': 'CreateLoadBalancerPolicy',
'LoadBalancerName': name,
'PolicyName': policy_name,
'PolicyTypeName': policy_type
}
if policy_attributes is not None:
for index, (name, value) in enumerate(
policy_attributes.iteritems(), 1):
params['PolicyAttributes.member.%d. \
AttributeName' % (index)] = name
params['PolicyAttributes.member.%d. \
AttributeValue' % (index)] = value
response = self.connection.request(ROOT, params=params)
return response.status == httplib.OK
    def ex_delete_balancer_policy(self, name, policy_name):
        """
        Delete a load balancer policy

        :param name: balancer name for which policy will be deleted
        :type name: ``str``

        :param policy_name: The Mnemonic name for the policy being deleted
        :type policy_name: ``str``
        """
        params = {
            'Action': 'DeleteLoadBalancerPolicy',
            'LoadBalancerName': name,
            'PolicyName': policy_name
        }
        response = self.connection.request(ROOT, params=params)
        # True when AWS acknowledged the deletion with HTTP 200.
        return response.status == httplib.OK
def ex_set_balancer_policies_listener(self, name, port, policies):
"""
Associates, updates, or disables a policy with a listener on
the load balancer
:param name: balancer name to set policies for listerner
:type name: ``str``
:param port: port to use
:type port: ``str``
:param policies: List of policies to be associated with the balancer
:type policies: ``string list``
"""
params = {
'Action': 'SetLoadBalancerPoliciesOfListener',
'LoadBalancerName': name,
'LoadBalancerPort': str(port)
}
|
seibert-media/Highton | highton/fields/object_field.py | Python | apache-2.0 | 933 | 0.002144 | from highton.fields import Field
class ObjectField(Field):
    """
    The ObjectField parses the init_class object

    :ivar init_class: a Highton model
    """
    def __init__(self, name, init_class):
        super().__init__(name)
        self.init_class = init_class

    def encode(self):
        """
        Just encode the object you have as value

        :return: the parsed element
        :rtype: xml.etree.ElementTree.Element
        """
        return self.value.encode()

    def decode(self, element):
        # Delegate parsing of the XML element to the wrapped model class.
        return self.init_class.decode(element)

    def to_serializable_value(self):
        """
        Run through all fields of the object and parse the values

        :return: mapping of field name to its serializable value
        :rtype: dict
        """
        return {
            name: field.to_serializable_value()
            for name, field in self.value.__dict__.items()
            if isinstance(field, Field) and self.value
        }
eggsandbeer/scheduler | tests/test_site_yearly_aggregator.py | Python | bsd-3-clause | 1,460 | 0.008219 | __author__ = 'Bohdan Mushkevych'
import unittest
from settings import enable_test_mode
# Test mode must be switched on before importing any scheduler modules,
# so they are configured against the test environment.
enable_test_mode()
from db.model.raw_data import DOMAIN_NAME, TIMEPERIOD
# BUGFIX: this import line was garbled ('from | constants') in the source.
from constants import PROCESS_SITE_YEARLY
from tests import monthly_fixtures
from tests import yearly_fixtures
from tests.test_abstract_worker import AbstractWorkerUnitTest
from workers.site_yearly_aggregator import SiteYearlyAggregator
class SiteYearlyAggregatorUnitTest(AbstractWorkerUnitTest):
    """Exercises SiteYearlyAggregator against generated monthly fixtures."""
    def virtual_set_up(self):
        # Configure the abstract harness: compare aggregator output against
        # the EXPECTED_SITE_YEARLY fixtures instead of regenerating them.
        super(SiteYearlyAggregatorUnitTest, self).constructor(baseclass=SiteYearlyAggregator,
                                                              process_name=PROCESS_SITE_YEARLY,
                                                              output_prefix='EXPECTED_SITE_YEARLY',
                                                              output_module=yearly_fixtures,
                                                              generate_output=False,
                                                              compare_results=True)
        monthly_fixtures.clean_site_entries()
        return monthly_fixtures.generated_site_entries()
    def virtual_tear_down(self):
        monthly_fixtures.clean_site_entries()
    def _get_key(self, obj):
        # Records are keyed by (domain, timeperiod).
        return obj[DOMAIN_NAME], obj[TIMEPERIOD]
    def test_aggregation(self):
        super(SiteYearlyAggregatorUnitTest, self).perform_aggregation()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
ixc/django-fluent-contents | fluent_contents/plugins/commentsarea/appsettings.py | Python | apache-2.0 | 862 | 0.00348 | """
Settings for the markup part.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
COMMENTS_APP = getattr(settings, 'COMMENTS_APP', 'comments')
# BUGFIX: this assignment was garbled ('FLUEN | T_...') in the source.
FLUENT_COMMENTSAREA_THREADEDCOMMENTS = 'threadedcomments' in settings.INSTALLED_APPS
# Test threadedcomments support
if FLUENT_COMMENTSAREA_THREADEDCOMMENTS:
    try:
        from threadedcomments.templatetags import threadedcomments_tags
    except ImportError:
        raise ImportError("The 'threadedcomments' package is too old to use for the 'commentsarea' plugin.")
    # Avoid getting an error that the Form misses a parent parameter.
    # The threadedcomments module needs a separate COMMENTS_APP setting.
    if not COMMENTS_APP or COMMENTS_APP == 'comments':
        raise ImproperlyConfigured("To use 'threadedcomments', specify the COMMENTS_APP as well")
payal97/portal | systers_portal/common/tests/selenium/test_admin_actions.py | Python | gpl-2.0 | 4,015 | 0.000249 | from common.tests.selenium.base import SeleniumTestCase
from selenium.webdriver.support.ui import Select, WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class TestAdminActions(SeleniumTestCase):
    """
    Automated visual tests admin actions
    """
    def test_can_create_community(self):
        # Log in as an admin via a pre-built session cookie.
        self.make_admin()
        self.browser.get(self.live_server_url)
        self.browser.add_cookie(self.create_session_cookie())
        self.browser.refresh()
        self.browser.get('{}{}'.format(self.live_server_url, '/community/add_community/'))
        name = self.browser.find_element_by_id("id_name")
        slug = self.browser.find_element_by_id("id_slug")
        order = self.browser.find_element_by_id("id_order")
        email = self.browser.find_element_by_id("id_email")
        mailing_list = self.browser.find_element_by_id("id_mailing_list")
        parent_community = Select(
            self.browser.find_element_by_id("id_parent_community"))
        website = self.browser.find_element_by_id("id_website")
        facebook = self.browser.find_element_by_id("id_facebook")
        googleplus = self.browser.find_element_by_id("id_googleplus")
        twitter = self.browser.find_element_by_id("id_twitter")
        name.send_keys("Foo Community")
        slug.send_keys("foo-community")
        order.send_keys("5")
        email.send_keys("foo.community@systers.org")
        mailing_list.send_keys('foo.community.list@systers.org')
        parent_community.select_by_index(0)
        website.send_keys('http://www.foo-community.org')
        facebook.send_keys('http://www.facebook.com/foo-community')
        googleplus.send_keys('http://plus.google.com/foo-community')
        twitter.send_keys('http://www.twitter.com/foo-community')
        self.browser.find_element_by_id('submit-id-save').click()
        # Wait required on this page. Tests will fail without
        # wait even after successful completion of required actions.
        wait = WebDriverWait(self.browser, 10)
        wait.until(
            EC.presence_of_element_located(
                (By.XPATH, "//h1[contains(text(),'Foo Community')]")))
        self.assertTrue('Foo Community' in self.browser.title)
    def test_can_create_meetup(self):
        # BUGFIX: this call was garbled ('self.make_ | admin()') in source.
        self.make_admin()
        self.browser.get(self.live_server_url)
        self.browser.add_cookie(self.create_session_cookie())
        self.browser.refresh()
        self.browser.get('{}{}'.format(self.live_server_url, '/meetup/add/'))
        name = self.browser.find_element_by_id("id_name")
        slug = self.browser.find_element_by_id("id_slug")
        # BUGFIX: garbled 'find_eleme | nt_by_id' restored.
        location = Select(self.browser.find_element_by_id("id_location"))
        email = self.browser.find_element_by_id("id_email")
        name.send_keys("Foo Community")
        slug.send_keys("foo-community")
        email.send_keys("foo.community@systers.org")
        location.select_by_index(1)
        # Locate the CKE iframe for description
        description = self.browser.find_element_by_xpath(
            "//div[contains(@id, 'cke_1_contents')]/iframe")
        self.browser.switch_to.frame(description)  # switch context to iframe
        description_editor = self.browser.find_element_by_xpath("//body")
        description_editor.send_keys("Foo description")
        self.browser.switch_to_default_content()  # return to main context
        # Locate the CKE iframe for sponsors
        sponsors = self.browser.find_element_by_xpath(
            "//div[contains(@id, 'cke_2_contents')]/iframe")
        self.browser.switch_to.frame(sponsors)
        sponsors_editor = self.browser.find_element_by_xpath("//body")
        sponsors_editor.send_keys("Foo sponsor")
        self.browser.switch_to_default_content()
        self.browser.find_element_by_id('submit-id-save').click()
        message = self.browser.find_element_by_class_name('container').text
        self.assertTrue('Meetup added Successfully' in message)
ContinuumIO/chaco | chaco/overlays/simple_inspector_overlay.py | Python | bsd-3-clause | 6,043 | 0.00182 | """A simple inspector overlay for plots
This module provides the SimpleInspectorOverlay for displaying
information gathered from an inspector tool in a TextGrid. By default
it is configured to work with a SimpleInspectorTool.
The module also provides some helper factory functions for creating text
formatters for dictionary values.
"""
from numpy import array
from traits.api import Any, List, Callable, Enum, Bool
from text_grid_overlay import TextGridOverlay
def basic_formatter(key, decimals):
    """Create a basic '<key>: <value>' formatting function

    This factory creates a function that formats a specified key and with a
    numerical value from a dictionary into a string.

    Parameters
    ----------
    key
        The dictionary key to format.
    decimals
        The number of decimal places to show.

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    # Pre-build a %-template like 'x: %(x).2f' once, outside the closure.
    template = key + ': %(' + key + ').' + str(decimals) + 'f'
    def format(**kwargs):
        return template % kwargs
    return format
def datetime_formatter(key, time_format='%Y/%m/%d %H:%M:%S'):
    """Create a datetime formatting function

    This factory creates a function that formats a specified key and with a
    timestamp value from a dictionary into a string.

    Parameters
    ----------
    key
        The dictionary key to format.  The corresponding value should be a
        timestamp.
    time_format
        A format string suitable for strftime().

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    import datetime
    def format(**kwargs):
        # Convert the POSIX timestamp to a local datetime, then render it.
        stamp = datetime.datetime.fromtimestamp(kwargs[key])
        return '%s: %s' % (key, stamp.strftime(time_format))
    return format
def time_formatter(key):
    """Create a time formatting function

    This factory creates a function that formats a specified key and with a
    timestamp value from a dictionary into a 'HH:MM:SS' format string.

    Parameters
    ----------
    key
        The dictionary key to format.  The corresponding value should be a
        timestamp.

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    # Delegate to the datetime factory with an hours:minutes:seconds format.
    hms_format = '%H:%M:%S'
    return datetime_formatter(key, time_format=hms_format)
def date_formatter(key):
    """Create a date formatting function

    This factory creates a function that formats a specified key and with a
    timestamp value from a dictionary into a 'yyyy/mm/dd' format string.

    Parameters
    ----------
    key
        The dictionary key to format.  The corresponding value should be a
        timestamp.

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    # Delegate to the datetime factory with a year/month/day-only format.
    ymd_format = '%Y/%m/%d'
    return datetime_formatter(key, time_format=ymd_format)
class SimpleInspectorOverlay(TextGridOverlay):
    """ Simple inspector overlay for plots

    This is a simple overlay that listens for new_value events on a
    SimpleInspectorTool and displays formatted values in a grid.

    By default this displays the 'x' and 'y' values provided by the
    SimpleInspectorTool, but instances can provide a field_formatters
    trait which is a list of lists of callables which extract values
    from a dictionary and formats them.  Each callable corresponds to a
    cell in the underlying TextGrid component.

    Although by default this works with the SimpleInspectorTool, with
    appropriate field_formatters this class can be used with any inspector
    tool that follows the same API.
    """
    # XXX We should probably refactor this into a BaseInspectorOverlay
    # which handles the visibility and basic event handling, and smaller
    # version of this class which handles inserting values into a text grid

    # the inspector that I am listening to.  This should have a new_value
    # event and a visible trait for me to listen to.
    inspector = Any

    # fields to display
    field_formatters = List(List(Callable))

    # Anchor the text to the mouse?  (If False, then the text is in one of the
    # corners.)  Use the **align** trait to determine which corner.
    tooltip_mode = Bool(False)

    # The default state of the overlay is visible.
    visible = True

    # Whether the overlay should auto-hide and auto-show based on the
    # tool's location, or whether it should be forced to be hidden or visible.
    visibility = Enum("auto", True, False)

    #########################################################################
    # Traits Handlers
    #########################################################################

    def _field_formatters_default(self):
        # BUGFIX: the second call was garbled ('b | asic_formatter') in source.
        return [[basic_formatter('x', 2)], [basic_formatter('y', 2)]]

    def _new_value_updated(self, event):
        if event is None:
            # NOTE(review): array() with no arguments raises TypeError under
            # numpy -- presumably this was meant to be array([]); confirm.
            self.text_grid = array()
            if self.visibility == "auto":
                self.visibility = False
        elif self.visibility == "auto":
            # BUGFIX: garbled condition ('"auto" | :') restored.
            self.visible = True
        if self.tooltip_mode:
            self.alternate_position = self.inspector.last_mouse_position
        # Re-render every cell from the event dict via the formatter grid.
        d = event
        self.text_grid.string_array = array([[formatter(**d) for formatter in row]
                                             for row in self.field_formatters])
        self.text_grid.request_redraw()

    def _visible_changed(self):
        if self.component:
            self.request_redraw()

    def _inspector_changed(self, old, new):
        # Rewire the new_value/visible listeners when the inspector swaps.
        if old:
            old.on_trait_event(self._new_value_updated, 'new_value', remove=True)
            old.on_trait_change(self._tool_visible_changed, "visible", remove=True)
        if new:
            new.on_trait_event(self._new_value_updated, 'new_value')
            new.on_trait_change(self._tool_visible_changed, "visible")
            self._tool_visible_changed()

    def _tool_visible_changed(self):
        self.visibility = self.inspector.visible
        if self.visibility != "auto":
            self.visible = self.visibility
|
rg3915/spark | spark/feeds/views.py | Python | mit | 6,435 | 0 | import json
from django.contrib.auth.decorators import login_required
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import (HttpResponse, HttpResponseBadRequest,
HttpResponseForbidden)
from django.shortcuts import get_object_or_404, render
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from spark.activities.models import Activity
from spark.decorators import ajax_required
from spark.feeds.models import Feed
FEEDS_NUM_PAGES = 10
@login_required
def feeds(request):
    """Render the first page of the global feed stream."""
    all_feeds = Feed.get_feeds()
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    feeds = paginator.page(1)
    # Remember the newest visible feed id so the client can poll for updates.
    from_feed = -1
    if feeds:
        from_feed = feeds[0].id
    return render(request, 'feeds/feeds.html', {
        'feeds': feeds,
        'from_feed': from_feed,
        'page': 1,
    })
def feed(request, pk):
    """Render the detail page for a single feed item (404 if unknown)."""
    item = get_object_or_404(Feed, pk=pk)
    return render(request, 'feeds/feed.html', {'feed': item})
@login_required
@ajax_required
def load(request):
    """AJAX: return rendered feed partials for an older page (infinite scroll)."""
    from_feed = request.GET.get('from_feed')
    page = request.GET.get('page')
    feed_source = request.GET.get('feed_source')
    all_feeds = Feed.get_feeds(from_feed)
    # 'all' means the global stream; otherwise filter by author id.
    if feed_source != 'all':
        all_feeds = all_feeds.filter(user__id=feed_source)
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    try:
        feeds = paginator.page(page)
    except PageNotAnInteger:
        return HttpResponseBadRequest()
    except EmptyPage:
        # Past the last page: respond with empty markup rather than an error.
        feeds = []
    html = ''
    csrf_token = (csrf(request)['csrf_token'])
    for feed in feeds:
        html = '{0}{1}'.format(html,
                               render_to_string('feeds/partial_feed.html',
                                                {
                                                    'feed': feed,
                                                    'user': request.user,
                                                    'csrf_token': csrf_token
                                                }))
    return HttpResponse(html)
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
    """Render every feed newer than last_feed and concatenate the partials."""
    feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    rendered = []
    for feed in feeds:
        rendered.append(render_to_string('feeds/partial_feed.html',
                                         {
                                             'feed': feed,
                                             'user': user,
                                             'csrf_token': csrf_token
                                         }))
    return ''.join(rendered)
@login_required
@ajax_required
def load_new(request):
    """AJAX: render and return all feeds newer than last_feed."""
    last_feed = request.GET.get('last_feed')
    user = request.user
    csrf_token = (csrf(request)['csrf_token'])
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
@login_required
@ajax_required
def check(request):
    """AJAX poll endpoint: return how many feeds are newer than last_feed."""
    last_feed = request.GET.get('last_feed')
    feed_source = request.GET.get('feed_source')
    feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    count = feeds.count()
    return HttpResponse(count)
@login_required
@ajax_required
def post(request):
    """AJAX: create a new feed post and return the refreshed stream HTML."""
    last_feed = request.POST.get('last_feed')
    user = request.user
    csrf_token = (csrf(request)['csrf_token'])
    feed = Feed()
    feed.user = user
    post = request.POST['post']
    post = post.strip()
    # Blank submissions are silently ignored; content is capped at 255 chars.
    if len(post) > 0:
        feed.post = post[:255]
        feed.save()
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
@login_required
@ajax_required
def like(request):
    """AJAX: toggle the current user's like on a feed; returns the new count."""
    feed_id = request.POST['feed']
    feed = Feed.objects.get(pk=feed_id)
    user = request.user
    like = Activity.objects.filter(activity_type=Activity.LIKE, feed=feed_id,
                                   user=user)
    if like:
        # Already liked: retract the notification and remove the like.
        user.profile.unotify_liked(feed)
        like.delete()
    else:
        # BUGFIX: this creation line was garbled (stray '|') in the source.
        like = Activity(activity_type=Activity.LIKE, feed=feed_id, user=user)
        like.save()
        user.profile.notify_liked(feed)
    return HttpResponse(feed.calculate_likes())
@login_required
@ajax_required
def comment(request):
    """AJAX: POST adds a comment to a feed; GET returns its comment list."""
    if request.method == 'POST':
        feed_id = request.POST['feed']
        # BUGFIX: this lookup was garbled ('pk=feed_id | )') in the source.
        feed = Feed.objects.get(pk=feed_id)
        post = request.POST['post']
        post = post.strip()
        # Blank comments are ignored; content is capped at 255 characters.
        if len(post) > 0:
            post = post[:255]
            user = request.user
            feed.comment(user=user, post=post)
            user.profile.notify_commented(feed)
            user.profile.notify_also_commented(feed)
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': feed})
    else:
        feed_id = request.GET.get('feed')
        feed = Feed.objects.get(pk=feed_id)
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': feed})
@login_required
@ajax_required
def update(request):
    """AJAX: return current like/comment counts for the visible feed range."""
    first_feed = request.GET.get('first_feed')
    last_feed = request.GET.get('last_feed')
    feed_source = request.GET.get('feed_source')
    feeds = Feed.get_feeds().filter(id__range=(last_feed, first_feed))
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    # Map feed pk -> counters, serialized as a JSON object.
    dump = {}
    for feed in feeds:
        dump[feed.pk] = {'likes': feed.likes, 'comments': feed.comments}
    data = json.dumps(dump)
    return HttpResponse(data, content_type='application/json')
@login_required
@ajax_required
def track_comments(request):
    """Render the current comment list for a single feed (used for polling)."""
    feed = Feed.objects.get(pk=request.GET.get('feed'))
    return render(request, 'feeds/partial_feed_comments.html', {'feed': feed})
@login_required
@ajax_required
def remove(request):
    """Delete one of the current user's feeds together with its likes.

    Only the feed owner may delete it; any other user gets a 403.  Malformed
    requests (missing/unknown feed id) yield a 400.
    """
    try:
        feed = Feed.objects.get(pk=request.POST.get('feed'))
        if feed.user != request.user:
            return HttpResponseForbidden()
        parent = feed.parent
        for like in feed.get_likes():
            like.delete()
        feed.delete()
        if parent:
            # Keep the parent's cached comment counter in sync.
            parent.calculate_comments()
        return HttpResponse()
    except Exception:
        return HttpResponseBadRequest()
|
timj/scons | src/engine/SCons/Tool/msvc.py | Python | mit | 11,390 | 0.004829 | """engine.SCons.Tool.msvc
Tool-specific initialization for Microsoft Visual C/C++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import re
import sys
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvs
import SCons.Util
import SCons.Warnings
import SCons.Scanner.RC
from .MSCommon import msvc_exists, msvc_setup_env_once
# Source-file suffixes routed to the C compiler vs. the C++ compiler.
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def validate_vars(env):
    """Validate the PCH and PCHSTOP construction variables."""
    # Nothing to check unless a truthy PCH is configured.
    if not ('PCH' in env and env['PCH']):
        return
    if 'PCHSTOP' not in env:
        raise SCons.Errors.UserError("The PCHSTOP construction must be defined if PCH is defined.")
    if not SCons.Util.is_String(env['PCHSTOP']):
        raise SCons.Errors.UserError("The PCHSTOP construction variable must be a string: %r"%env['PCHSTOP'])
def pch_emitter(target, source, env):
    """Adds the object file target."""
    validate_vars(env)
    pch = None
    obj = None
    for node in target:
        ext = SCons.Util.splitext(str(node))[1]
        if ext == '.pch':
            pch = node
        elif ext == '.obj':
            obj = node
    if not obj:
        # Derive the side-effect object file name from the pch target.
        obj = SCons.Util.splitext(str(pch))[0] + '.obj'
    # pch must be first, and obj second for the PCHCOM to work
    return ([pch, obj], source)
def object_emitter(target, source, env, parent_emitter):
    """Sets up the PCH dependencies for an object file."""
    validate_vars(env)
    parent_emitter(target, source, env)
    # Add a dependency, but only if the target (e.g. 'Source1.obj')
    # doesn't correspond to the pre-compiled header ('Source1.pch').
    # If the basenames match, the source file was most likely handed to
    # both env.PCH() and env.Program(), and adding the explicit dependency
    # would cause a cycle on the .pch file itself.
    #
    # See issue #2505 for a discussion of what to do if it turns
    # out this assumption causes trouble in the wild:
    # http://scons.tigris.org/issues/show_bug.cgi?id=2505
    if 'PCH' in env:
        pch = env['PCH']
        pch_obj = SCons.Util.splitext(str(pch))[0] + '.obj'
        if str(target[0]) != pch_obj:
            env.Depends(target, pch)
    return (target, source)
# Static-object flavour: chain the PCH dependency logic onto SCons' default
# static-object emitter.
def static_object_emitter(target, source, env):
    return object_emitter(target, source, env,
                          SCons.Defaults.StaticObjectEmitter)
# Shared-object flavour: same PCH logic, default shared-object emitter.
def shared_object_emitter(target, source, env):
    return object_emitter(target, source, env,
                          SCons.Defaults.SharedObjectEmitter)
# Precompiled-header builder: runs $PCHCOM and, via pch_emitter, emits the
# .pch target plus its side-effect .obj file.
pch_action = SCons.Action.Action('$PCHCOM', '$PCHCOMSTR')
pch_builder = SCons.Builder.Builder(action=pch_action, suffix='.pch',
                                    emitter=pch_emitter,
                                    source_scanner=SCons.Tool.SourceFileScanner)
# Logic to build .rc files into .res files (resource files)
res_scanner = SCons.Scanner.RC.RCScan()
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')
res_builder = SCons.Builder.Builder(action=res_action,
                                    src_suffix='.rc',
                                    suffix='.res',
                                    src_builder=[],
                                    source_scanner=res_scanner)
def msvc_batch_key(action, env, target, source):
    """
    Returns a key to identify unique batches of sources for compilation.

    If batching is enabled (via the $MSVC_BATCH setting), then all
    target+source pairs that use the same action, defined by the same
    environment, and have the same target and source directories, will
    be batched.

    Returning None specifies that the specified target+source should not
    be batched with other compilations.
    """
    # Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
    # was set to False. This new version should work better.
    # Note we need to do the env.subst so $MSVC_BATCH can be a reference to
    # another construction variable, which is why we test for False and 0
    # as strings.
    if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
        # We're not using batching; return no key.
        return None
    t = target[0]
    s = source[0]
    if os.path.splitext(t.name)[0] != os.path.splitext(s.name)[0]:
        # The base names are different, so this *must* be compiled
        # separately; return no key.
        return None
    return (id(action), id(env), t.dir, s.dir)
def msvc_output_flag(target, source, env, for_signature):
    """
    Returns the correct /Fo flag for batching.

    If batching is disabled or there's only one source file, then we
    return an /Fo string that specifies the target explicitly. Otherwise,
    we return an /Fo string that just specifies the first target's
    directory (where the Visual C/C++ compiler will put the .obj files).
    """
    # Batching is on only when MSVC_BATCH is present and, after substitution,
    # is not one of the "disabled" sentinels (tested as strings because the
    # variable may reference another construction variable).
    batching = ('MSVC_BATCH' in env
                and env.subst('$MSVC_BATCH') not in ('0', 'False', '', None))
    if not batching:
        return '/Fo$TARGET'
    # The Visual C/C++ compiler requires a \ at the end of the /Fo
    # option to indicate an output directory. We use os.sep here so
    # that the test(s) for this can be run on non-Windows systems
    # without having a hard-coded backslash mess up command-line
    # argument parsing.
    return '/Fo${TARGET.dir}' + os.sep
# Batched compile actions: msvc_batch_key groups compatible target+source
# pairs when $MSVC_BATCH is enabled, and $CHANGED_TARGETS limits each batch
# to the out-of-date objects.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR",
                              batch_key=msvc_batch_key,
                              targets='$CHANGED_TARGETS')
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR",
                                  batch_key=msvc_batch_key,
                                  targets='$CHANGED_TARGETS')
def generate(env):
"""Add Builders and construction variables for MSVC++ to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
# TODO(batch): shouldn't reach in to cmdgen this way; necessary
# for now to bypass the checks in Builder.DictCmdGenerator.__call__()
# and allow .cc and .cpp to be compiled in the same command line.
static_obj.cmdgen.source_ext_match = False
shared_obj.cmdgen.source_ex |
gltn/stdm | stdm/third_party/sqlalchemy/util/_collections.py | Python | gpl-2.0 | 29,219 | 0.000068 | # util/_collections.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
from __future__ import absolute_import
import operator
import types
import weakref
from .compat import binary_types
from .compat import collections_abc
from .compat import itertools_filterfalse
from .compat import py2k
from .compat import string_types
from .compat import threading
# Shared immutable empty set; safe to reuse because frozensets cannot change.
EMPTY_SET = frozenset()
class AbstractKeyedTuple(tuple):
    """Common base for tuples whose elements are addressable by name."""

    __slots__ = ()

    def keys(self):
        """Return a list of string key names for this :class:`.KeyedTuple`.

        .. seealso::

            :attr:`.KeyedTuple._fields`

        """
        # Materialize the (subclass-provided) _fields sequence as a list.
        return [field for field in self._fields]
class KeyedTuple(AbstractKeyedTuple):
    """``tuple`` subclass that adds labeled names.

    E.g.::

        >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
        >>> k.one
        1
        >>> k.two
        2

    Result rows returned by :class:`_query.Query` that contain multiple
    ORM entities and/or column expressions make use of this
    class to return rows.

    The :class:`.KeyedTuple` exhibits similar behavior to the
    ``collections.namedtuple()`` construct provided in the Python
    standard library, however is architected very differently.
    Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` is
    does not rely on creation of custom subtypes in order to represent
    a new series of keys, instead each :class:`.KeyedTuple` instance
    receives its list of keys in place. The subtype approach
    of ``collections.namedtuple()`` introduces significant complexity
    and performance overhead, which is not necessary for the
    :class:`_query.Query` object's use case.

    .. seealso::

        :ref:`ormtutorial_querying`

    """
    def __new__(cls, vals, labels=None):
        t = tuple.__new__(cls, vals)
        if labels:
            # Label -> value pairs become instance attributes directly.
            t.__dict__.update(zip(labels, vals))
        else:
            labels = []
        # Stash the raw label list (may contain None for unlabeled columns).
        t.__dict__["_labels"] = labels
        return t
    @property
    def _fields(self):
        """Return a tuple of string key names for this :class:`.KeyedTuple`.

        This method provides compatibility with ``collections.namedtuple()``.

        .. seealso::

            :meth:`.KeyedTuple.keys`

        """
        # None entries mark anonymous/unlabeled columns; they are excluded.
        return tuple([l for l in self._labels if l is not None])
    def __setattr__(self, key, value):
        # Instances are immutable after construction, like plain tuples.
        raise AttributeError("Can't set attribute: %s" % key)
    def _asdict(self):
        """Return the contents of this :class:`.KeyedTuple` as a dictionary.

        This method provides compatibility with ``collections.namedtuple()``,
        with the exception that the dictionary returned is **not** ordered.

        """
        return {key: self.__dict__[key] for key in self.keys()}
class _LW(AbstractKeyedTuple):
    # Lightweight KeyedTuple variant with no per-instance __dict__.
    # NOTE(review): relies on a ``_real_fields`` attribute supplied by
    # generated subclasses elsewhere — confirm at the call sites.
    __slots__ = ()
    def __new__(cls, vals):
        return tuple.__new__(cls, vals)
    def __reduce__(self):
        # for pickling, degrade down to the regular
        # KeyedTuple, thus avoiding anonymous class pickling
        # difficulties
        return KeyedTuple, (list(self), self._real_fields)
    def _asdict(self):
        """Return the contents of this :class:`.KeyedTuple` as a dictionary."""
        d = dict(zip(self._real_fields, self))
        # A None key marks unlabeled columns; it is not part of the dict form.
        d.pop(None, None)
        return d
class ImmutableContainer(object):
    """Mixin whose mutation hooks all raise ``TypeError``."""

    def _immutable(self, *args, **kwargs):
        # Shared guard: any mutation attempt fails, whatever the arguments.
        raise TypeError("%s object is immutable" % self.__class__.__name__)

    __setattr__ = _immutable
    __setitem__ = _immutable
    __delitem__ = _immutable
class immutabledict(ImmutableContainer, dict):
    """A dict that rejects all mutation after construction."""

    clear = pop = popitem = setdefault = update = ImmutableContainer._immutable

    def __new__(cls, *args):
        # Populate here, before the immutability guards can interfere.
        instance = dict.__new__(cls)
        dict.__init__(instance, *args)
        return instance

    def __init__(self, *args):
        # All the work happens in __new__; nothing to do (and mutating here
        # would trip the guards inherited from ImmutableContainer).
        pass

    def __reduce__(self):
        return immutabledict, (dict(self),)

    def union(self, d):
        """Return an immutabledict merging ``d`` over this one."""
        if not d:
            return self
        if not self:
            return d if isinstance(d, immutabledict) else immutabledict(d)
        merged = immutabledict(self)
        # Bypass the blocked ``update`` via the plain dict implementation.
        dict.update(merged, d)
        return merged

    def __repr__(self):
        return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
    """Provide a __getattr__/__setattr__ interface over a dict."""

    __slots__ = ("_data",)

    def __init__(self, data):
        # object.__setattr__ sidesteps our own __setattr__ override below.
        object.__setattr__(self, "_data", data)

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        # Iterate over a snapshot of the values, not the live view.
        return iter(list(self._data.values()))

    def __dir__(self):
        names = dir(super(Properties, self))
        names.extend(str(key) for key in self._data.keys())
        return names

    def __add__(self, other):
        return list(self) + list(other)

    def __setitem__(self, key, obj):
        self._data[key] = obj

    def __getitem__(self, key):
        return self._data[key]

    def __delitem__(self, key):
        del self._data[key]

    def __setattr__(self, key, obj):
        # Attribute writes land in the backing dict, not on the instance.
        self._data[key] = obj

    def __getstate__(self):
        return {"_data": self._data}

    def __setstate__(self, state):
        object.__setattr__(self, "_data", state["_data"])

    def __getattr__(self, key):
        try:
            return self._data[key]
        except KeyError:
            raise AttributeError(key)

    def __contains__(self, key):
        return key in self._data

    def as_immutable(self):
        """Return an immutable proxy for this :class:`.Properties`."""
        return ImmutableProperties(self._data)

    def update(self, value):
        self._data.update(value)

    def get(self, key, default=None):
        return self[key] if key in self else default

    def keys(self):
        return list(self._data)

    def values(self):
        return list(self._data.values())

    def items(self):
        return list(self._data.items())

    def has_key(self, key):
        # Py2-style spelling retained for backwards compatibility.
        return key in self._data

    def clear(self):
        self._data.clear()
class OrderedProperties(Properties):
    """Provide a __getattr__/__setattr__ interface with an OrderedDict
    as backing store."""
    __slots__ = ()
    def __init__(self):
        # Backed by the insertion-ordered dict defined later in this module.
        Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
    """Provide immutable dict/object attribute to an underlying dictionary."""
    # ImmutableContainer precedes Properties in the MRO, so every write path
    # raises TypeError while reads fall through to Properties.
    __slots__ = ()
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
__slots__ = ("_list",)
def __reduce__(self):
return OrderedDict, (self.items(),)
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, "keys"):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def keys(self):
return list(self)
def values(se | lf):
return [self[key] for key in self._list]
def items(self):
return [(key, self[key]) for key in self._list]
if | |
beagles/sosreport-neutron | sos/plugins/ata.py | Python | gpl-2.0 | 1,452 | 0.008953 | ### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Pub | lic License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
import os
class Ata(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin):
    """ ATA and IDE related information (including PATA and SATA)
    """

    plugin_name = "ata"
    packages = ('hdparm', 'smartmontools')

    def setup(self):
        """Collect /proc/ide plus hdparm/smartctl output per disk."""
        dev_path = '/dev'
        sys_block = '/sys/block'
        self.add_copy_spec('/proc/ide')
        if not os.path.isdir(sys_block):
            return
        for disk in os.listdir(sys_block):
            # Only classic SCSI/SATA (sdX) and legacy IDE (hdX) devices.
            if not disk.startswith(("sd", "hd")):
                continue
            disk_path = os.path.join(dev_path, disk)
            self.add_cmd_output("hdparm %s" % disk_path)
            self.add_cmd_output("smartctl -a %s" % disk_path)
|
streamr/marvin | migrations/versions/19b7fe1331be_.py | Python | mit | 665 | 0.010526 | """Add number_of_streams to movies.
Revision ID: 19b7fe1331be
Revises: 2c76677d803f
Create Date: 2013-11-16 22:11:44.560000
"""
# revision identifiers, used by Alembic.
revision = '19b7fe1331be'
# Fix: the down_revision string was corrupted by a stray split marker
# ('2 | c76677d803f'); the module docstring confirms the revised id.
down_revision = '2c76677d803f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the ``number_of_streams`` counter column to ``movie``."""
    ### commands auto generated by Alembic - please adjust! ###
    # server_default='0' lets the NOT NULL column be added to tables that
    # already contain rows.
    op.add_column('movie', sa.Column('number_of_streams', sa.Integer(), nullable=False, server_default='0'))
    ### end Alembic commands ###
def d | owngrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('movie', 'number_of_streams')
### end Alembic commands ###
|
ICGC-TCGA-PanCancer/pcawg-central-index | pcawg_metadata_parser/pcawg_summary.loader.py | Python | gpl-2.0 | 955 | 0.013613 | #!/usr/bin/env python
import sys
import json
from elasticsearch1 import Elasticsearch
def init_es(es_host, es_index):
    """(Re)create the target index and install the donor mapping."""
    es = Elasticsearch([es_host])
    # Drop any stale index first; ignore errors for a missing index.
    es.indices.delete(es_index, ignore=[400, 404])
    es.indices.create(es_index, ignore=400)
    # create mappings
    with open('pcawg_summary.mapping.json', 'r') as mapping_file:
        es_mapping = mapping_file.read()
    es.indices.put_mapping(index=es_index, doc_type='donor', body=es_mapping)
    return es
def main(argv=None):
    """Load every donor document from pcawg_summary.jsonl into Elasticsearch."""
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    es_host = 'localhost:9200'
    es_index = 'pcawg_summary'
    es = init_es(es_host, es_index)
    with open('pcawg_summary.jsonl', 'r') as jsonl:
        for line in jsonl:
            doc = json.loads(line)
            # Use the donor's stable id so reloads overwrite, not duplicate.
            es.index(index=es_index, doc_type='donor',
                     id=doc['donor_unique_id'], body=doc, timeout=90)

if __name__ == "__main__":
    sys.exit(main())
|
jamielennox/keystone | keystone/tests/unit/test_v2_keystoneclient.py | Python | apache-2.0 | 58,102 | 0 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from keystoneclient import exceptions as client_exceptions
from keystoneclient.v2_0 import client as ks_client
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves import range
import webob
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import appserver
from keystone.tests.unit.ksfixtures import database
CONF = cfg.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class ClientDrivenTestCase(tests.TestCase):
    """Exercise the Keystone v2 API end-to-end through python-keystoneclient."""
    def config_files(self):
        # Layer the SQL backend config over the base test configuration.
        config_files = super(ClientDrivenTestCase, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
        return config_files
    def setUp(self):
        """Stand up public and admin app servers plus a default client."""
        super(ClientDrivenTestCase, self).setUp()
        # FIXME(morganfainberg): Since we are running tests through the
        # controllers and some internal api drivers are SQL-only, the correct
        # approach is to ensure we have the correct backing store. The
        # credential api makes some very SQL specific assumptions that should
        # be addressed allowing for non-SQL based testing to occur.
        self.useFixture(database.Database())
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # TODO(termie): add an admin user to the fixtures and use that user
        # override the fixtures, for now
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_bar['id'],
            self.role_admin['id'])
        conf = self._paste_config('keystone')
        fixture = self.useFixture(appserver.AppServer(conf, appserver.MAIN))
        self.public_server = fixture.server
        fixture = self.useFixture(appserver.AppServer(conf, appserver.ADMIN))
        self.admin_server = fixture.server
        self.default_client = self.get_client()
        self.addCleanup(self.cleanup_instance('public_server', 'admin_server',
                                              'default_client'))
    def _public_url(self):
        # The appserver fixture binds a free port; read it back at call time.
        public_port = self.public_server.socket_info['socket'][1]
        return "http://localhost:%s/v2.0" % public_port
    def _admin_url(self):
        admin_port = self.admin_server.socket_info['socket'][1]
        return "http://localhost:%s/v2.0" % admin_port
    def _client(self, admin=False, **kwargs):
        """Build an authenticated keystoneclient against the public or admin API."""
        url = self._admin_url() if admin else self._public_url()
        kc = ks_client.Client(endpoint=url,
                              auth_url=self._public_url(),
                              **kwargs)
        kc.authenticate()
        # have to manually overwrite the management url after authentication
        kc.management_url = url
        return kc
    def get_client(self, user_ref=None, tenant_ref=None, admin=False):
        """Return a client authenticated as *user_ref* (default: user_foo).

        When *tenant_ref* is omitted, the tenant id is looked up from the
        user's fixture entry.
        """
        if user_ref is None:
            user_ref = self.user_foo
        if tenant_ref is None:
            for user in default_fixtures.USERS:
                # The fixture ID is no longer used as the ID in the database
                # The fixture ID, however, is still used as part of the
                # attribute name when storing the created object on the test
                # case. This means that we need to use the fixture ID below to
                # find the actual object so that we can get the ID as stored
                # in the database to compare against.
                if (getattr(self, 'user_%s' % user['id'])['id'] ==
                        user_ref['id']):
                    tenant_id = user['tenants'][0]
        else:
            tenant_id = tenant_ref['id']
        return self._client(username=user_ref['name'],
                            password=user_ref['password'],
                            tenant_id=tenant_id,
                            admin=admin)
    def test_authenticate_tenant_name_and_tenants(self):
        # Default client authenticates by tenant name; listing returns 'bar'.
        client = self.get_client()
        tenants = client.tenants.list()
        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
    def test_authenticate_tenant_id_and_tenants(self):
        # Authenticating by tenant *id* yields the same scoped tenant list.
        client = self._client(username=self.user_foo['name'],
                              password=self.user_foo['password'],
                              tenant_id='bar')
        tenants = client.tenants.list()
        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
    def test_authenticate_invalid_tenant_id(self):
        # Unknown tenant id must be rejected outright.
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client,
                          username=self.user_foo['name'],
                          password=self.user_foo['password'],
                          tenant_id='baz')
    def test_authenticate_token_no_tenant(self):
        # An unscoped token can still list the user's tenants.
        client = self.get_client()
        token = client.auth_token
        token_client = self._client(token=token)
        tenants = token_client.tenants.list()
        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
    def test_authenticate_token_tenant_id(self):
        # A token can be rescoped to a tenant by id.
        client = self.get_client()
        token = client.auth_token
        token_client = self._client(token=token, tenant_id='bar')
        tenants = token_client.tenants.list()
        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
def test_authenticate_token_invalid_te | nant_id(self):
client = self.get_client()
token = client.auth_token
self.assertRaises(client_exceptions.Unauthorized,
self._client, token=token,
tenant_id=uuid.uuid4().hex)
    def test_authenticate_token_invalid_tenant_name(self):
        # Rescoping a valid token to a nonexistent tenant name is rejected.
        client = self.get_client()
        token = client.auth_token
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client, token=token,
                          tenant_name=uuid.uuid4().hex)
def test_authenticate_token_tenant_name(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token, tenant_name='BAR')
tenants = token_client.tenants.list()
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
    def test_authenticate_and_delete_token(self):
        # After an admin deletes the token, it can no longer be used.
        client = self.get_client(admin=True)
        token = client.auth_token
        token_client = self._client(token=token)
        tenants = token_client.tenants.list()
        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
        client.tokens.delete(token_client.auth_token)
        self.assertRaises(client_exceptions.Unauthorized,
                          token_client.tenants.list)
    def test_authenticate_no_password(self):
        # Missing password must fail before any request is scoped.
        user_ref = self.user_foo.copy()
        user_ref['password'] = None
        self.assertRaises(client_exceptions.AuthorizationFailure,
                          self.get_client,
                          user_ref)
    def test_authenticate_no_username(self):
        # Missing username must likewise fail authorization.
        user_ref = self.user_foo.copy()
        user_ref['name'] = None
        self.assertRaises(client_exceptions.AuthorizationFailure,
                          self.get_client,
                          user_ref)
def test_authenticate_disabled_tenant(self):
admin_client = self.get_client(admin=True)
tenant = {
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': False,
}
tenant_ref = admin_client.tenan |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Doc/includes/sqlite3/simple_tableprinter.py | Python | apache-2.0 | 731 | 0.002736 | import sqlite3
FIELD_MAX_WIDTH = 20
TABLE_NAME = 'people'
SELECT = 'select * from %s order by age, name_last' % TABLE_NAME
con = sqlite3.connect("mydb")
cur = con.cursor()
cur.execute(SELECT)
# Print a header.
for fieldDesc in cur.description:
| print(fieldDesc[0].ljust(FIELD_MAX_WIDTH), end=' ')
print() # Finish the header with a newline.
print('-' * 78)
# For each row, print the value of each field left-justified within
# the maximum possible width of that field.
fieldIndices = range(len(cur.descript | ion))
for row in cur:
for fieldIndex in fieldIndices:
fieldValue = str(row[fieldIndex])
print(fieldValue.ljust(FIELD_MAX_WIDTH), end=' ')
print() # Finish the row with a newline.
con.close()
|
SUSE/azure-sdk-for-python | azure-mgmt-servermanager/azure/mgmt/servermanager/models/session_parameters.py | Python | mit | 2,307 | 0.002167 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generate | d by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SessionParameters(Model):
    """Parameter collection for creation and other operations on sessions.

    :param user_name: Encrypted User name to be used to connect to node.
    :type user_name: str
    :param password: Encrypted Password associated with user name.
    :type password: str
    :param retention_period: Session retention period. Possible values
     include: 'Session', 'Persistent'
    :type retention_period: str or :class:`RetentionPeriod
     <azure.mgmt.servermanager.models.RetentionPeriod>`
    :param credential_data_format: Credential data format. Possible values
     include: 'RsaEncrypted'
    :type credential_data_format: str or :class:`CredentialDataFormat
     <azure.mgmt.servermanager.models.CredentialDataFormat>`
    :param encryption_certificate_thumbprint: Encryption certificate
     thumbprint.
    :type encryption_certificate_thumbprint: str
    """
    # Maps Python attribute names to wire-format keys for msrest serialization.
    _attribute_map = {
        'user_name': {'key': 'properties.userName', 'type': 'str'},
        'password': {'key': 'properties.password', 'type': 'str'},
        'retention_period': {'key': 'properties.retentionPeriod', 'type': 'RetentionPeriod'},
        'credential_data_format': {'key': 'properties.credentialDataFormat', 'type': 'CredentialDataFormat'},
        # NOTE(review): this wire key is PascalCase unlike the camelCase keys
        # above — appears to come from the code generator; confirm against the
        # service swagger before "fixing".
        'encryption_certificate_thumbprint': {'key': 'properties.EncryptionCertificateThumbprint', 'type': 'str'},
    }
    def __init__(self, user_name=None, password=None, retention_period=None, credential_data_format=None, encryption_certificate_thumbprint=None):
        self.user_name = user_name
        self.password = password
        self.retention_period = retention_period
        self.credential_data_format = credential_data_format
        self.encryption_certificate_thumbprint = encryption_certificate_thumbprint
|
cbertinato/pandas | pandas/tests/tseries/offsets/test_fiscal.py | Python | bsd-3-clause | 29,268 | 0 | """
Tests for Fiscal Year and Fiscal Quarter offset classes
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
from pandas import Timestamp
from pandas.tseries.frequencies import get_offset
from pandas.tseries.offsets import FY5253, FY5253Quarter
from .common import assert_offset_equal, assert_onOffset
from .test_offsets import Base, WeekDay
def makeFY5253LastOfMonthQuarter(*args, **kwds):
    """FY5253Quarter with the 'last' variation (year ends on the last
    <weekday> of the anchor month)."""
    return FY5253Quarter(*args, variation="last", **kwds)
def makeFY5253NearestEndMonthQuarter(*args, **kwds):
    """FY5253Quarter with the 'nearest' variation (year ends on the
    <weekday> nearest the anchor month's last day)."""
    return FY5253Quarter(*args, variation="nearest", **kwds)
def makeFY5253NearestEndMonth(*args, **kwds):
    """FY5253 annual offset, 'nearest' variation."""
    return FY5253(*args, variation="nearest", **kwds)
def makeFY5253LastOfMonth(*args, **kwds):
    """FY5253 annual offset, 'last' variation."""
    return FY5253(*args, variation="last", **kwds)
def test_get_offset_name():
    # The freqstr encodes variation (L/N), anchor month, weekday and the
    # quarter that absorbs the 53rd week.
    lom = makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3,
                                       qtr_with_extra_week=4)
    assert lom.freqstr == "REQ-L-MAR-TUE-4"
    nearest = makeFY5253NearestEndMonthQuarter(weekday=1, startingMonth=3,
                                               qtr_with_extra_week=3)
    assert nearest.freqstr == "REQ-N-MAR-TUE-3"
def test_get_offset():
    """get_offset round-trips FY5253 frequency strings and rejects junk.

    Fixes two split-marker corruptions: ``startin | gMonth=12`` (a syntax
    error) and the mangled assertion format string ``% | r``.
    """
    with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
        get_offset('gibberish')
    with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
        get_offset('QS-JAN-B')
    pairs = [
        ("RE-N-DEC-MON",
         makeFY5253NearestEndMonth(weekday=0, startingMonth=12)),
        ("RE-L-DEC-TUE",
         makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
        ("REQ-L-MAR-TUE-4",
         makeFY5253LastOfMonthQuarter(weekday=1,
                                      startingMonth=3,
                                      qtr_with_extra_week=4)),
        ("REQ-L-DEC-MON-3",
         makeFY5253LastOfMonthQuarter(weekday=0,
                                      startingMonth=12,
                                      qtr_with_extra_week=3)),
        ("REQ-N-DEC-MON-3",
         makeFY5253NearestEndMonthQuarter(weekday=0,
                                          startingMonth=12,
                                          qtr_with_extra_week=3))]
    for name, expected in pairs:
        offset = get_offset(name)
        assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
                                    (name, expected, offset))
class TestFY5253LastOfMonth(Base):
    """FY5253 'last' variation: the fiscal year ends on the last <weekday>
    occurring within the anchor month."""
    offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8,
                                               weekday=WeekDay.SAT)
    offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9,
                                               weekday=WeekDay.SAT)
    # Each case: (offset, candidate date, whether the date is a period end).
    on_offset_cases = [
        # From Wikipedia (see:
        # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
        (offset_lom_sat_aug, datetime(2006, 8, 26), True),
        (offset_lom_sat_aug, datetime(2007, 8, 25), True),
        (offset_lom_sat_aug, datetime(2008, 8, 30), True),
        (offset_lom_sat_aug, datetime(2009, 8, 29), True),
        (offset_lom_sat_aug, datetime(2010, 8, 28), True),
        (offset_lom_sat_aug, datetime(2011, 8, 27), True),
        (offset_lom_sat_aug, datetime(2012, 8, 25), True),
        (offset_lom_sat_aug, datetime(2013, 8, 31), True),
        (offset_lom_sat_aug, datetime(2014, 8, 30), True),
        (offset_lom_sat_aug, datetime(2015, 8, 29), True),
        (offset_lom_sat_aug, datetime(2016, 8, 27), True),
        (offset_lom_sat_aug, datetime(2017, 8, 26), True),
        (offset_lom_sat_aug, datetime(2018, 8, 25), True),
        (offset_lom_sat_aug, datetime(2019, 8, 31), True),
        (offset_lom_sat_aug, datetime(2006, 8, 27), False),
        (offset_lom_sat_aug, datetime(2007, 8, 28), False),
        (offset_lom_sat_aug, datetime(2008, 8, 31), False),
        (offset_lom_sat_aug, datetime(2009, 8, 30), False),
        (offset_lom_sat_aug, datetime(2010, 8, 29), False),
        (offset_lom_sat_aug, datetime(2011, 8, 28), False),
        (offset_lom_sat_aug, datetime(2006, 8, 25), False),
        (offset_lom_sat_aug, datetime(2007, 8, 24), False),
        (offset_lom_sat_aug, datetime(2008, 8, 29), False),
        (offset_lom_sat_aug, datetime(2009, 8, 28), False),
        (offset_lom_sat_aug, datetime(2010, 8, 27), False),
        (offset_lom_sat_aug, datetime(2011, 8, 26), False),
        (offset_lom_sat_aug, datetime(2019, 8, 30), False),
        # From GMCR (see for example:
        # http://yahoo.brand.edgar-online.com/Default.aspx?
        # companyid=3184&formtypeID=7)
        (offset_lom_sat_sep, datetime(2010, 9, 25), True),
        (offset_lom_sat_sep, datetime(2011, 9, 24), True),
        (offset_lom_sat_sep, datetime(2012, 9, 29), True)]
    @pytest.mark.parametrize('case', on_offset_cases)
    def test_onOffset(self, case):
        offset, dt, expected = case
        assert_onOffset(offset, dt, expected)
    def test_apply(self):
        # Walk each date sequence, applying the offset and checking that each
        # step lands exactly on the next known fiscal year end.
        offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8,
                                                   weekday=WeekDay.SAT)
        offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8,
                                                     weekday=WeekDay.SAT)
        date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25),
                                datetime(2008, 8, 30), datetime(2009, 8, 29),
                                datetime(2010, 8, 28), datetime(2011, 8, 27),
                                datetime(2012, 8, 25), datetime(2013, 8, 31),
                                datetime(2014, 8, 30), datetime(2015, 8, 29),
                                datetime(2016, 8, 27)]
        tests = [
            (offset_lom_aug_sat, date_seq_lom_aug_sat),
            (offset_lom_aug_sat_1, date_seq_lom_aug_sat),
            (offset_lom_aug_sat, [
                datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
            (offset_lom_aug_sat_1, [
                datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
            (makeFY5253LastOfMonth(n=-1, startingMonth=8,
                                   weekday=WeekDay.SAT),
             list(reversed(date_seq_lom_aug_sat))),
        ]
        for test in tests:
            offset, data = test
            current = data[0]
            for datum in data[1:]:
                current = current + offset
                assert current == datum
class TestFY5253NearestEndMonth(Base):
def test_get_year_end(self):
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SAT).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 8, 31))
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SUN).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 9, 1))
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.FRI).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 8, 30))
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
assert (offset_n.get_year_end(datetime(2012, 1, 1)) ==
datetime(2013, 1, 1))
assert (offset_n.get_year_end(datetime(2012, 1, 10)) ==
datetime(2013, 1, 1))
assert (offset_n.get_year_end(datetime(2013, 1, 1)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 2)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 3)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 10)) ==
datetime(2013, 12, 31))
JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
assert (JNJ.get_year_end(datetime(2006, 1, 1)) ==
datetime(2006, 12, 31))
offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8,
weekday=WeekDay.SAT)
offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8,
|
enthought/etsproxy | enthought/blocks/compiler_/ast/deep_equality.py | Python | bsd-3-clause | 113 | 0 | # p | roxy module
from __future__ imp | ort absolute_import
from codetools.blocks.compiler_.ast.deep_equality import *
|
idlesign/torrt | tests/rpc/test_transmission.py | Python | bsd-3-clause | 2,194 | 0.002279 | import pytest
from torrt.rpc.transmission import TransmissionRPC, TransmissionRPCException
@pytest.fixture
def transmission():
rpc = TransmissionRPC()
return rpc
def test_auth(response_mock):
transmission = TransmissionRPC(user='wrong', password='wrongwrong')
with response_mock(
f'POST {transmission.url} -> 401:<h1>401: | Unauthorized</h1>Unauthorized | User',
bypass=False
):
with pytest.raises(TransmissionRPCException):
transmission.method_get_version()
def test_get_version(response_mock, transmission):
with response_mock(
f'POST {transmission.url} -> 200:'
'{"arguments":{"rpc-version":15,"rpc-version-minimum":1,"version":"2.94 (d8e60ee44f)"},"result":"success"}\n',
bypass=False
):
version = transmission.method_get_version()
assert version == 15
def test_get_torrents(response_mock, transmission, datafix_read, torrent_params):
with response_mock(
f"POST {transmission.url} -> 200:{datafix_read('transm_gettorents.json')}",
bypass=False
):
response = transmission.method_get_torrents(hashes=['xxxxx'])
assert response == [{
'comment': 'somecomment',
'downloadDir': '/home/idle',
'hashString': 'xxxxx',
'id': 1,
'name': 'mytorr',
'hash': 'xxxxx',
'download_to': '/home/idle',
'params': torrent_params,
}]
def test_remove_torrent(response_mock, transmission):
with response_mock(
f'POST {transmission.url} -> 200:'
'{"arguments":{},"result":"success"}',
bypass=False
):
response = transmission.method_remove_torrent(hash_str='xxxxx')
assert response == {}
def test_add_torrent(response_mock, transmission, torrent_params, torrent_data):
with response_mock(
f'POST {transmission.url} -> 200:'
'{"arguments":{},"result":"success"}',
bypass=False
):
response = transmission.method_add_torrent(
torrent=torrent_data,
download_to='/here/',
params=torrent_params,
)
assert response == {}
|
lmaag182/nupic_physical | online.py | Python | agpl-3.0 | 994 | 0.009054 | try:
import serial # Python2
except ImportError:
from serial3 import * # Python3
from nupic.frameworks.opf.modelfactory import | ModelFactory
import os,sys
ser = serial.Serial('/dev/ttyACM0', 9600)
def get_online(number_of_records=20):# 0 means forever
model = ModelFactory.loadFromCheckpoint(os.getcwd() + "/model_save")
count=0
ser.flushInput()
while (count < number_of_records) or (number_of_records == 0):
count = count + 1
text = ser.readline()
if (len(text.split(",")) == 4):
result = model.run({
"s1": float(text.split("," | )[0]),
"s2": float(text.split(",")[1]),
"s3": float(text.split(",")[2]),
"s4": float(text.split(",")[3])
})
prediction = int(result.inferences['multiStepBestPredictions'][4])
sys.stdout.write("\r"+ str(prediction))
sys.stdout.write("\t"+ text)
ser.write(str(prediction)+ '\n') |
monikagrabowska/osf.io | addons/base/views.py | Python | apache-2.0 | 27,063 | 0.001626 | import datetime
import httplib
import os
import uuid
import markupsafe
from django.utils import timezone
from flask import make_response
from flask import redirect
from flask import request
import furl
import jwe
import jwt
from django.db import transaction
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from addons.base.models import BaseStorageAddon
from framework import sentry
from framework.auth import Auth
from framework.auth import cas
from framework.auth import oauth_scopes
from framework.auth.decorators import collect_auth, must_be_logged_in, must_be_signed
from framework.exceptions import HTTPError
from framework.routing import json_renderer
from framework.sentry import log_exception
from framework.transactions.handlers import no_auto_transaction
from website import mails
from website import settings
from addons.base import exceptions
from addons.base import signals as file_signals
from osf.models import FileNode, StoredFileNode, TrashedFileNode
from website.models import Node, NodeLog, User
from website.profile.utils import get_gravatar
from website.project import decorators
from website.project.decorators import must_be_contributor_or_public, must_be_valid_project
from website.project.model import DraftRegistration, MetaSchema
from website.project.utils import serialize_node
from website.util import rubeus
# import so that associated listener is instantiated and gets emails
from website.notifications.events.files import FileEvent # noqa
ERROR_MESSAGES = {'FILE_GONE': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
The file "{file_name}" stored on {provider} was deleted via the OSF.
</p>
<p>
It was deleted by <a href="/{deleted_by_guid}">{deleted_by}</a> on {deleted_on}.
</p>
</div>''',
'FILE_GONE_ACTOR_UNKNOWN': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
The file "{file_name}" stored on {provider} was deleted via the OSF.
</p>
<p>
It was deleted on {deleted_on}.
</p>
</div>''',
'DONT_KNOW': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
File not found at {provider}.
</p>
</div>''',
'BLAME_PROVIDER': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
This {provider} link to the file "{file_name}" is currently unresponsive.
The provider ({provider}) may currently be unavailable or "{file_name}" may have been removed from {provider} through another interface.
</p>
<p>
You may wish to verify this through {provider}'s website.
</p>
</div>''',
'FILE_SUSPENDED': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
This content has been removed.
</div>'''}
WATERBUTLER_JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
@decorators.must_have_permission('write')
@decorators.must_not_be_registration
def disable_addon(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
deleted = node.delete_addon(addon_name, auth)
return {'deleted': deleted}
@must_be_logged_in
def get_addon_user_config(**kwargs):
user = kwargs['auth'].user
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
addon = user.get_addon(addon_name)
if addon is None:
raise HTTPError(httplib.BAD_REQUEST)
return addon.to_json(user)
permission_map = {
'create_folder': 'write',
'revisions': 'read',
'metadata': 'read',
'download': 'read',
'upload': 'write',
'delete': 'write',
'copy': 'write',
'move': 'write',
'copyto': 'write',
'moveto': 'write',
'copyfrom': 'read',
'movefrom': 'write',
}
def check_access(node, auth, action, cas_resp):
"""Verify that user can perform requested action on resource. Raise appropriate
error code if action cannot proceed.
"""
permission = permission_map.get(action, None)
if permission is None:
raise HTTPError(httplib.BAD_REQUEST)
if cas_resp:
if permission == 'read':
if node.is_public:
return True
required_scope = oauth_scopes.CoreScopes.NODE_FILE_READ
else:
required_scope = oauth_scopes.CoreScopes.NODE_FILE_WRITE
if not cas_resp.authenticated \
or required_scope not in oauth_scopes.normalize_scopes(cas_resp.attributes['accessTokenScope']):
raise HTTPError(httplib.FORBIDDEN)
if permission == 'read':
if node.can_view(auth):
return True
# The user may have admin privileges on a parent node, in which
# case they should have read permissions
if node.is_registration and node.registered_from.can_view(auth):
return True
if permission == 'write' and node.can_edit(auth):
return True
# Users attempting to register projects with components might not have
# `write` permissions for all components. This will result in a 403 for
# all `copyto` actions as well as `copyfrom` actions if the component
# in question is not public. To get around this, we have to recursively
# check the node's parent node to determine if they have `write`
# permissions up the stack.
# TODO(hrybacki): is there a way to tell if this is for a registration?
# All nodes being registered that receive the `copyto` action will have
# `node.is_registration` == True. However, we have no way of telling if
# `copyfrom` actions are originating from a node being registered.
# TODO This is | raise UNAUTHORIZED for registrations that have not been archived yet
if action == 'copyfrom' or (action == 'copyto' and node.is_registration):
parent = node.parent_node
while parent:
if parent.can_edit(auth):
return True
parent = parent.parent_node
# Users with the PREREG_ADMIN_TAG shoul | d be allowed to download files
# from prereg challenge draft registrations.
try:
prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', 2)
)
allowed_nodes = [node] + node.parents
prereg_draft_registration = DraftRegistration.find(
Q('branched_from', 'in', [n for n in allowed_nodes]) &
Q('registration_schema', 'eq', prereg_schema)
)
if action == 'download' and \
auth.user is not None and \
prereg_draft_registration.count() > 0 and \
settings.PREREG_ADMIN_TAG in auth.user.system_tags:
return True
except NoResultsFound:
pass
raise HTTPError(httplib.FORBIDDEN if auth.user else httplib.UNAUTHORIZED)
def make_auth(user):
if user is not None:
return {
'id': user._id,
'email': '{}@osf.io'.format(user._id),
'name': user.fullname,
}
return {}
@collect_auth
def get_auth(auth, **kwargs):
cas_resp = None
if not auth.user:
# Central Authentication Server OAuth Bearer Token
authorization = request.headers.get('Authorization')
if authorization and authorization.startswith('Bearer '):
client = cas.get_client()
try:
access_token = cas.parse_auth_header(authorization)
cas_resp = client.profile(access_token)
except cas.CasError as err:
sentry.log_exception()
# NOTE: We assume that the request is an AJAX request
return json_renderer(err)
if cas_resp.authenticated:
auth.user = User.load(cas_resp.user)
try:
data = jwt.decode(
jwe.decrypt(request.args.get('payload', '').encode('utf-8'), WATERBUTLER_JWE_KEY),
settings.WATERBUTLER_JWT_SECRET,
options={ |
glandium/git-cinnabar | cinnabar/cmd/bundle.py | Python | gpl-2.0 | 3,371 | 0 | from __future__ import absolute_import, unicode_literals
import logging
from cinnabar.cmd.util import CLI
from cinnabar.git import (
Git,
GitProcess,
InvalidConfig,
)
from cinnabar.githg import GitHgStore
from cinnabar.helper import GitHgHelper
from cinnabar.hg.bundle import | (
create_bundle,
PushStore,
)
from cinnabar.hg.repo import (
BundleApplier,
get_bundle,
get_clonebundle,
get_repo,
Remote,
unbundle20,
unbundler,
)
from cinnabar.util import fsencode
@CLI.subcommand
@CLI.argument('--version', choices=(1, 2), type=int,
default=2 if unbundle20 else 1,
help='bundle versi | on')
@CLI.argument('path', help='path of the bundle')
@CLI.argument('rev', nargs='+',
help='git revision range (see the Specifying Ranges'
' section of gitrevisions(7))')
def bundle(args):
'''create a mercurial bundle'''
revs = [fsencode(r) for r in args.rev]
bundle_commits = list((c, p) for c, t, p in GitHgHelper.rev_list(
b'--topo-order', b'--full-history', b'--parents', b'--reverse', *revs))
if bundle_commits:
# TODO: better UX. For instance, this will fail with an exception when
# the parent commit doesn't have mercurial metadata.
GRAFT = {
None: False,
b'false': False,
b'true': True,
}
try:
graft = Git.config('cinnabar.graft', values=GRAFT)
except InvalidConfig as e:
logging.error(str(e))
return 1
store = PushStore(graft=graft)
if args.version == 1:
b2caps = {}
elif args.version == 2:
b2caps = {
b'HG20': (),
b'changegroup': (b'01', b'02'),
}
with open(args.path, 'wb') as fh:
if not b2caps:
fh.write(b'HG10UN')
for data in create_bundle(store, bundle_commits, b2caps):
fh.write(data)
store.close(rollback=True)
@CLI.subcommand
@CLI.argument('--clonebundle', action='store_true',
help='get clone bundle from given repository')
@CLI.argument('url', help='url of the bundle')
def unbundle(args):
'''apply a mercurial bundle to the repository'''
# Make git emit its error when the current directory is not in a git repo.
proc = GitProcess('rev-parse')
ret = proc.wait()
if ret:
return ret
remote = Remote(b'', fsencode(args.url))
if remote.parsed_url.scheme not in (b'file', b'http', b'https'):
logging.error('%s urls are not supported.' % remote.parsed_url.scheme)
return 1
if args.clonebundle:
repo = get_repo(remote)
if not repo.capable(b'clonebundles'):
logging.error('Repository does not support clonebundles')
return 1
bundle = get_clonebundle(repo)
else:
bundle = get_bundle(remote.url)
store = GitHgStore()
GRAFT = {
None: False,
b'false': False,
b'true': True,
}
try:
graft = Git.config('cinnabar.graft', values=GRAFT)
except InvalidConfig as e:
logging.error(str(e))
return 1
if graft:
store.prepare_graft()
bundle = unbundler(bundle)
apply_bundle = BundleApplier(bundle)
del bundle
apply_bundle(store)
store.close()
|
idstudiolab/imagestore | src/imagestore/tests.py | Python | lgpl-2.1 | 4,153 | 0.00313 | import unittest
import re
from lxml import etree
from zope.testing import doctest, cleanup
import zope.component.eventtesting
from imagestore.xml import XMLValidationError, local_file
class ValidationTests(unittest.TestCase):
relaxng = etree.RelaxNG(file=local_file('rng', 'imagestore.rng'))
def validate(self, el):
if not self.relaxng.validate(etree.ElementTree(el)):
raise XMLValidationError("%s failed to validate" % el.tag)
def test_basic(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions>
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def test_attributes(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns | /imagestore">
<sessions href="sessions">
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def test_attributes_illegal(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions name="sessions">
| </sessions>
</imagestore>
'''
self.assertRaises(XMLValidationError, self.validate, etree.XML(xml))
def test_extended(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions>
<session href="sessions/foo" name="foo">
<group xmlns="http://studiolab.io.tudelft.nl/ns/imagestore" href="." name="collection">
<source src="APP/sessions/foo/images/UNKNOWN" name="UNKNOWN"/>
<metadata href="metadata">
<depth href="metadata/depth">0.0</depth>
<rotation href="metadata/rotation">0.0</rotation>
<x href="metadata/x">0.0</x>
<y href="metadata/y">0.0</y>
</metadata>
<objects href="objects">
<image href="objects/alpha" name="alpha">
<source src="APP/sessions/foo/images/a.png" name="a.png"/>
<metadata href="objects/alpha/metadata">
<depth href="objects/alpha/metadata/depth">0.0</depth>
<rotation href="objects/alpha/metadata/rotation">0.0</rotation>
<x href="objects/alpha/metadata/x">0.0</x>
<y href="objects/alpha/metadata/y">0.0</y>
</metadata>
</image>
<group href="objects/beta" name="beta">
<source src="APP/sessions/foo/images/a.png" name="a.png"/>
<metadata href="objects/beta/metadata">
<depth href="objects/beta/metadata/depth">0.0</depth>
<rotation href="objects/beta/metadata/rotation">0.0</rotation>
<x href="objects/beta/metadata/x">0.0</x>
<y href="objects/beta/metadata/y">0.0</y>
</metadata>
<objects href="objects/beta/objects"/>
</group>
</objects>
</group>
<images>
</images>
</session>
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def setUpZope(test):
zope.component.eventtesting.setUp(test)
def cleanUpZope(test):
cleanup.cleanUp()
r_created = re.compile('<created>[^/]*</created>')
r_modified = re.compile('<modified>[^/]*</modified>')
def datetime_normalize(xml):
result = r_created.sub('<created></created>', xml)
result = r_modified.sub('<modified></modified', result)
return result
def test_suite():
optionflags = (
doctest.ELLIPSIS
| doctest.REPORT_NDIFF
| doctest.NORMALIZE_WHITESPACE
)
return unittest.TestSuite([
doctest.DocFileSuite(
'model.txt', optionflags=optionflags,
setUp=setUpZope, tearDown=cleanUpZope,
globs={'datetime_normalize': datetime_normalize}),
unittest.makeSuite(ValidationTests)])
|
PuZheng/lejian-backend | lejian/config/views.py | Python | mit | 2,153 | 0.000477 | # -*- coding: UTF-8 -*-
from flask import jsonify
from sqlalchemy.orm.exc import NoResultFound
from flask.ext.databrowser import ModelView, col_spec, sa
from flask.ext.databrowser.action import DeleteAction
from flask.ext.babel import lazy_gettext, gettext as _
from genuine_ap.config import config_ws
from genuine_ap.models import Config
from genuine_ap.apis import wraps
from genuine_ap.database import db
@config_ws.route('/config/<opts>')
def config_ws(opts):
opts = opts.split(',')
try:
opts = [wraps(Config.query.filter(Config.name == opt).one()) for opt
in opts]
except NoResultFound:
return "invalid config options", 403
return jsonify(dict((opt.name, opt.as_dict()) for opt in opts))
class ConfigModelView(ModelView):
@ModelView.cached
@property
def list_columns(self):
return [
col_spec.ColSpec('id', _('id')),
col_spec.ColSpec('name', _('name')),
col_spec.ColSpec('type_', _('type')),
col_spec.ColSpec('brief', _('brief')),
col_spec.ColSpec('value', _('value')),
]
@ModelView.cached
@property
def create_columns(self):
return [
col_spec.InputColSpec('name', _('name')),
col_spec.InputColSpec('type_', _('type')),
col_spec.InputColSpec('brief', _('brief')),
col_spec.InputColSpec('value', _('value'),
doc=u'若是布尔类型,应当填写1(真)或0(假)'),
]
@ModelView.cached
@property
def edit_columns(self):
return [
col_spec.InputColSpec('name', _('name')),
col_sp | ec.InputColSpec('type_', _('type')),
col_spe | c.InputColSpec('brief', _('brief')),
col_spec.InputColSpec('value', _('value'),
doc=u'若是布尔类型,应当填写1(真)或0(假)'),
]
def get_actions(self, processed_objs=None):
return [DeleteAction(_("remove"))]
config_model_view = ConfigModelView(sa.SAModell(Config, db,
lazy_gettext('Config')))
|
pearpai/TensorFlow-action | deep_learning_with_tensorFlow/Chapter07/p176.py | Python | apache-2.0 | 1,084 | 0.000988 | # coding=utf-8
import matplotlib.pyplot as plt
import tensorflow as | tf
import numpy as np
from PIL import Image
path = "../datasets/demo_picture/cat.jpg"
img = Image.open(path)
# 获取长宽
print img.size
image_raw_data = tf.gfile.FastGFile("../datasets/demo_picture/cat.jpg", 'r').read()
img_data = tf.image.decode_jpeg(image_raw_data)
with tf.Sess | ion() as sess:
croped = tf.image.resize_image_with_crop_or_pad(img_data, 1000, 1000)
padded = tf.image.resize_image_with_crop_or_pad(img_data, 3000, 3000)
print 'croped ', croped.get_shape()
print 'padded ', croped.get_shape()
# TensorFlow的函数处理图片后存储的数据是float32格式的,需要转换成uint8才能正确打印图片。
# plt.imshow(croped.eval())
# plt.show()
# plt.imshow(padded.eval())
# plt.show()
# central_cropped = tf.image.central_crop(img_data, 0.5)
# plt.imshow(central_cropped.eval())
# plt.show()
jt = tf.image.crop_to_bounding_box(img_data, 1500, 0, 200, 2673)
print 'jt ', jt.get_shape()
plt.imshow(jt.eval())
plt.show()
|
valuesandvalue/valuesandvalue | vavs_project/fbdata/views.py | Python | mit | 3,902 | 0.004357 | # fbdata.views
# DJANGO
from django.contrib import messages
from django.contrib.auth.models import User
#from django.db.models import Max
#from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView, View
from django.views.generic import (
ArchiveIndexView,
CreateView,
UpdateView,
DetailView
)
# DJANGO-BRACES
from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
# FBDATA
from .details import get_fbobject_data_json
from .fbids import get_fbuser_from_djuser
from .graphs import narrative_data_json
from .models import UserAnalysis
from .participant import get_participant_profile
from .utils import (
can_access_user,
isodatestr_to_date,
isodatestr_to_datetime,
recent_time_frame
)
def _json_error(message):
return '{"type":"error", "message":%s}' % message
class UserAnalysisView(LoginRequiredMixin, TemplateView):
"""View for single user data."""
template_name = 'fbdata/user_analysis.html'
def get(self, request, username=None, start=None, end=None, page=None):
username = username or request.user.username
if not can_access_user(request.user, username):
raise Http404
else:
user = get_object_or_404(User, username=username)
participant = get_participant_profile(user)
fbuser = participant.fbuser
if start and end:
start = isodatestr_to_date(start)
end = isodatestr_to_date(end)
pager = None
elif page:
page = int(page)
start, end = participant.page_dates(page)
start = start.date()
end = end.date()
pager = participant.paginate(page=page)
else:
start, end = participant.recent_time_frame()
if start and end:
start = start.date()
end = end.date()
pager = participant.paginate()
else:
| pager = None
return self.render_to_response({
| 'user': user,
'username': username,
'fbuser': fbuser,
'participant': participant,
'start': start,
'end': end,
'pager': pager})
class InteractionData(LoginRequiredMixin, View):
"""Returns JSON interaction data."""
def get(self, request, username, start, end):
#if request.is_ajax()
if not can_access_user(request.user, username):
raise Http404
else:
user = get_object_or_404(User, username=username)
participant = get_participant_profile(user)
anon = False if request.user == user else participant.anon_data
start = isodatestr_to_datetime(start)
end = isodatestr_to_datetime(end)
json_data = narrative_data_json(
user, start, end, fbuser=participant.fbuser, anon=anon)
return HttpResponse(json_data, content_type="application/json")
class DetailData(LoginRequiredMixin, View):
"""Returns JSON detail data."""
def get(self, request, username, data_type, pk):
#if request.is_ajax()
if not can_access_user(request.user, username):
raise Http404
else:
user = get_object_or_404(User, username=username)
participant = get_participant_profile(user)
anon = False if request.user == user else participant.anon_data
json_data = get_fbobject_data_json(data_type, pk, anon=anon)
return HttpResponse(json_data, content_type="application/json")
|
lavalamp-/ws-backend-community | lib/wsregex.py | Python | gpl-3.0 | 3,414 | 0.022847 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
class RegexLib(object):
"""
A class containing all regular expressions used throughout the DataHound
application.
"""
# Class Members
# Potentially better email regex
# "([A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4})"
# http://www.webmonkey.com/2008/08/four_regular_expressions_to_check_email_addresses/
caps_alpha_regex = re.compile("^[A-Z]+$")
cc_last_four_regex = re.compile("^[0-9]{4}$")
docker_log_entry_regex = re.compile("^\[\d{4}-\d{2}-\d{2}")
# domain_name_regex = re.compile("^[a-zA-Z0-9-*]+(\.[a-zA-Z0-9-]+)*$")
domain_name_regex = re.compile("^((?!-)[A-Za-z0-9-]{1,63}(?<!-)\.)+[A-Za-z]{2,63}$")
email_regex = re.compile("^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,10}$")
export_file_name_regex = re.compile("^[0-9A-Za-z_-]{1,32}$")
file_log_entry_regex = re.compile("^\[\d{2}/\d{2}/\d{2} ")
file_name_regex = re.compile("^[A-Za-z-_0-9]+$")
first_name_regex = re.compile("^[A-Za-z\-']{1,32}$")
hostname_regex = re.compile(
"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z\-]*[A-Za-z])$",
flags=re.IGNORECASE
)
html_form_regex = re.compile("<form.*?</form>", flags=re.IGNORECASE | re.DOTALL)
integer_regex = re.compile("^[0-9]+$")
ipv4_address_regex = re.compile(
"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",
flags=re.IGNORECASE
)
ipv4_cidr_regex = re.compile(
"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2 | ][0-9]|3[0-2]))$",
flags=re.IGNORECASE
)
last_name_regex = re.compile("^[A-Za-z\-']{1,32}$")
log_entry_stub_regex = re.compile("\[(.*?)\]")
mime_string_regex = re.compile("^[a-z\-]+/[a-z\.\-_0-9]+(;(\s?[\w=\.\-]+)+)?$", flags=re.IGNORECASE)
order_name_regex = re.compile("^[A-Za-z-_0-9]+$")
protocol_regex = re.compile("^([A-Z]{1,10})://", flags=re.IGNORECASE)
query_string_r | egex = re.compile(
"^([\\\\\w\-!@\$%\^\*\(\)_`~+\[\]{}|;'\",<>]+=([\\\\\w\-!@\$%\^\*\(\)_`~+\[\]{}|;'\",<>]*)?(&[\\\\\w\-!@\$%\^\*\(\)_`~+\[\]{}|;'\",<>]+=([\\\\\w\-!@\$%\^\*\(\)_`~+\[\]{}|;'\",<>]*)?)*)$",
flags=re.IGNORECASE,
)
url_port_regex = re.compile(".+:([1-9]([0-9]{1,10})?)$", flags=re.IGNORECASE)
url_protocol_regex = re.compile("^([A-Z0-9-_]+?):", flags=re.IGNORECASE)
url_scheme_regex = re.compile("^([A-Z0-9]{1,25})://", flags=re.IGNORECASE)
user_name_regex = re.compile("^[A-Z0-9]{1,32}$", flags=re.IGNORECASE)
uuid4_string_regex = re.compile(
"^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$",
flags=re.IGNORECASE,
)
zmap_bandwidth_regex = re.compile("^\d+[GMK]$")
zmap_empty_bandwidth_regex = re.compile("^0+[GMK]$")
ssl_certificate_regex = re.compile("(-----BEGIN CERTIFICATE-----.*?-----END CERTIFICATE-----)", flags=re.DOTALL)
authority_info_uri_regex = re.compile("URI:(.*)")
basic_auth_realm_regex = re.compile("realm=\"(.*?)\"")
card_last_four_regex = re.compile("^\d\d\d\d$")
# Instantiation
# Static Methods
# Class Methods
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
|
bhupennewalkar1337/erpnext | erpnext/schools/doctype/discussion/test_discussion.py | Python | gpl-3.0 | 263 | 0.007605 | # -*- coding: utf-8 -*-
# Cop | yright (c) 2015, Frappe and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Di | scussion')
class TestDiscussion(unittest.TestCase):
pass
|
FluidityStokes/fluidity | tools/vtu2ensight.py | Python | lgpl-2.1 | 5,350 | 0.011589 | #!/usr/bin/env python3
import sys
import argparse
import glob
import re
import vtk
import vtktools
def parse_args(argv):
parser = argparse.ArgumentParser(
prog="vtu2ensight",
description="""This converts a vtu file to a ensight file. If applied to checkpointed files, use rename_checkpoint first and ensure that 'checkpoint' is removed from the basename of the solution files.""")
parser.add_argument(
"-v",
"--verbose",
help="Print something...",
action = "store_true",
dest = "verbose",
default = False
)
parser.add_argument(
"-s",
"--static",
help="Use this flag only when a fixed mesh was used. By default a dynamically varying (adaptive) spatial mesh is assumed.",
action = "store_true",
dest = "static",
default = False
)
parser.add_argument(
"-i",
help="Use this flag to set the index of the vtu file you wish to convert. By default all vtu files with the matching basename are converted.",
dest = "dumpno",
default = -1
)
parser.add_argument(
"-l",
"--last-dump",
help="Use this flag to automatically find the vtu file with the highest dump number and only convert that opposed to all vtu files. Note: It does not check the timestamps. If -l and -i are given, -i is neglected.",
action = "store_true",
dest = "lastdump",
default = False
)
parser.add_argument(
'basename',
metavar='basename',
help="Basename of output (without .pvtu or .vtu)",
)
args = parser.parse_args()
return args
# Function taken from:
# http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
def sorted_nicely(l):
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
def getvtulist(basename, dumpno, lastdump):
# Find all vtu/pvtu files for (p)vtus in this folder:
vtus = []
searchstring = basename+"_"
if (dumpno <0): searchstring = searchstring+"[0-9]*vtu"
else: searchstring = searchstring+str(int(dumpno))+".*vtu"
for file in sorted_nicely(glob.glob(searchstring)):
if (not ('checkpoint' in file)):
vtus.append(file)
if (lastdump):
vtus = [vtus[-1]]
return vtus
def getvtk(filename):
# read in vtu file:
reader = vtktools.vtu(filename)
return reader
def getensightwriter(basename, static):
writer=vtk.vtkEnSightWriter()
writer.SetFileName(basename)
writer.SetTransientGeometry(not(static))
return writer
def addblockid(ug):
# get number of elements in ug:
nele = int(ug.GetNumberOfCells())
# add blockID to ug (required by the ensight format)
blockIDs = vtk.vtkUnsignedIntArray()
blockIDs.SetNumberOfTuples(nele)
blockIDs.SetNumberOfComponents(1)
blockIDs.SetName("BlockId")
for j in range(nele):
blockIDs.SetValue(j,1)
ug.GetCellData().AddArray(blockIDs)
return ug
def removeghostlevel(reader, ug):
for i in range(reader.gridreader.GetNumberOfCellArrays()):
if (reader.gridreader.GetCellArrayName(i) == "vtkGhostLevels"):
ug.GetCellData().RemoveArray(i)
break
return ug
def writedata(writer, ug, i):
#writer.SetGhostLevel(0)
#writer.SetBlockIDs(1)
writer.SetNumberOfBlocks(1)
writer.SetTimeStep(i)
if vtk.vtkVersion.GetVTKMajorVersion() <= 5:
writer.SetInput(ug)
else:
writer.SetInputData(ug)
writer.Write()
def writecase(writer, ntimesteps):
# write header information (case file)
writer.WriteCaseFile(ntimesteps)
def main(args):
    """Convert the fluidity (p)vtu dumps selected by *args* into EnSight
    data files (one per time step) plus their case file.

    Raises IOError when no matching vtu/pvtu files are found.
    """
    verbose = args.verbose
    static = args.static
    basename = args.basename
    dumpno = int(args.dumpno)
    lastdump = int(args.lastdump)
    # A single requested dump (or only the last one) has no time history,
    # so force static geometry:
    if (dumpno>=0 or lastdump): static = True
    # get list of vtu/pvtu files:
    vtus = getvtulist(basename, dumpno, lastdump)
    if (not vtus): raise IOError
    # prevent reading errors, if only one vtu file was found, set static to True:
    if (len(vtus) == 1): static = True
    # writer:
    writer = getensightwriter(basename, static)
    # write data for each vtu-file:
    for i in range(len(vtus)):
        if (verbose):
            print("processing vtu file: "+vtus[i])
        # get vtk object:
        reader = getvtk(vtus[i])
        # add block id (required by the ensight format):
        ug = addblockid(reader.ugrid)
        # check/remove ghostlevel array:
        ug = removeghostlevel(reader, ug)
        # write data:
        writedata(writer, ug, i)
    # write case file:
    writecase(writer, len(vtus))
if __name__ == "__main__":
    # get arguments:
    args = parse_args(sys.argv)
    try:
        main(args)
        print("EnSight output files have been written successfully.")
    except IOError:
        print("Error: Could not find any output files with a basename \""+args.basename+"\".")
    except Exception:
        # A bare 'except:' would also trap SystemExit/KeyboardInterrupt;
        # catch Exception so Ctrl-C still works, and the original error
        # is kept as the context of the exception raised here.
        raise Exception("Something went wrong. Aborting operation.")
|
grani/grpc | src/python/grpcio_health_checking/grpc_health/v1/health.py | Python | bsd-3-clause | 2,629 | 0 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Reference implementation for health checking in gRPC Python."""
import threading
import grpc
from grpc_health.v1 import health_pb2
class HealthServicer(health_pb2.HealthServicer):
  """Servicer handling RPCs for service statuses."""

  def __init__(self):
    self._server_status = {}
    self._server_status_lock = threading.Lock()

  def Check(self, request, context):
    # Only the dict lookup needs the lock; the response is built outside.
    with self._server_status_lock:
      status = self._server_status.get(request.service)
    if status is None:
      context.set_code(grpc.StatusCode.NOT_FOUND)
      return health_pb2.HealthCheckResponse()
    return health_pb2.HealthCheckResponse(status=status)

  def set(self, service, status):
    """Sets the status of a service.

    Args:
      service: string, the name of the service. NOTE, '' must be set.
      status: HealthCheckResponse.status enum value indicating
        the status of the service
    """
    with self._server_status_lock:
      self._server_status[service] = status
|
nirmeshk/oh-mainline | mysite/profile/migrations/0097_auto__chg_field_unsubscribetoken_modified_date__del_field_citation_dis.py | Python | agpl-3.0 | 17,952 | 0.008021 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: relax UnsubscribeToken.modified_date to NULLable and
        drop the Citation.distinct_months column."""
        # Changing field 'UnsubscribeToken.modified_date'
        db.alter_column(u'profile_unsubscribetoken', 'modified_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True))
        # Deleting field 'Citation.distinct_months'
        db.delete_column(u'profile_citation', 'distinct_months')
    def backwards(self, orm):
        """Irreversible: modified_date values already set to NULL cannot
        be restored, so this migration refuses to run backwards."""
        # User chose to not deal with backwards NULL issues for 'UnsubscribeToken.modified_date'
        raise RuntimeError("Cannot reverse this migration. 'UnsubscribeToken.modified_date' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # (unreachable -- kept for reference, as generated by South)
        # Changing field 'UnsubscribeToken.modified_date'
        db.alter_column(u'profile_unsubscribetoken', 'modified_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
        # Adding field 'Citation.distinct_months'
        db.add_column(u'profile_citation', 'distinct_months',
                      self.gf('django.db.models.fields.IntegerField')(null=True),
                      keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharF | ield', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'customs.webresponse': {
'Meta': {'object_name': 'WebResponse'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_headers': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
u'profile.citation': {
'Meta': {'object_name': 'Citation'},
'contributor_role': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profile.DataImportAttempt']", 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'first_commit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_due_to_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'old_summary': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'portfolio_entry': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profile.PortfolioEntry']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
u'profile.dataimportattempt': {
'Meta': {'object_name': 'DataImportAttempt'},
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profile.Person']"}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'web_response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['customs.WebResponse']", 'null': 'True'})
},
u'profile.forwarder': {
'Meta': {'object_name': 'Forwarder'},
'address': ('django.db.models.fields.TextField', [], {}),
'expires_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stops_being_listed_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'profile.link_person_tag': {
'Meta': {'object_name': 'Link_Person_Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profile.Person']"}),
'source': ('django.db.models.fields.CharFie |
vasyarv/edx-ora2 | openassessment/xblock/test/test_grade.py | Python | agpl-3.0 | 20,558 | 0.002636 | # -*- coding: utf-8 -*-
"""
Tests for grade handlers in Open Assessment XBlock.
"""
import copy
import ddt
import json
import mock
from django.test.utils import override_settings
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
class TestGrade(XBlockHandlerTestCase):
"""
View-level tests for the XBlock grade handlers.
"""
PEERS = ['McNulty', 'Moreland']
ASSESSMENTS = [
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!'
},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
},
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Form': u'Peer 2: ƒαιя נσв'
},
'overall_feedback': u'Good job!',
},
]
SUBMISSION = (u'ՇﻉรՇ', u'รપ๒๓ٱรรٱѻก')
STEPS = ['peer', 'self']
AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
}
    @scenario('data/grade_scenario.xml', user_id='Greggs')
    def test_render_grade(self, xblock):
        """End-to-end: submit, complete peer and self assessment, then
        check the grade view and that earlier steps read as complete."""
        # Submit, assess, and render the grade view
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
        )
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
        # Verify that feedback from each scorer appears in the view
        self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
        self.assertIn(u'Good job!', resp.decode('utf-8'))
        # Verify that student submission is in the view
        self.assertIn(self.SUBMISSION[1], resp.decode('utf-8'))
        # Verify that the submission and peer steps show that we're graded
        # This isn't strictly speaking part of the grade step rendering,
        # but we've already done all the setup to get to this point in the flow,
        # so we might as well verify it here.
        resp = self.request(xblock, 'render_submission', json.dumps(dict()))
        self.assertIn('response', resp.lower())
        self.assertIn('complete', resp.lower())
        # Verify that student submission is in the view
        self.assertIn(self.SUBMISSION[1], resp.decode('utf-8'))
        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
        self.assertIn('peer', resp.lower())
        self.assertIn('complete', resp.lower())
        resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
        self.assertIn('self', resp.lower())
        self.assertIn('complete', resp.lower())
    @scenario('data/grade_scenario_self_only.xml', user_id='Greggs')
    def test_render_grade_self_only(self, xblock):
        """Self-assessment-only flow: the grade view shows the chosen
        options and the peer step does not render at all."""
        # Submit, assess, and render the grade view
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, [], [], self.ASSESSMENTS[0],
            waiting_for_peer=True
        )
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
        # Verify that feedback from each scorer appears in the view
        self.assertIn(u'ﻉซƈﻉɭɭﻉกՇ', resp.decode('utf-8'))
        self.assertIn(u'Fair', resp.decode('utf-8'))
        # Verify that the submission and peer steps show that we're graded
        # This isn't strictly speaking part of the grade step rendering,
        # but we've already done all the setup to get to this point in the flow,
        # so we might as well verify it here.
        resp = self.request(xblock, 'render_submission', json.dumps(dict()))
        self.assertIn('response', resp.lower())
        self.assertIn('complete', resp.lower())
        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
        self.assertNotIn('peer', resp.lower())
        self.assertNotIn('complete', resp.lower())
        resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
        self.assertIn('self', resp.lower())
        self.assertIn('complete', resp.lower())
    @scenario('data/feedback_only_criterion_grade.xml', user_id='Greggs')
    def test_render_grade_feedback_only_criterion(self, xblock):
        """A feedback-only criterion (no selectable options) still gets
        peer and self feedback rendered in the grade view."""
        # Add in per-criterion feedback for the feedback-only criterion
        peer_assessments = copy.deepcopy(self.ASSESSMENTS)
        for asmnt in peer_assessments:
            asmnt['criterion_feedback'] = {
                u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': u"Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ."
            }
        self_assessment = copy.deepcopy(self.ASSESSMENTS[0])
        self_assessment['criterion_feedback'] = {
            u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': "Feedback here",
            u'Form': 'lots of feedback yes"',
            u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': "such feedback"
        }
        # Submit, assess, and render the grade view
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, self.PEERS, peer_assessments, self_assessment
        )
        # Render the grade section
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
        self.assertIn('your response', resp.lower())
        # Verify that feedback from each scorer appears in the view
        self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
        self.assertIn(u'Good job!', resp.decode('utf-8'))
    @mock.patch.object(OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock)
    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
    @scenario('data/grade_scenario_ai_only.xml', user_id='Greggs')
    def test_render_grade_ai_only(self, xblock, mock_is_admin):
        """AI-assessment-only flow: the grade comes from the fake
        classifier and the peer/self steps never render."""
        # Train classifiers using the fake AI algorithm
        mock_is_admin.return_value = True
        self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
        # Submit, assess, and render the grade view
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, [], [], None, waiting_for_peer=True
        )
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
        # Verify that feedback from each scorer appears in the view
        self.assertNotIn(u'єאςєɭɭєภՇ', resp.decode('utf-8'))
        self.assertIn(u'Poor', resp.decode('utf-8'))
        # Verify that the submission and peer steps show that we're graded
        # This isn't strictly speaking part of the grade step rendering,
        # but we've already done all the setup to get to this point in the flow,
        # so we might as well verify it here.
        resp = self.request(xblock, 'render_submission', json.dumps(dict()))
        self.assertIn('response', resp.lower())
        self.assertIn('complete', resp.lower())
        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
        self.assertNotIn('peer', resp.lower())
        self.assertNotIn('complete', resp.lower())
        resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
        self.assertNotIn('self', resp.lower())
        self.assertNotIn('complete', resp.lower())
@scenario('data/feedback_per_criterion.xml', user_id='Bernard')
def test_render_grade_feedback_per_criterion(self, xblock):
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
# Verify that the context for the grade complete page contains the feedback
_, context = xblock.render_grade_complete(xblock.get_workflow_info())
criteria = context['rubric_criteria']
self.assertEqual(criteria[0]['peer_feedback'], [
u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!',
])
self.assertEqual(criteria[0]['self_feedback'], u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!')
self.assertEqual(criteria[1]['peer_feedback'], [u'Peer 2 |
XiaoxiaoLiu/morphology_analysis | IVSCC/morph_clustering_on_bbp_features_old_example.py | Python | gpl-3.0 | 37,767 | 0.013319 | __author__ = 'xiaoxiaol'
import numpy as np
import pylab as pl
import scipy
import pandas as pd
import seaborn as sns
import os
import sys, getopt
from scipy.cluster import hierarchy
import platform
from scipy.stats.stats import pearsonr
import scipy.stats as stats
from PIL import Image
import glob
from sklearn.metrics import silhouette_samples, silhouette_score
import math
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from itertools import cycle
####################################
ZSCORE_OUTLIER_THRESHOLD = 5
####################################
sns.set_context("poster")
def zscore(features, remove_outlier=0):
    """Return the column-wise z-scores of *features*.

    Each feature (axis 0) is standardised to zero mean and unit
    standard deviation.

    remove_outlier is accepted for interface compatibility but is not
    used here; outlier clipping is done by the callers with
    ZSCORE_OUTLIER_THRESHOLD.
    """
    return scipy.stats.zscore(features, 0)
# def normalizeFeatures(features):
# meanFeatures = np.median(features, 0)
# stdFeatures = np.std(features, 0)
# if np.count_nonzero(stdFeatures) < len(stdFeatures):
# print "zero detected"
# print stdFeatures
# normalized = (features - meanFeatures) / stdFeatures
# return normalized
#### need to be updated
# def distance_matrix(df_all, feature_names, out_distanceMatrix_file, REMOVE_OUTLIER=0):
# feature_array = df_all[feature_names].astype(float)
# distanceMatrix = []
# normalized = zscore(feature_array)
# #normalized = normalizeFeatures(feature_array)
#
# if num_outliers > 0:
# if not REMOVE_OUTLIER: # only clp
# normalized[normalized < -ZSCORE_OUTLIER_THRESHOLD] = -ZSCORE_OUTLIER_THRESHOLD
# normalized[normalized > ZSCORE_OUTLIER_THRESHOLD] = ZSCORE_OUTLIER_THRESHOLD
#
# for i in range(len(normalized)):
# queryFeature = normalized[i] # each row is a feature vector
# scores = np.exp(-np.sum(abs(normalized - queryFeature) ** 2, 1) / 100) #similarity
# #scores = np.sum(np.abs(normalized - queryFeature) ** 2, 1) # distance
# distanceMatrix.append(scores)
#
# df_dist = pd.DataFrame(distanceMatrix)
# df_dist.to_csv(out_distanceMatrix_file, index=False)
# print("score sim matrix is saved to : " + out_distanceMatrix_file + "\n")
# return df_dist
def copySnapshots(df_in, snapshots_dir, output_dir):
    """Copy the snapshot BMP for every swc file listed in *df_in*.

    For each entry of df_in['swc_file_name'], the snapshot named
    '<basename>.BMP' is looked up in *snapshots_dir* and, when present,
    copied into *output_dir* (created if necessary).
    """
    import shutil  # local import: used only by this helper
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    for afile in df_in['swc_file_name']:
        snapshot = os.path.join(snapshots_dir, os.path.basename(afile) + '.BMP')
        if os.path.exists(snapshot):
            # shutil.copy instead of os.system("cp ..."): portable, and
            # immune to shell interpretation of characters in the paths.
            shutil.copy(snapshot, output_dir)
    return
def assemble_screenshots(input_dir, output_image_file_name, size):
    """Tile every .BMP in *input_dir* horizontally into one image.

    Each snapshot is thumbnailed to at most *size* x *size* pixels and
    pasted side by side; the composite is saved to
    *output_image_file_name*.

    NOTE(review): an empty *input_dir* yields a zero-width image --
    confirm PIL accepts that before relying on this with no inputs.
    """
    files = glob.glob(input_dir + "/*.BMP")
    assemble_image = Image.new("RGB", (size * len(files),size))
    y = 0  # running x-offset of the next thumbnail (despite the name)
    for infile in files:
        im = Image.open(infile)
        im.thumbnail((size, size), Image.ANTIALIAS)
        assemble_image.paste(im, (y, 0))
        y += size
    assemble_image.save(output_image_file_name)
    return
def generateLinkerFileFromDF(df_in, output_ano_file, strip_path=False, swc_path=None):
    """Write a linker (.ano) file listing every swc file in *df_in*.

    Each row of df_in['swc_file_name'] becomes one 'SWCFILE=<name>' line
    in *output_ano_file*.  strip_path=True keeps only the basename (and
    takes precedence over *swc_path*, as in the original logic);
    *swc_path*, when given, is prepended to each file name.
    Nothing is written when the frame is empty.
    """
    swc_files = df_in['swc_file_name']
    if len(swc_files) > 0:
        with open(output_ano_file, 'w') as outf:
            for afile in swc_files:
                if strip_path:
                    filename = afile.split('/')[-1]
                elif swc_path is not None:
                    filename = swc_path + '/' + afile
                else:
                    filename = afile
                outf.write('SWCFILE=' + filename + '\n')
        # the 'with' block closes the file; the redundant explicit
        # close() on an already-closed handle was removed
    return
############## heatmap plot: hierachical clustering ########
#
# def heatmap_plot_distancematrix(df_distanceMatrix, merged, output_dir, title=None):
# pl.figure()
#
# # Create a custom palette for creline colors
# cre_lines = np.unique(merged['cre_line'])
# cre_line_pal = sns.color_palette("hls", len(cre_lines))
# cre_line_lut = dict(zip(cre_lines, cre_line_pal)) # map creline type to color
# creline_colors = merged['cre_line'].map(cre_line_lut)
#
# # Create a custom palette for dendrite_type colors thre colors
# dendrite_types = np.unique(merged['dendrite_type'])
# dendrite_type_pal = sns.color_palette(['white','gray','black'])
# dendrite_type_lut = dict(zip(dendrite_types, dendrite_type_pal))
# dendritetype_colors = merged['dendrite_type'].map(dendrite_type_lut)
#
# # Create a custom colormap for the heatmap values
# #cmap = sns.diverging_palette(240, 10, as_cmap=True)
#
# g = sns.clustermap(df_distanceMatrix, method='ward', metric='euclidean', linewidths=0.0,
# row_colors=dendritetype_colors, col_colors=creline_colors, cmap=cmap, xticklabels=False,
# yticklabels=False)
# if title:
# pl.title(title)
# # Legend for row and col colors
#
# for label in dendrite_types:
# pl.bar(0, 0, color=dendrite_type_lut[label], label=label, linewidth=0)
# pl.legend(loc="center", ncol=1)
#
# for label in cre_lines:
# g.ax_col_dendrogram.bar(0, 0, color=cre_line_lut[label], label=label, linewidth=0)
# g.ax_col_dendrogram.legend(loc="center", ncol=3)
#
# pl.title('Similarities')
#
# filename = output_dir + '/similarity_heatmap.png'
# pl.savefig(filename, dpi=300)
# print("save similarity matrix heatmap figure to :" + filename)
# pl.close()
#    return g  # (review) stray module-level 'return' left over from the
#              # commented-out heatmap_plot_distancematrix() above; a
#              # 'return' outside a function is a SyntaxError and made
#              # this module unimportable, so it is commented out too.
def plot_confusion_matrix(cm, xlabel, ylabel, xnames, ynames, title='Confusion matrix', cmap=pl.cm.Blues):
    """Render *cm* as a colour-mapped confusion-matrix image on the
    current pylab figure, labelling the ticks with *xnames*/*ynames*."""
    pl.grid(False)
    pl.imshow(cm, interpolation='none', cmap=cmap)
    pl.title(title)
    pl.colorbar()
    # One tick per category name on each axis.
    pl.xticks(np.arange(len(xnames)), xnames)
    pl.yticks(np.arange(len(ynames)), ynames)
    pl.tight_layout()
    pl.ylabel(ylabel)
    pl.xlabel(xlabel)
def heatmap_plot_zscore_ivscc(df_zscore_features, df_all, output_dir, title=None):
# Create a custom palette for dendrite_type colors
dendrite_types = [np.nan, 'aspiny', 'sparsely spiny', 'spiny']
# dendrite_type_pal = sns.color_palette("coolwarm", len(dendrite_types))
dendrite_type_pal = sns.color_palette(["gray","black","purple","red"])
dendrite_type_lut = dict(zip(dendrite_types, dendrite_type_pal))
dendrite_type_colors = df_all['dendrite_type'].map(dendrite_type_lut)
# Create a custom palette for creline colors
cre_lines = np.unique(df_all['cre_line'])
print cre_lines
cre_lines = ['Pvalb-IRES-Cre','Sst-IRES-Cre','Gad2-IRES-Cre', 'Htr3a-Cre_NO152',
'Nr5a1-Cre', 'Ntsr1-Cre','Rbp4-Cre_KL100' ,'Rorb-IRES2-Cre-D', 'Scnn1a-Tg2-Cre',
'Scnn1a-Tg3-Cre','Slc17a6-IRES-Cre','Cux2-CreERT2']
cre_line_pal = sns.color_palette("BrBG", len(cre_lines))
cre_line_lut = dict(zip(cre_lines, cre_line_pal)) # map creline type to color
cre_line_colors = df_all['cre_line'].map(cre_line_lut)
# layers = np.unique(df_all['layer'])
# layer_pal = sns.light_palette("green", len(layers))
# layer_lut = dict(zip(layers, layer_pal))
# layer_colors = df_all['layer'].map(layer_lut)
# # only if types are available
# types = np.unique(df_all['types'])
# #reorder
# types = ['NGC','multipolar','symm', 'bitufted','bipolar','tripod', 'Martinotti','cortico-cortical', 'cortico-thal','non-tufted', 'short-thick-tufted', 'tufted','thick-tufted']
# type_pal = sns.color_palette("coolwarm", len(types))# sns.diverging_palette(220, 20, n=len(types))# sns.color_palette("husl", len(types))
# type_lut = dict(zip(types, type_pal))
# type_colors = df_all['types'].map(type_lut)
# Create a custom colormap for the heatmap values
#cmap = sns.diverging_palette(240, 10, as_cmap=True)
linkage = hierarchy.linkage(df_zscore_features, method='ward', metric='euclidean')
data = df_zscore_features.transpose()
row_linkage = hierarchy.linkage(data, method='ward', metric='euclidean')
feature_order = hierarchy.leaves_list(row_linkage)
#print data.index
matchIndex = [data.index[x] for x in feature_order]
#print matchIndex
data = data.r |
cjaymes/pyscap | src/scap/model/oval_5/sc/linux/SystemDUnitDependencyItemElement.py | Python | gpl-3.0 | 1,135 | 0.002643 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with | PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
from scap.model.oval_5.sc.ItemType import ItemType
logger = logging.getLogger(__name__)
class SystemDUnitDependencyItemElement(ItemType):
    """OVAL system-characteristics item describing a systemd unit and
    the units it depends on (linux-sc schema)."""
    # Declarative XML mapping consumed by the Model base class: one
    # optional <unit> entity and any number of <dependency> entities.
    MODEL_MAP = {
        'elements': [
            {'tag_name': 'unit', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
            {'tag_name': 'dependency', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': None},
        ],
    }
|
jayGattusoNLNZ/DROID_comparer | DROID_export_comparer.py | Python | mit | 1,579 | 0.019633 | import csv
import os
#### make sure these file names are the same as the ones on your system
baseline_csv = r"baseline.csv"
new_csv = r"cleaned_csv.csv"
########### do not edit below this line #################
baseline_as_rows = []
new_as_rows = []
if not os.path.exists(baseline_csv):
quit("The baseline log csv file is not found - please check your filename '{}'".format(baseline_csv))
if not os.path.exists(new_csv):
quit("Your local log csv file is not found - please check your filename '{}'".format(new_csv))
with open(baseline_csv) as data:
baseline_as_csv = csv.reader(data)
for row in baseline_as_cs | v:
baseline_as_rows.append(row)
with open(n | ew_csv) as new_data:
new_rows = csv.reader(new_data)
for row in new_rows:
new_as_rows.append(row)
if len(baseline_as_rows) != len(new_as_rows):
quit("Your csv log file '{}' does not have the same number of rows as the baseline log '{}'.".format(new_csv, baseline_csv))
else:
print "Your csv log file '{}' has the same number of rows as the baseline log '{}'.".format(new_csv, baseline_csv)
print
for i, row in enumerate(baseline_as_rows):
if row != new_as_rows[i]:
print "Different row data detected in row #{}".format(i+1)
print "Baseline: \t{}".format(row)
print "New: \t\t{}".format(new_as_rows[i])
print
print "Comparison complete. If you do not see any rows indicated as 'different' your log file is the same as the baseline. Congrats!\nIf you see rows indicated as 'different' check your DROID settings and try again\n\n"
|
jinverar/crits | crits/relationships/views.py | Python | mit | 13,325 | 0.005854 | import json
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.template import RequestContext
from crits.core.user_tools import user_can_view_data
from crits.relationships.forms import ForgeRelationshipForm
from crits.relationships.handlers import get_relationship_types
from crits.relationships.handlers import forge_relationship, update_relationship_dates, update_relationship_confidences
from crits.relationships.handlers import update_relationship_types, delete_relationship, update_relationship_reasons
@user_passes_test(user_can_view_data)
def add_new_relationship(request):
    """
    Add a new relationship. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """

    if request.method == 'POST' and request.is_ajax():
        form = ForgeRelationshipForm(request.POST)
        # Relationship types live in the DB, so populate the choices
        # per-request rather than at form-definition time.
        choices = [(c, c) for c in get_relationship_types(False)]
        form.fields['forward_relationship'].choices = choices
        if form.is_valid():
            cleaned_data = form.cleaned_data
            results = forge_relationship(left_type=cleaned_data.get('forward_type'),
                                         left_id=cleaned_data.get('forward_value'),
                                         right_type=cleaned_data.get('reverse_type'),
                                         right_id=cleaned_data.get('dest_id'),
                                         rel_type=cleaned_data.get('forward_relationship'),
                                         rel_date=cleaned_data.get('relationship_date'),
                                         analyst=request.user.username,
                                         rel_reason=cleaned_data.get('rel_reason'),
                                         rel_confidence=cleaned_data.get('rel_confidence'),
                                         get_rels=True)
            if results['success']:
                # Re-render the relationships widget so the client can
                # swap it into the page.
                relationship = {'type': cleaned_data.get('forward_type'),
                                'value': cleaned_data.get('forward_value')}
                message = render_to_string('relationships_listing_widget.html',
                                           {'relationship': relationship,
                                            'nohide': True,
                                            'relationships': results['relationships']},
                                           RequestContext(request))
                result = {'success': True, 'message': message}
            else:
                message = "Error adding relationship: %s" % results['message']
                result = {'success': False, 'message': message}
        else:
            message = "Invalid Form: %s" % form.errors
            form = form.as_table()
            result = {'success': False, 'form': form, 'message': message}
        return HttpResponse(json.dumps(result), mimetype="application/json")
    else:
        error = "Expected AJAX POST"
        return render_to_response("error.html",
                                  {"error" : error },
                                  RequestContext(request))
@user_passes_test(user_can_view_data)
def update_relationship_type(request):
    """
    Update relationship type. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """

    # Guard clause: anything but an AJAX POST gets the error page.
    if not (request.method == 'POST' and request.is_ajax()):
        error = "Expected AJAX POST"
        return render_to_response("error.html",
                                  {"error" : error },
                                  RequestContext(request))
    post = request.POST
    results = update_relationship_types(left_type=post['my_type'],
                                        left_id=post['my_value'],
                                        right_type=post['reverse_type'],
                                        right_id=post['dest_id'],
                                        rel_type=post['forward_relationship'],
                                        rel_date=post['relationship_date'],
                                        new_type=post['new_relationship'],
                                        analyst=request.user.username)
    if results['success']:
        result = {'success': True,
                  'message': "Successfully updated relationship: %s" % results['message']}
    else:
        result = {'success': False,
                  'message': "Error updating relationship: %s" % results['message']}
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def update_relationship_confidence(request):
    """
    Update relationship confidence. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """

    # Guard clause: anything but an AJAX POST gets the error page.
    if not (request.method == 'POST' and request.is_ajax()):
        error = "Expected AJAX POST"
        return render_to_response("error.html",
                                  {"error" : error },
                                  RequestContext(request))
    # Reject anything outside the known confidence levels up front.
    new_confidence = request.POST['new_confidence']
    if new_confidence not in ('unknown', 'low', 'medium', 'high'):
        result = {'success': False,
                  'message': 'Unknown confidence level.'}
        return HttpResponse(json.dumps(result), mimetype="application/json")
    post = request.POST
    results = update_relationship_confidences(left_type=post['my_type'],
                                              left_id=post['my_value'],
                                              right_type=post['reverse_type'],
                                              right_id=post['dest_id'],
                                              rel_type=post['forward_relationship'],
                                              rel_date=post['relationship_date'],
                                              analyst=request.user.username,
                                              new_confidence=new_confidence)
    if results['success']:
        result = {'success': True,
                  'message': "Successfully updated relationship: %s" % results['message']}
    else:
        result = {'success': False,
                  'message': "Error updating relationship: %s" % results['message']}
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def update_relationship_reason(request):
"""
Update relationship reason. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
results = update_relationship_reasons(left_type=request.POST['my_type'],
left_id=request.POST['my_value'],
right_type=request.POST['reverse_type'],
right_id=request.POST['dest_id'],
rel_type=request.POST['forward_relationship'],
rel_date=request.POST['relationship_date'],
analyst=request.user.username,
new_reason=request.POST['new_reason'])
if results['success']:
message = "Successfully updated relationship: %s" % results['message']
result = {'success': True, 'message': message}
else:
|
longwosion/GitBundler | gitbundler/server.py | Python | bsd-2-clause | 7,538 | 0.006235 | #! /usr/bin/env python
#coding=utf-8
import os
from gitcmd import GitCmd
from config import GitBundlerConfig
class GitBundlerServer(object):
def __init__(self):
config = GitBundlerConfig()
self.url = config.get('server.upload.url', verbose=False)
self.user = config.get('server.upload.user', verbose=False)
self.password = config.get('server.upload.password', verbose=False)
def guess_bundle(self, name=None, path=None):
config = GitBundlerConfig()
repo, filename, options = (None, None, None)
if name:
repo, filename, options = config.get_serverbundle(name)
if path:
repo, filename, options = config.get_serverbundle_match(path)
if repo and filename and options:
self.repo = repo
self.filename = filename
self.options = options
return True
return False
def pushbundle(self, bundlename, range=None):
if self.guess_bundle(name=bundlename):
if range:
self.options = range
self.push(self.repo, self.filename, self.options)
def pushdir(self, path, range=None):
if self.guess_bundle(path=path):
if range:
self.options = range
self.push(self.repo, self.filename, self.options)
def upload(self, file):
from gitbundler.poster.encode import multipart_encode
from gitbundler.poster.streaming | http import register_openers
import urllib2
register_openers()
if os.path.exists(file):
print " Uploading File:: %s" % file
print " Server:: %s" % self.url
| file = open(file, "rb")
params = {'fileName': file}
datagen, headers = multipart_encode(params)
headers['gitbundler-user'] = self.user
headers['gitbundler-password'] = self.password
request = urllib2.Request(self.url, datagen, headers)
result = urllib2.urlopen(request)
print " Upload:: %s" % result.read()
file.close()
else:
print "error: cannot found upload file: %s." % file
def lpush(self, remote, branch=None):
git = GitCmd(self.filename, self.repo)
git.get_branch()
if branch:
remote_branch = branch
else:
remote_branch = git.branch
git.checkout(remote_branch)
git.execute('pull %s %s' % (remote, remote_branch))
def push(self, repo, filename, options):
from gitbundler.poster.encode import multipart_encode
from gitbundler.poster.streaminghttp import register_openers
import urllib2
register_openers()
repo = os.path.abspath(repo)
if not os.path.exists(repo):
print "error: repo folder %s doesn't existed" %repo
return
print "Bundle> \n File :: %s, git: %s, %s" % (filename, repo, options)
git = GitCmd(filename, repo)
git.get_branch()
print git.pull()
print git.bundle_create(filename, options)
if os.path.exists(os.path.join(repo, filename)):
print " Info :: %s" % git.ls_remote(filename)
print " Server:: %s" % self.url
file = open(os.path.join(repo, filename), "rb")
params = {'fileName': file}
datagen, headers = multipart_encode(params)
headers['gitbundler-user'] = self.user
headers['gitbundler-password'] = self.password
request = urllib2.Request(self.url, datagen, headers)
result = urllib2.urlopen(request)
print " Upload:: %s" % result.read()
file.close()
os.unlink(os.path.relpath(os.path.join(repo, filename)))
else:
print "error: generate git bundle fails."
class GitBundlerClient(object):
def __init__(self):
config = GitBundlerConfig()
self.url = config.get('client.download.url', verbose=False)
self.user = config.get('client.download.user', verbose=False)
self.password = config.get('client.download.password', verbose=False)
def pullbundle(self, bundlename, force_branch=None):
config = GitBundlerConfig()
repo, filename = config.get_clientbundle(bundlename)
if repo and filename:
self.pull(repo, filename, force_branch)
def pulldir(self, path, force_branch=None):
config = GitBundlerConfig()
repo, filename = config.get_clientbundle_match(path)
if repo and filename:
self.pull(repo, filename, force_branch)
def download(self, filename):
import os
import urllib2
fileurl = '%s%s' % (self.url, filename)
print " Downloading File:: %s" % file
print " Server:: %s" % self.url
headers = {}
headers['gitbundler-user'] = self.user
headers['gitbundler-password'] = self.password
request = urllib2.Request(fileurl, None, headers)
server = urllib2.urlopen(request)
bundle = open(filename, 'wb')
bundle.write(server.read())
bundle.close()
server.close()
if os.path.exists(filename):
print " Download %s successfully" % filename
else:
print "error: download file %s fails." % filename
def pull(self, repo, filename, force_branch):
import os
import urllib2
git = GitCmd(filename, repo)
git.get_branch()
git.set_force_branch(force_branch)
fileurl = '%s%s' % (self.url, filename)
print 'cmd >> Downloading %s' % fileurl
headers = {}
headers['gitbundler-user'] = self.user
headers['gitbundler-password'] = self.password
request = urllib2.Request(fileurl, None, headers)
server = urllib2.urlopen(request)
bundle = open(os.path.join(repo, filename), 'wb')
bundle.write(server.read())
bundle.close()
server.close()
if os.path.exists(os.path.join(repo, filename)):
print " Info :: %s" % git.ls_remote(filename)
print " Server:: %s" % self.url
print git.pull_bundle(filename)
#os.unlink(os.path.relpath(os.path.join(repo, filename)))
else:
print "error: git bundle download fails."
def archive_bundle(self, bundlename, commit, output):
config = GitBundlerConfig()
repo, filename = config.get_clientbundle(bundlename)
if repo and filename:
self.archive(repo, filename, commit, output)
def archive_dir(self, path, commit, output):
config = GitBundlerConfig()
repo, filename = config.get_clientbundle_match(path)
if repo and filename:
self.archive(repo, filename, commit, output)
def archive(self, repo, filename, commit, output):
git = GitCmd(filename, repo)
print git.archive(commit, output)
|
VIVEKLUCKY1848/gedit-plugins-1 | plugins/synctex/synctex/__init__.py | Python | gpl-2.0 | 219 | 0.004566 | import gi
gi.r | equire_version('Gedit', '3.0')
gi.require_version('Gtk', '3.0')
gi.require_version('Peas', '1.0')
gi.require_version('PeasGtk', '1.0')
from .synctex import SynctexApp | Activatable, SynctexWindowActivatable
|
FXIhub/condor | examples/scripts/rotations/example.py | Python | bsd-2-clause | 6,922 | 0.014447 | import numpy
try:
import matplotlib.pyplot as pypl
plotting = True
except:
plotting = False
import os, shutil
this_dir = os.path.dirname(os.path.realpath(__file__))
import condor
import logging
logger = logging.getLogger('condor')
logger.setLevel("INFO")
#logger.setLevel("DEBUG")
out_dir = this_dir + "/pngs"
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.mkdir(out_dir)
# Source
src = condor.Source(wavelength=0.1E-9, pulse_energy=1E-3, focus_diameter=1E-6)
# Detector
det = condor.Detector(distance=0.5, pixel_size=750E-6, nx=100, ny=100)#, cx=55, cy=55)
#angles_d = numpy.array([0., 22.5, 45.])
angles_d = numpy.array([72.5])
for angle_d in angles_d:
angle = angle_d/360.*2*numpy.pi
rotation_axis = numpy.array([1.,1.,0.])/numpy.sqrt(2.)
quaternion = condor.utils.rotation.quat(angle,rotation_axis[0],rotation_axis[1], rotation_axis[2])
rotation_values = numpy.array([quaternion])
rotation_formalism = "quaternion"
rotation_mode = "extrinsic"
#rotation_values = None
#rotation_formalism = "random"
#rotation_mode = "extrinsic"
#rotation_values = None
#rotation_formalism = None
#rotation_mode = "extrinsic"
#print("Angle = %.2f degrees" % angle_d)
short_diameter = 25E-9*12/100.
long_diameter = 2*short_diameter
spheroid_diameter = condor.utils.spheroid_diffraction.to_spheroid_diameter(short_diameter/2.,long_diameter/2.)
spheroid_flattening = condor.utils.spheroid_diffraction.to_spheroid_flattening(short_diameter/2.,long_diameter/2.)
N_long = 20
N_short = int(round(short_diameter/long_diameter * N_long))
# Spheroid
if True:
# Ideal spheroid
#print("Simulating spheroid")
par = condor.ParticleSpheroid(diameter=spheroid_diameter, material_type="water", flattening=spheroid_flattening, rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
s = "particle_spheroid"
E = condor.Experiment(src, {s : par}, det)
res = E.propagate()
real_space = numpy.fft.fftshift(numpy.fft.ifftn(res["entry_1"]["data_1"]["data_fourier"]))
vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
if plotting:
pypl.imsave(out_dir + "/%s_%2.2fdeg.png" % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
pypl.imsave(out_dir + "/%s_rs_%2.2fdeg.png" % (s,angle_d), abs(real_space))
if True:
# Map (spheroid)
#print("Simulating map (spheroid)")
par = condor.ParticleMap(diameter=spheroid_diameter, material_type="water", flattening=spheroid_flattening, geometry="spheroid", rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
s = "particle_map_spheroid"
E = condor.Experiment(src, {s : par}, det)
res = E.propagate()
real_space = numpy.fft.fftshift(numpy.fft.ifftn(res["entry_1"]["data_1"]["data_fourier"]))
vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
if plotting:
pypl.imsave(out_dir + "/%s_%2.2f.png" % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
pypl.imsave(out_dir + "/%s_rs_%2.2f.png" % (s,angle_d), abs(real_space))
# Box
if True:
# Map (box)
dx = long_diameter/(N_long-1)
map3d = numpy.zeros(shape=(N_long,N_long,N_long))
map3d[:N_short,:,:N_short] = 1.
map3d[N_short:N_short+N_short,:N_short,:N_short] = 1.
# Map
#print("Simulating map (custom)")
par = condor.ParticleMap(diameter=long_diameter, material_type="water", geometry="custom", map3d=map3d, dx=dx, rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
s = "particle_map_custom"
E = condor.Experiment(src, {s : par}, det)
res = E.propagate()
if plotting:
data_fourier = res["entry_1"]["data_1"]["data_fourier"]
#data_fourier = abs(data_fourier)*numpy.exp(-1.j*numpy.angle(data_fourier))
real_space = numpy.fft.fftshift(numpy.fft.ifftn(numpy.fft.fftshift(data_fourier)))
vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
pypl.imsave(out_dir + "/%s_map.png" % (s),map3d.sum(0))
pypl.imsave(out_dir + "/%s_%2.2f.png" % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
pypl.imsave(out_dir + "/%s_%2.2f_phases.png" % (s,angle_d), numpy.angle(res["entry_1"]["data_1"]["data_fourier"])%(2*numpy.pi))
pypl.imsave(out_dir + "/%s_rs_%2.2f.png" % (s,angle_d), abs(real_space))
if True:
# Atoms (box)
#print("Simulating atoms")
Z1,Y1,X1 = numpy.meshgrid(numpy.linspace(0, short_diameter, N_short),
numpy.linspace(0, long_diameter, N_long),
numpy.linspace(0, short_diameter, N_short),
indexing="ij")
Z2,Y2,X2 = numpy.meshgrid(numpy.linspace(0, short_diameter, N_short) + long_diameter/2.,
numpy.linspace(0, short_diameter, N_short),
numpy.linspace(0, short_diameter, N_short),
indexing="ij")
Z = numpy.concatenate((Z1.ravel(),Z2.ravel()))
Y = numpy.concatenate((Y1.ravel(),Y2.ravel()))
X = numpy.concatenate((X1.ravel(),X2.ravel()))
proj = numpy.zeros(shape=(N_long,N_long))
dx = long_diameter/(N_long-1)
for (x,y,z) in zip(X.ravel(),Y.ravel(),Z.ravel()):
proj[int(round(y/dx)),int(round(x/dx))] += 1
if plotting:
pypl.imsave(out_dir + "/%s_proj.png" % (s),proj)
atomic_positions = numpy.array([[x,y,z] for x,y,z in zip(X.ravel(),Y.ravel(),Z.ravel())])
atomic_numbers = numpy.ones(int(atomic_positions.size/3), dtype=numpy.int16)
par = condor.ParticleAtoms(atomic_positions=atomic_positions, atomic_numbers=atomic_numbers, rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
s = "particle_atoms"
E = condor.Experiment(src, {s : par}, det)
res = E.propagate()
if plotting:
real_space = numpy.fft.fftshift(numpy.fft.ifftn(nu | mpy.fft.fftshift(res["entry_1"]["data_1"]["data_fourier"])))
fourier_space = res["entry_1"]["data_1"]["data_fourier"]
vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
pypl.imsave(out_dir + "/%s_%2.2f.png | " % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
pypl.imsave(out_dir + "/%s_%2.2f_phases.png" % (s,angle_d), numpy.angle(fourier_space)%(2*numpy.pi))
pypl.imsave(out_dir + "/%s_rs_%2.2f.png" % (s,angle_d), abs(real_space))
|
gercordero/va_de_vuelta | src/resumen.py | Python | gpl-3.0 | 166 | 0.042169 | import | pilas
from aplicacion import App
pilas.iniciar(ancho = 1280, alto = 800, titulo = "Aplicadion para Docente")
pilas.cambiar_escen | a(App())
pilas.ejecutar() |
leifos/treconomics | treconomics_project/treconomics/experiment_configuration.py | Python | mit | 8,405 | 0.009756 | __author__ = 'leif'
import os
import socket
import logging
import logging.config
import logging.handlers
from autocomplete_trie import AutocompleteTrie
from ifind.search.engines.whooshtrec import Whooshtrec
from experiment_setup import ExperimentSetup
work_dir = os.getcwd()
# when deployed this needs to match up with the hostname, and directory to where the project is
my_whoosh_doc_index_dir = os.path.join(work_dir, 'data/fullindex/')
if 'local' not in socket.gethostname():
my_whoosh_doc_index_dir = '/home/leifos/test500index'
#my_whoosh_doc_index_dir = '/Users/david/Workspace/indexes/aquaint_test500_whoosh'
my_whoosh_query_index_dir = os.path.join(work_dir, "/trec_query_index/index")
my_experiment_log_dir = work_dir
qrels_file = os.path.join(work_dir, "data/TREC2005.qrels.txt")
qrels_diversity_file = os.path.join(work_dir, "data/sigir-combined.diversity.qrels")
stopword_file = os.path.join(work_dir, "data/stopwords.txt")
data_dir = os.path.join(work_dir, "data")
print "Work DIR: " + work_dir
print "QRELS File: " + qrels_file
print "my_whoosh_doc_index_dir: " + my_whoosh_doc_index_dir
print "Stopword file: " + stopword_file
event_logger = logging.getLogger('event_log')
event_logger.setLevel(logging.INFO)
event_logger_handler = logging.FileHandler(os.path.join(my_experiment_log_dir, 'experiment.log'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
event_logger_handler.setFormatter(formatter)
event_logger.addHandler(event_logger_handler)
# workflow must always start with startexperiment/
exp_work_flows = [
['startexperiment/', 'consent', 'preexperiment/AN/',
'prepracticetask/0/', 'search/0/', 'postpracticetask/0/',
# 'anitatimeinstructions/TC/',
'anitapretasksurvey/1/', 'search/1/', 'anitaposttask0survey/1/',
'anitaposttask1survey/1/', 'anitaposttask2survey/1/',
'anitaposttask3survey/1/', 'taskspacer/',
'anitapretasksurvey/2/', 'search/2/', 'anitaposttask0survey/2/',
'anitaposttask1survey/2/', 'anitaposttask2survey/2/',
'anitaposttask3survey/2/', 'taskspacer/',
'anitapretasksurvey/3/', 'search/3/', 'anitaposttask0survey/3/',
'anitaposttask1survey/3/', 'anitaposttask2survey/3/',
'anitaposttask3survey/3/', 'taskspacer/',
'anitapretasksurvey/4/', 'search/4/', 'anitaposttask0survey/4/',
'anitaposttask1survey/4/', 'anitaposttask2survey/4/',
'anitaposttask3survey/4/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
['startexperiment/', 'consent', 'preexperiment/AN/',
'prepracticetask/0/', 'search/0/', 'postpracticetask/0/',
# 'anitatimeinstructions/NTC/',
'anitapretasksurvey/1/', 'search/1/', 'anitaposttask0survey/1/',
'anitaposttask1survey/1/', 'anitaposttask2survey/1/',
'anitaposttask3survey/1/', 'taskspacer/',
'anitapretasksurvey/2/', 'search/2/', 'anitaposttask0survey/2/',
'anitaposttask1survey/2/', 'anitaposttask2survey/2/',
'anitaposttask3survey/2/', 'taskspacer/',
'anitapretasksurvey/3/', 'search/3/', 'anitaposttask0survey/3/',
'anitaposttask1survey/3/', 'anitaposttask2survey/3/',
'anitaposttask3survey/3/', 'taskspacer/',
'anitapretasksurvey/4/', 'search/4/', 'anitaposttask0survey/4/',
'anitaposttask1survey/4/', 'anitaposttask2survey/4/',
'anitaposttask3survey/4/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
['startexperiment/', 'consent', 'preexperiment/AN/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
]
snippet_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/','taskspacer2/0/', 'search/0/', 'postpracticetask/0/', 'taskspacer',
'snippetpretask/1/','taskspacer2/1/', 'search/1/', 'snippetposttask/1/','systemsnippetposttask/1/',
'taskspacer',
'snippetpretask/2/', 'taskspacer2/2/','search/2/', 'snippetposttask/2/','systemsnippetposttask/2/',
'taskspacer',
'snippetpretask/3/','taskspacer2/3/', 'search/3/', 'snippetposttask/3/','systemsnippetposttask/3/',
'taskspacer',
'snippetpretask/4/','taskspacer2/4/', 'search/4/', 'snippetposttask/4/','systemsnippetposttask/4/',
'taskspacer', 'snippetexitsurvey/', 'performance/', 'endexperiment/',
'logout/'
]
diversity_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/', 'search/0/', 'diversityperformancepractice/', 'postpracticetask/0/', 'taskspacer/',
'snippetpretask/1/', 'taskspacerwithdetails/1/', 'search/1/', 'diversityposttask/1/','systemdiversityposttask/1/',
'taskspacer',
'snippetpretask/2/','taskspacerwithdetails/2/','search/2/', 'diversityposttask/2/','systemdiversityposttask/2/',
'taskspacer',
'snippetpretask/3/','taskspacerwithdetails/3/', 'search/3/', 'diversityposttask/3/','systemdiversitypos | ttask/3/',
'taskspacer',
'snippetpretask/4/','taskspacerwithdetails/4/', 'search/4/', 'diversityposttask/4/','systemdiversityposttask/4/',
'taskspacer', 'diversityexitsurvey/', 'diversityperformance/', 'endexperiment/',
'logout/'
]
jaana_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/','taskspac | er2/0/', 'search/0/', 'postpracticetask/0/', 'taskspacer',
'snippetpretask/1/','taskspacer2/1/', 'search/1/', 'posttaskquestions/1/',
'taskspacer',
'snippetpretask/2/', 'taskspacer2/2/','search/2/', 'posttaskquestions/2/',
'taskspacer',
'performance/', 'endexperiment/',
'logout/'
]
test_flow = [
'startexperiment/', 'snippetexitsurvey/','snippetpretask/1/', 'snippetposttask/1/','systemsnippetposttask/1/',
'pretask/1/', 'search/1/','taskspacer',
'pretask/2/', 'search/2/','taskspacer',
'pretask/3/', 'search/3/',
'pretask/4/', 'search/4/','endexperiment/',
'logout/'
]
suggestion_trie = AutocompleteTrie(
min_occurrences=3,
suggestion_count=8,
include_stopwords=False,
stopwords_path=os.path.join(work_dir, "data/stopwords.txt"),
vocab_path=os.path.join(work_dir, "data/vocab.txt"),
vocab_trie_path=os.path.join(work_dir, "data/vocab_trie.dat"))
search_engine = Whooshtrec(
whoosh_index_dir=my_whoosh_doc_index_dir,
stopwords_file=stopword_file,
model=1,
newschema=True)
search_engine.key_name = 'bm25'
search_engine.set_fragmenter(frag_type=2, surround=30)
exp_chiir2016 = ExperimentSetup(
workflow= exp_work_flows[0],
engine=search_engine,
practice_topic='408',
topics=['347', '367', '435','354'],
rpp=10,
practice_interface=1,
practice_diversity = 4,
interface=[1, 1, 1, 1],
diversity=[4,4,4,4],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,600,600,600, 600],
delay_results = [0,5,0,5,0]
)
exp_sigir2017 = ExperimentSetup(
workflow=snippet_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '341', '435','408'],
rpp=10,
practice_interface=1,
interface=[1, 2, 3, 4],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,600,600,600, 600]) # 300s = 5min; 600s = 10min; 1200s = 20min
exp_jaana = ExperimentSetup(
workflow=jaana_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '435'],
rpp=10,
practice_interface=1,
interface=[1, 1],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,1200,1200])
exp_sigir2018 = ExperimentSetup(
workflow=diversity_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '341', '435','408'],
rpp=10,
practice_interface=1,
interface=[1, 1, 1, 1],
rotation_type=2,
practice_diversity=2,
diversity=[1,2,3,4],
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
target=4,
|
spilkjir/CTGViewer | src/PyQwtNavigationPlot.py | Python | gpl-3.0 | 7,973 | 0.001255 | # -*- coding: utf-8 -*-
#
# Created on Oct 15, 2013
# @authors: Jiri Spilka
# http://people.ciirc.cvut.cz/~spilkjir
# @2015, CIIRC, Czech Technical University in Prague
#
# Licensed under the terms of the GNU GENERAL PUBLIC LICENSE
# (see CTGViewer.py for details)
"""
PyQwtNavigationPlot
--------------------
The navigation plot shows overview of complete signals. By clicking on this
plot the controled plots are moved to the clicked position. The area of visible signal
is highlighted using py:class:´PyQwtNavigationPlotHighlight´
Reference
~~~~~~~~~
.. autoclass:: PyQwtNavigationPlot
:members:
:undoc-members:
.. autoclass:: PyQwtNavigationPlotHighlight
:members:
:undoc-members:
"""
from PyQt4.QtGui import QColor
from PyQt4 import Qt
from PyQt4.Qwt5.Qwt import QwtPlotCurve, QwtScaleDiv, QwtPlot
from PyQwtWidgetGui import PyQwtWidgetGui
from PyQwtCanvasPicker import PyQwtCanvasPicker
from Config import ConfigStatic
class PyQwtNavigationPlot(PyQwtWidgetGui):
"""
The navigation plot shows overview of complete signals. By clicking on this
plot a signal is emitted with clicked point position.
:param parent:
:param l_controled_plots: list of controled plots
:type parent: QWidget
:type l_controled_plots: :py:mod:`PyQwtWidgetGui`
"""
# selectedPointChanged = pyqtSignal(['int'])
def __init__(self, parent=None, l_controled_plots=None):
PyQwtWidgetGui.__init__(self, parent)
self.l_controled_plots = l_controled_plots
self._timeString = None
self._selected_point = 0
# self._paper_height = 75
self._height = 120
# self._height = 220
self._toco_offset = -1*ConfigStatic.plot_toco_offset
self.xlabel("")
color = QColor(200, 200, 200)
self.setCanvasBackground(color)
self.xAxisEnabled(True)
self.yAxisEnabled(False)
# self.yAxisEnabled(True)
self.setMaximumHeight(self._height)
self.setMinimumHeight(self._height)
self.set_locator_minute(60)
# self.setLocatorHourInterval(2)
self.setYMinMax(self._toco_offset - 1, | 200) # the -1 is because of UCs with all zero values
# self.setYMinMax(0, 300)
self.canvasPicker = PyQwtCanvasPicker(self)
self.canvasPicker.signal_point_clicked.connect(self.set_selected_point)
self._step = 300*self.get_sampling_freq() # 300 = 5 minutes / for left-rigth arrow move
self.signal_plot_cle | ared.connect(self.reinit)
self.reinit()
def reinit(self):
# self._selected_point = 0
navplothighlight = PyQwtNavigationPlotHighlight(self)
navplothighlight.attach(self)
# cm = self.canvasMap(QwtPlot.xBottom)
# self._pointFmin = self.invTransform(QwtPlot.xBottom, cm.p1())
# self._pointFmax = self.invTransform(QwtPlot.xBottom, cm.p2())
def setXAxis(self, center_point=None, breplot=True):
if self._bPlotDatesXAxis:
scalediv = self.axisScaleDiv(QwtScaleDiv.MajorTick)
scalediv.setTicks(QwtScaleDiv.MinorTick, [])
scalediv.setTicks(QwtScaleDiv.MediumTick, [])
scalediv.setTicks(QwtScaleDiv.MajorTick, self._time_tics_located)
# print scalediv.isValid()
# print self._time_tics_located
scalediv.setInterval(self._minTime, self.xAxisMaxSample())
self.setAxisScaleDiv(QwtPlot.xBottom, scalediv)
if breplot:
self.replot()
def setYAxis(self, height=None, bReplot=True):
self.setAxisScale(QwtPlot.yLeft, self._minView, self._maxView, 0)
def clear_selected_point(self):
self._selected_point = 0
def get_selected_point(self):
"""
When user clicked on the plot, this variable holds x-coordinate of a point
:rtype: QPointF
"""
return self._selected_point
def set_selected_point(self, point):
"""
Set point clicked by a user.
:param point: clicked point
:type point: QPointF
"""
self._selected_point = point.x()
self.correct_point_boundaries()
self._selected_point_changed(self._selected_point)
def change_selected_pointright(self, step=None):
step = step if step is not None else self._step
self._selected_point += step
self.correct_point_boundaries()
self._selected_point_changed(self._selected_point)
def change_selected_pointleft(self, step=None):
step = step if step is not None else self._step
self._selected_point -= step
self.correct_point_boundaries()
self._selected_point_changed(self._selected_point)
def correct_point_boundaries(self):
"""
If a point is inside boundaries. Point is restricted to be within r = (xmax - xmin)/2, [r, max_view - r]
"""
xmin = self.l_controled_plots[0].viewXMinSample()
xmax = self.l_controled_plots[0].viewXMaxSample()
r = int((xmax - xmin)/2)
nmax = self.xAxisMaxSample()
if self._selected_point < r:
self._selected_point = r
elif self._selected_point > nmax - r:
self._selected_point = nmax - r
def _selected_point_changed(self, point):
"""
When user click on navigation plot:
Updates all controled plots -> set their x axis
Updates (this) navigation plot.
"""
for plot in self.l_controled_plots:
plot.setXAxis(point)
self.replot()
def get_toco_offset(self):
return self._toco_offset
class PyQwtNavigationPlotHighlight(QwtPlotCurve):
"""
The navigation plot highlight is used for highlighting area where
user clicked with mouse. This class is inherited from Qwt.QwtPlotCurve.
:param p_navplot: navigation plot widget
:type p_navplot: :py:mod:`PyQwtWidgetGui`
"""
def __init__(self, p_navplot):
QwtPlotCurve.__init__(self)
self.__p_nav_plot = p_navplot
_color = QColor(0, 0, 0)
_color.setAlpha(50)
self._penColor = _color
self._brushColor = _color
def drawFromTo(self, painter, xmap, ymap, start, stop):
"""
Draws rectangles around a point where user clicked
:param painter:
:param xmap:
:param ymap:
:param start:
:param stop:
:type painter: QPainter
"""
painter.setPen(Qt.QPen(self._penColor, 1))
painter.setBrush(self._brushColor)
maxyview = self.__p_nav_plot.viewYMaxSample()
minyview = self.__p_nav_plot.viewYMinSample()
yminmarginpx, ymaxmarginpx = self.__p_nav_plot.getMarginsYPx()
py1 = ymap.transform(minyview) + yminmarginpx
py2 = ymap.transform(maxyview) - ymaxmarginpx
clickedpoint = self.__p_nav_plot.get_selected_point()
viewxminsamp = self.__p_nav_plot.l_controled_plots[0].viewXMinSample()
viewxmaxsamp = self.__p_nav_plot.l_controled_plots[0].viewXMaxSample()
xaxisminsmaple = self.__p_nav_plot.l_controled_plots[0].xAxisMinSample()
xaxismaxsmaple = self.__p_nav_plot.l_controled_plots[0].xAxisMaxSample()
# check boundaries for line plotting
if clickedpoint < viewxminsamp:
clickedpoint = viewxminsamp
elif clickedpoint > viewxmaxsamp:
clickedpoint = viewxmaxsamp
# drawRect(x, y, width, height)
painter.drawRect(xmap.transform(viewxminsamp), py1,
xmap.transform(viewxmaxsamp) - xmap.transform(viewxminsamp), py2 - py1)
painter.drawLine(xmap.transform(clickedpoint), py1, xmap.transform(clickedpoint), py2)
# painter.setPen(Qt.QPen(QColor(100, 100, 100), 1))
# painter.setBrush(self._brushColor)
# p = ymap.transform((maxyview - minyview)/2)
# painter.drawLine(xmap.transform(xaxisminsmaple), p, xmap.transform(xaxismaxsmaple), p)
|
wittrup/crap | python/kal.py | Python | mit | 1,302 | 0.004608 | cmd = r"C:\Users\wittr\sdr\kalibrate-win-release\kal.exe"
import os, re
from subprocess import Popen, PIPE
pattern_band = r'\s*band to scan\s*\((.+)\)'
pattern_chan = r'(chan).+?(\d+).+?([\d.]+)(\w+).+?([\d.]+)(\w+).+?(\w+).+?([\d.]+)'
process = Popen(cmd + ' -h', shell=True, stdout=P | IPE) # To use a pipe with the subprocess module, you have to pass shell=True
(result, err) = process.communicate()
process.wait() # Wait for p | rocess to finish
result = str(result, encoding='ascii')
matches = re.findall(pattern_band, result)
results = {}
for match in matches:
bands = match.split(', ')
for band in bands:
process = Popen(cmd + ' -s ' + band, shell=True, stdout=PIPE, stderr=PIPE) # To use a pipe with the subprocess module, you have to pass shell=True
(result, err) = process.communicate()
process.wait() # Wait for process to finish
result = str(result, encoding='ascii')
channels = re.findall(pattern_band, result)
for channel in channels:
channel = list(channel)
with channel as c:
c.insert(2, 'freq')
c.insert(4, 'freqdesignation')
c.insert(6, 'bandwidth')
c.insert(8, 'banddesignation')
channel = dict(zip(c[0::2], c[1::2]))
|
danieljf24/cmrf | relevanceFusion.py | Python | mit | 1,905 | 0.007874 | import os
import sys
import numpy as np
from basic.common import checkToSkip
def process(options):
overwrite = options.overwrite
inputeFile = options.inputeFile
weightFile = options.weightFile
resultFile = options.resultFile
weightFile = os.path.join('result', weightFile)
weight = open(weightFile).readline().strip().split()
weight = np.array(weight, dtype=np.float)
resultFile = os.path.join('result', resultFile)
if checkToSkip(resultFile, overwrite):
sys.exit(0)
fout = open(resultFile, 'w')
done = 0
for line in open(os.path.join('result', inputeFile)):
elems = line.strip().split()
vecs = map(float, elems[3:])
vecs = np.array(vecs, dtype=np.float)
assert(len(weight) == len(vecs))
fout.write(" ".join(elems[:2]) + " " + str(np.dot(weight, vecs)) + '\n')
done += 1
if done % 10000 == 0:
print done, 'Done'
fout.close()
print "final score result after relevance fusion have written in %s" % resultFile
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="""usage: %prog [options] """)
parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default: 0)")
parser.add_option("--inputeFile", default='qid.img.lable.feature.txt', type="string", help="file stored all sco | re from different methods")
parser.add_option("--weightFile", default='optimized_wights.txt', type="string", help="optimized wight will be written in the file")
parser.add_option("--resultFile", default='fianl.result.txt', type="string", help="final score after relevance fusion")
(options, args) = parser.parse_args(argv)
return process(options)
if __name__ == " | __main__":
sys.exit(main())
|
karllessard/tensorflow | tensorflow/python/kernel_tests/lookup_ops_test.py | Python | apache-2.0 | 145,826 | 0.008771 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lookup ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import six
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as reader_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load as saved_model_load
from tensorflow.python.saved_model import save as saved_model_save
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable
from tensorflow.python.util import compat
class BaseLookupTableTest(test.TestCase):
  """Base class that resolves TF1 vs TF2 variants of lookup-table APIs."""

  def getHashTable(self):
    """Return the static hash table class for the active TF major version."""
    if tf2.enabled():
      return lookup_ops.StaticHashTable
    return lookup_ops.StaticHashTableV1

  def getVocabularyTable(self):
    """Return the static vocabulary table class for the active TF version."""
    if tf2.enabled():
      return lookup_ops.StaticVocabularyTable
    return lookup_ops.StaticVocabularyTableV1

  def initialize_table(self, table):
    """Run the table initializer when running under TF1; no-op under TF2."""
    if not tf2.enabled():
      self.evaluate(table.initializer)
class StaticHashTableTest(BaseLookupTableTest):
  def testStaticHashTable(self):
    """Builds a table, checks size, lookup with a miss, and export."""
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    self.assertAllEqual([3], output.get_shape())
    result = self.evaluate(output)
    # "tank" is not a key, so it maps to the default value (-1).
    self.assertAllEqual([0, 1, -1], result)
    exported_keys_tensor, exported_values_tensor = table.export()
    # Export order is unspecified; compare as multisets.
    self.assertItemsEqual([b"brain", b"salad", b"surgery"],
                          self.evaluate(exported_keys_tensor))
    self.assertItemsEqual([0, 1, 2], self.evaluate(exported_values_tensor))
  def testStaticHashTableFindHighRank(self):
    """Lookup preserves the shape of a rank-2 (matrix) key tensor."""
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant([["brain", "salad"],
                                         ["tank", "tarkus"]])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    # Misses ("tank", "tarkus") map to the default value elementwise.
    self.assertAllEqual([[0, 1], [-1, -1]], result)
  def testStaticHashTableInitWithPythonArrays(self):
    """Table can be initialized from plain Python lists (not tensors)."""
    default_val = -1
    keys = ["brain", "salad", "surgery"]
    values = [0, 1, 2]
    # value_dtype must be given explicitly since Python ints default to int32.
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(
            keys, values, value_dtype=dtypes.int64), default_val)
    self.initialize_table(table)
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
def testStaticHashTableInitWithNumPyArrays(self):
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
  def testMultipleStaticHashTables(self):
    """Several tables sharing one initializer coexist and query correctly."""
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    # Three independent tables built from the same key/value tensors.
    table1 = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    table2 = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    table3 = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table1)
    self.initialize_table(table2)
    self.initialize_table(table3)
    self.assertAllEqual(3, self.evaluate(table1.size()))
    self.assertAllEqual(3, self.evaluate(table2.size()))
    self.assertAllEqual(3, self.evaluate(table3.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output1 = table1.lookup(input_string)
    output2 = table2.lookup(input_string)
    output3 = table3.lookup(input_string)
    out1, out2, out3 = self.evaluate([output1, output2, output3])
    # All tables must produce identical results for identical input.
    self.assertAllEqual([0, 1, -1], out1)
    self.assertAllEqual([0, 1, -1], out2)
    self.assertAllEqual([0, 1, -1], out3)
  def testStaticHashTableWithTensorDefault(self):
    """Default value may be a scalar tensor rather than a Python int."""
    default_val = constant_op.constant(-1, dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
def testStaticHashTableWithSparseTensorInput(self):
default_val = constant_op.constant( | -1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_ | tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = self.evaluate(output)
|
tunegoon/asteria | manage.py | Python | mit | 250 | 0 | #!/usr/bin/env python
import os
import sys


def main():
    """Run Django's administrative command-line utility."""
    # The settings module must be set before importing anything that
    # touches Django configuration.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asteria.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
weaponsjtu/neteye | urls.py | Python | gpl-2.0 | 1,514 | 0.023118 | from django.conf.urls.defaults import *
#from pinax.apps.account import urls
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from wangji.sharesite.models import Onesite

# Info dict for Django generic views: the full Onesite queryset.
info_dict = {
    'queryset': Onesite.objects.all(),
}

# URL routes for the site (old-style string-view Django URLconf).
urlpatterns = patterns('',
    # Example:
    # (r'^wangji/', include('wangji.foo.urls')),

    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    # Serve static files directly (development use only).
    (r'^static/(?P<path>.*)$', 'django.views.static.serve',
     {'document_root': settings.STATIC_ROOT, 'show_indexes': True}),
    #(r"^account/", include("pinax.apps.account.urls")),
    (r'^account/', include('wangji.account.urls')),
    (r'^people/', include('wangji.people.urls')),
    (r'^sharesite/', include('wangji.sharesite.urls')),
    (r'^(?P<tag_id>\d+)/$', 'wangji.sharesite.views.tag'),
    (r'^siteinfo/$', 'wangji.sharesite.option.siteinfo'),
    (r'^$', 'wangji.sharesite.views.index'),
    (r'^snooker/$', 'wangji.sharesite.snooker.snooker'),
    (r'^sjtu/$', 'wangji.sharesite.snooker.index'),
    (r'^invite/$', 'wangji.sharesite.snooker.invite'),
    (r'^snooker/about/$', 'wangji.sharesite.snooker.about'),
    (r'^snooker/players/$', 'wangji.sharesite.snooker.players'),
    (r'^snooker/clubs/$', 'wangji.sharesite.snooker.clubs'),
    (r'^snooker/voice/$', 'wangji.sharesite.snooker.voice'),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
|
michsien/Learning_python | exercises _1-44/ex41.py | Python | mit | 2,013 | 0.03378 | import random
from urllib import urlopen
import sys

# Source of the word list used to fill in placeholders (Python 2 urllib).
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []

# Drill templates: keys are code snippets, values are English descriptions.
# Placeholders: %%% = class name, *** = other name, @@@ = parameter list.
PHRASES = {
    "class %%%(%%%):":
      "Make a class named %%% that is-a %%%.",
    "class %%%(object):\n\tdef __init__(self, ***)" :
      "class %%% has-a __init__ that takes self and *** parameters.",
    "class %%%(object):\n\tdef ***(self, @@@)":
      "class %%% has-a function named *** that takes self and @@@ parameters.",
    "*** = %%%()":
      "Set *** to an instance of class %%%.",
    "***.***(@@@)":
      "From *** get the *** function, and call it with parameters self, @@@.",
    "***.*** = '***'":
      "From *** get the *** attribute and set it to '***'."
}

# Do they want to drill English phrases first (answer -> code)?
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
    PHRASE_FIRST = True
# Load up the words from the website, one word per line.
for word in urlopen(WORD_URL).readlines():
    WORDS.append(word.strip())
def convert(snippet, phrase):
    """Return [snippet, phrase] with placeholders filled by random words.

    Placeholders: %%% -> capitalized class names, *** -> other names,
    @@@ -> comma-separated parameter lists of 1-3 words. The same random
    substitutions are applied to both strings so question and answer match.
    """
    class_names = [w.capitalize() for w in
                   random.sample(WORDS, snippet.count("%%%"))]
    other_names = random.sample(WORDS, snippet.count("***"))
    results = []
    param_names = []

    # Pre-draw one parameter list per @@@ occurrence.
    for i in range(0, snippet.count("@@@")):
        param_count = random.randint(1, 3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))

    for sentence in snippet, phrase:
        result = sentence[:]

        # fake class names
        for word in class_names:
            result = result.replace("%%%", word, 1)

        # fake other names
        for word in other_names:
            result = result.replace("***", word, 1)

        # fake parameter lists
        for word in param_names:
            result = result.replace("@@@", word, 1)

        results.append(result)

    return results
# Keep going until they hit CTRL-D (EOF), which raises EOFError.
try:
    while True:
        # Shuffle the drill order each pass (Python 2: keys() is a list).
        snippets = PHRASES.keys()
        random.shuffle(snippets)
        for snippet in snippets:
            phrase = PHRASES[snippet]
            question, answer = convert(snippet, phrase)
            if PHRASE_FIRST:
                # Swap so the English description is asked first.
                question, answer = answer, question
            print question
            raw_input("> ")
            print "ANSWER: %s\n\n" % answer
except EOFError:
    print "\nBye"
rohitranjan1991/home-assistant | tests/components/homeassistant/test_scene.py | Python | mit | 10,833 | 0.000831 | """Test Home Assistant scenes."""
from unittest.mock import patch
import pytest
import voluptuous as vol
from homeassistant.components.homeassistant import scene as ha_scene
from homeassistant.components.homeassistant.scene import EVENT_SCENE_RELOADED
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import async_setup_component
from tests.common import async_capture_events, async_mock_service
async def test_reload_config_service(hass):
    """Reload replaces old scenes and fires EVENT_SCENE_RELOADED each time."""
    assert await async_setup_component(hass, "scene", {})
    test_reloaded_event = async_capture_events(hass, EVENT_SCENE_RELOADED)
    # First reload: YAML now defines scene "Hallo"; entity should appear.
    with patch(
        "homeassistant.config.load_yaml_config_file",
        autospec=True,
        return_value={"scene": {"name": "Hallo", "entities": {"light.kitchen": "on"}}},
    ):
        await hass.services.async_call("scene", "reload", blocking=True)
        await hass.async_block_till_done()
    assert hass.states.get("scene.hallo") is not None
    assert len(test_reloaded_event) == 1
    # Second reload: "Hallo" removed from YAML, replaced by "Bye".
    with patch(
        "homeassistant.config.load_yaml_config_file",
        autospec=True,
        return_value={"scene": {"name": "Bye", "entities": {"light.kitchen": "on"}}},
    ):
        await hass.services.async_call("scene", "reload", blocking=True)
        await hass.async_block_till_done()
    assert len(test_reloaded_event) == 2
    # Old scene entity is gone; new one exists.
    assert hass.states.get("scene.hallo") is None
    assert hass.states.get("scene.bye") is not None
async def test_apply_service(hass):
    """scene.apply sets ad-hoc entity states without creating a scene."""
    assert await async_setup_component(hass, "scene", {})
    assert await async_setup_component(hass, "light", {"light": {"platform": "demo"}})
    await hass.async_block_till_done()
    # Plain state string form.
    assert await hass.services.async_call(
        "scene", "apply", {"entities": {"light.bed_light": "off"}}, blocking=True
    )
    assert hass.states.get("light.bed_light").state == "off"
    # Dict form with extra attributes.
    assert await hass.services.async_call(
        "scene",
        "apply",
        {"entities": {"light.bed_light": {"state": "on", "brightness": 50}}},
        blocking=True,
    )
    state = hass.states.get("light.bed_light")
    assert state.state == "on"
    assert state.attributes["brightness"] == 50
    # With a transition, the underlying light.turn_on call must carry it.
    turn_on_calls = async_mock_service(hass, "light", "turn_on")
    assert await hass.services.async_call(
        "scene",
        "apply",
        {
            "transition": 42,
            "entities": {"light.bed_light": {"state": "on", "brightness": 50}},
        },
        blocking=True,
    )
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].domain == "light"
    assert turn_on_calls[0].service == "turn_on"
    assert turn_on_calls[0].data.get("transition") == 42
    assert turn_on_calls[0].data.get("entity_id") == "light.bed_light"
    assert turn_on_calls[0].data.get("brightness") == 50
async def test_create_service(hass, caplog):
    """scene.create makes/updates dynamic scenes; YAML scenes are protected."""
    assert await async_setup_component(
        hass,
        "scene",
        {"scene": {"name": "hallo_2", "entities": {"light.kitchen": "on"}}},
    )
    await hass.async_block_till_done()
    assert hass.states.get("scene.hallo") is None
    assert hass.states.get("scene.hallo_2") is not None
    # Creating with no entities at all is rejected (logged, no entity added).
    assert await hass.services.async_call(
        "scene",
        "create",
        {"scene_id": "hallo", "entities": {}, "snapshot_entities": []},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert "Empty scenes are not allowed" in caplog.text
    assert hass.states.get("scene.hallo") is None
    # Valid create: scene appears with the configured entity.
    assert await hass.services.async_call(
        "scene",
        "create",
        {
            "scene_id": "hallo",
            "entities": {"light.bed_light": {"state": "on", "brightness": 50}},
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    scene = hass.states.get("scene.hallo")
    assert scene is not None
    assert scene.domain == "scene"
    assert scene.name == "hallo"
    assert scene.state == STATE_UNKNOWN
    assert scene.attributes.get("entity_id") == ["light.bed_light"]
    # Re-creating the same scene_id overwrites the previous definition.
    assert await hass.services.async_call(
        "scene",
        "create",
        {
            "scene_id": "hallo",
            "entities": {"light.kitchen_light": {"state": "on", "brightness": 100}},
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    scene = hass.states.get("scene.hallo")
    assert scene is not None
    assert scene.domain == "scene"
    assert scene.name == "hallo"
    assert scene.state == STATE_UNKNOWN
    assert scene.attributes.get("entity_id") == ["light.kitchen_light"]
    # A YAML-configured scene may NOT be overwritten by scene.create.
    assert await hass.services.async_call(
        "scene",
        "create",
        {
            "scene_id": "hallo_2",
            "entities": {"light.bed_light": {"state": "on", "brightness": 50}},
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    assert "The scene scene.hallo_2 already exists" in caplog.text
    scene = hass.states.get("scene.hallo_2")
    assert scene is not None
    assert scene.domain == "scene"
    assert scene.name == "hallo_2"
    assert scene.state == STATE_UNKNOWN
    # Still the original YAML entity, not the attempted overwrite.
    assert scene.attributes.get("entity_id") == ["light.kitchen"]
async def test_snapshot_service(hass, caplog):
    """scene.create with snapshot_entities captures current entity states."""
    assert await async_setup_component(hass, "scene", {"scene": {}})
    await hass.async_block_till_done()
    hass.states.async_set("light.my_light", "on", {"hs_color": (345, 75)})
    assert hass.states.get("scene.hallo") is None
    # Snapshot the current state of light.my_light into a new scene.
    assert await hass.services.async_call(
        "scene",
        "create",
        {"scene_id": "hallo", "snapshot_entities": ["light.my_light"]},
        blocking=True,
    )
    await hass.async_block_till_done()
    scene = hass.states.get("scene.hallo")
    assert scene is not None
    assert scene.attributes.get("entity_id") == ["light.my_light"]
    # Change the light afterwards; activating the scene must restore the
    # snapshotted state (on, hs_color 345/75), not the current one.
    hass.states.async_set("light.my_light", "off", {"hs_color": (123, 45)})
    turn_on_calls = async_mock_service(hass, "light", "turn_on")
    assert await hass.services.async_call(
        "scene", "turn_on", {"entity_id": "scene.hallo"}, blocking=True
    )
    await hass.async_block_till_done()
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].data.get("entity_id") == "light.my_light"
    assert turn_on_calls[0].data.get("hs_color") == (345, 75)
    # Snapshotting a non-existent entity logs an error and creates nothing.
    assert await hass.services.async_call(
        "scene",
        "create",
        {"scene_id": "hallo_2", "snapshot_entities": ["light.not_existent"]},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("scene.hallo_2") is None
    assert (
        "Entity light.not_existent does not exist and therefore cannot be snapshotted"
        in caplog.text
    )
    # Explicit entities and snapshot_entities may be combined.
    assert await hass.services.async_call(
        "scene",
        "create",
        {
            "scene_id": "hallo_3",
            "entities": {"light.bed_light": {"state": "on", "brightness": 50}},
            "snapshot_entities": ["light.my_light"],
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    scene = hass.states.get("scene.hallo_3")
    assert scene is not None
    assert "light.my_light" in scene.attributes.get("entity_id")
    assert "light.bed_light" in scene.attributes.get("entity_id")
async def test_ensure_no_intersection(hass):
    """The same entity may not appear in both entities and snapshot_entities."""
    assert await async_setup_component(hass, "scene", {"scene": {}})
    await hass.async_block_till_done()
    # Schema validation rejects the overlapping entity before any scene
    # is created, raising vol.MultipleInvalid from the service call.
    with pytest.raises(vol.MultipleInvalid) as ex:
        assert await hass.services.async_call(
            "scene",
            "create",
            {
                "scene_id": "hallo",
                "entities": {"light.my_light": {"state": "on", "brightness": 50}},
                "snapshot_entities": ["light.my_light"],
            },
            blocking=True,
        )
        await hass.async_block_till_done()
    assert "entities and snapshot_entities must not overlap" in str(ex.value)
    assert hass.states.get("scene.hallo") is None
async def test_scenes_with_entity(hass):
"""Test findin |
def AND(x1, x2):
    """AND gate as a single perceptron.

    Prints 1 if the weighted sum of the two binary inputs exceeds the
    threshold theta, otherwise prints 0.
    """
    w1, w2, theta = 0.5, 0.5, 0.7
    tmp = x1 * w1 + x2 * w2
    if tmp <= theta:
        print(0)
    elif tmp > theta:
        print(1)


# Exercise the full truth table; only AND(1, 1) clears the 0.7 threshold.
AND(0, 0)
AND(1, 0)
AND(0, 1)
AND(1, 1)
|
chubbymaggie/datasketch | benchmark/lsh_benchmark.py | Python | mit | 3,795 | 0.004743 | import time, argparse, sys, json
from sklearn.datasets import fetch_20newsgroups
import numpy as np
import scipy.stats
from datasketch import MinHashLSH, MinHash
from lshforest_benchmark import bootstrap_data, _compute_jaccard
def benchmark_lsh(num_perm, threshold, index_data, query_data):
    """Index all MinHashes in an LSH and time every query.

    :return: (times, results) - per-query wall-clock durations, and for
        each query the matched keys paired with their exact Jaccard
        similarities, sorted by similarity descending.
    """
    print("Building LSH index")
    lsh = MinHashLSH(threshold, num_perm)
    for key, minhash in zip(index_data.keys, index_data.minhashes[num_perm]):
        lsh.insert(key, minhash)
    print("Querying")
    times = []
    results = []
    for qs, minhash in zip(query_data.sets, query_data.minhashes[num_perm]):
        # time.clock() was deprecated in 3.3 and removed in Python 3.8;
        # perf_counter() is the recommended high-resolution timer.
        start = time.perf_counter()
        result = lsh.query(minhash)
        duration = time.perf_counter() - start
        times.append(duration)
        # Re-rank candidates by their true Jaccard similarity.
        results.append(sorted([[key, _compute_jaccard(qs, index_data.sets[key])]
                               for key in result],
                              key=lambda x: x[1], reverse=True))
    return times, results
def benchmark_linearscan(num_perm, threshold, index_data, query_data):
    """Baseline: compare every query MinHash against every indexed MinHash.

    Same return shape as benchmark_lsh: (times, results).
    """
    times = []
    results = []
    for qs, q in zip(query_data.sets, query_data.minhashes[num_perm]):
        # time.clock() was removed in Python 3.8; use perf_counter().
        start = time.perf_counter()
        result = []
        for key, m in zip(index_data.keys, index_data.minhashes[num_perm]):
            j = q.jaccard(m)
            if j >= threshold:
                result.append(key)
        duration = time.perf_counter() - start
        times.append(duration)
        results.append(sorted([[key, _compute_jaccard(qs, index_data.sets[key])]
                               for key in result],
                              key=lambda x: x[1], reverse=True))
    return times, results
def benchmark_ground_truth(threshold, index_data, query_data):
    """Exact all-pairs Jaccard search, used as the recall ground truth.

    Same return shape as benchmark_lsh: (times, results).
    """
    times = []
    results = []
    for q in query_data.sets:
        # time.clock() was removed in Python 3.8; use perf_counter().
        start = time.perf_counter()
        result = []
        for key, a in zip(index_data.keys, index_data.sets):
            j = _compute_jaccard(q, a)
            if j >= threshold:
                result.append([key, j])
        duration = time.perf_counter() - start
        results.append(sorted(result, key=lambda x: x[1], reverse=True))
        times.append(duration)
    return times, results
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output", type=str, default="lsh_benchmark.json")
    args = parser.parse_args(sys.argv[1:])
    # Benchmark across a range of MinHash permutation counts.
    num_perms = [32, 64, 96, 128, 160, 192, 224, 256]
    output = {"num_perms": num_perms,
              "lsh_times": [], "lsh_results": [],
              "linearscan_times": [], "linearscan_results": [],
              "ground_truth_times": None, "ground_truth_results": None}
    # 1000 indexed sets, 500 query sets, cardinalities drawn from U[10, 500).
    index_data, query_data = bootstrap_data(num_perms, 1000, 500,
                                            scipy.stats.randint(10, 500))
    threshold = 0.9
    for num_perm in num_perms:
        print("Use num_perm = %d" % num_perm)
        print("Running linear scan benchmark")
        linearscan_times, linearscan_results = benchmark_linearscan(
            num_perm, threshold, index_data, query_data)
        print("Running LSH benchmark")
        lsh_times, lsh_results = benchmark_lsh(
            num_perm, threshold, index_data, query_data)
        output["lsh_times"].append(lsh_times)
        output["lsh_results"].append(lsh_results)
        output["linearscan_times"].append(linearscan_times)
        output["linearscan_results"].append(linearscan_results)
    # Ground truth is independent of num_perm, so compute it once.
    print("Running ground truth benchmark")
    output["ground_truth_times"], output["ground_truth_results"] = \
        benchmark_ground_truth(threshold, index_data, query_data)
    average_cardinality = np.mean([len(s) for s in
                                   index_data.sets + query_data.sets])
    print("Average cardinality is", average_cardinality)
    with open(args.output, 'w') as f:
        json.dump(output, f)
|
RudolfCardinal/crate | crate_anon/nlp_manager/number.py | Python | gpl-3.0 | 1,874 | 0 | #!/usr/bin/env python
"""
crate_anon/nlp_manager/number.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Number conversion functions.**
"""
from typing import Optional
def to_float(s: str) -> Optional[float]:
    """
    Convert a string to a float, or return ``None`` on failure.

    Before converting:

    - strips out commas (as thousands separator); this is not
      internationalized well!
    - replaces Unicode minus (U+2212) and en dash (U+2013) with a hyphen
    """
    if s:
        s = s.replace(',', '')  # comma as thousands separator
        s = s.replace('−', '-')  # Unicode minus
        s = s.replace('–', '-')  # en dash
    try:
        return float(s)
    except (TypeError, ValueError):
        # Empty/garbage string -> ValueError; None -> TypeError.
        return None


def to_pos_float(s: str) -> Optional[float]:
    """
    Convert a string to a positive float, by using :func:`to_float` followed
    by :func:`abs`. Returns ``None`` on failure.
    """
    try:
        return abs(to_float(s))
    except TypeError:  # to_float() returned None
        return None
|
moxuanchen/BMS | core/utils/vcode.py | Python | apache-2.0 | 1,445 | 0 | # -*- coding: utf-8 -*-
import hashlib
import random
import requests
from flask import current_app
class Vcode(object):
@staticmethod
def random_verification_code():
return ''.join(random.choice('0123456789') for i in range(6))
@staticmethod
def generate_vcode_sign(data):
SMS_SECRET = 'XiaoYeZi.SMS'
keys = data.keys()
keys.sort()
parameters = "%s%s" % (
SMS_SECRET,
str().join('&%s=%s' % (key, data[key]) for key in keys)
)
sms_sign = hashlib.md5(parameters).hexdigest()
return sms_sign
@staticmethod
def send_verification_code(phone, vcode):
data = {
'sign_name': '尤克里里教室',
'template_code': 'SMS_0001',
'json_params': '{"code": "%s"}' % vcode,
'phone_numbers': str(phone)
}
resp = requests.post(current_app.config['SMS_SERVICE_ | URL'].format(
sign=Vcode.generate_vcode_sign(data)), data=data)
if resp.status_code == 200 and resp.json()['code'] == 0:
return True
else:
print 'error:', str(resp), str(resp.json())
return False
| @staticmethod
def save_vcode_and_phone(phone, vcode):
current_app.redis.set(phone, vcode)
current_app.redis.expire(phone, 5 * 60)
@staticmethod
def get_vcode_by_phone(phone):
return current_app.redis.get(phone)
|
gvlproject/libcloudbridge | cloudbridge/providers/aws/services.py | Python | mit | 63,632 | 0 | """Services | implemented by the AWS provider."""
import ipaddress
import logging
import string
import uuid
from botocore.exceptions import ClientError
import tenacity
import cloudbridge.base.helpers as cb_helpers
from cloudbridge.base.middleware import dispatch
from cloudbridge.base.resources import ClientPagedResultList
from cloudbridge.base.resources import ServerPagedResultList
from cloudbridge.base.services import BaseBucketObjectService
fro | m cloudbridge.base.services import BaseBucketService
from cloudbridge.base.services import BaseComputeService
from cloudbridge.base.services import BaseDnsRecordService
from cloudbridge.base.services import BaseDnsService
from cloudbridge.base.services import BaseDnsZoneService
from cloudbridge.base.services import BaseFloatingIPService
from cloudbridge.base.services import BaseGatewayService
from cloudbridge.base.services import BaseImageService
from cloudbridge.base.services import BaseInstanceService
from cloudbridge.base.services import BaseKeyPairService
from cloudbridge.base.services import BaseNetworkService
from cloudbridge.base.services import BaseNetworkingService
from cloudbridge.base.services import BaseRegionService
from cloudbridge.base.services import BaseRouterService
from cloudbridge.base.services import BaseSecurityService
from cloudbridge.base.services import BaseSnapshotService
from cloudbridge.base.services import BaseStorageService
from cloudbridge.base.services import BaseSubnetService
from cloudbridge.base.services import BaseVMFirewallRuleService
from cloudbridge.base.services import BaseVMFirewallService
from cloudbridge.base.services import BaseVMTypeService
from cloudbridge.base.services import BaseVolumeService
from cloudbridge.interfaces.exceptions import DuplicateResourceException
from cloudbridge.interfaces.exceptions import \
InvalidConfigurationException
from cloudbridge.interfaces.exceptions import InvalidParamException
from cloudbridge.interfaces.exceptions import InvalidValueException
from cloudbridge.interfaces.resources import KeyPair
from cloudbridge.interfaces.resources import MachineImage
from cloudbridge.interfaces.resources import Network
from cloudbridge.interfaces.resources import Snapshot
from cloudbridge.interfaces.resources import TrafficDirection
from cloudbridge.interfaces.resources import VMFirewall
from cloudbridge.interfaces.resources import VMType
from cloudbridge.interfaces.resources import Volume
from .helpers import BotoEC2Service
from .helpers import BotoS3Service
from .helpers import trim_empty_params
from .resources import AWSBucket
from .resources import AWSBucketObject
from .resources import AWSDnsRecord
from .resources import AWSDnsZone
from .resources import AWSFloatingIP
from .resources import AWSInstance
from .resources import AWSInternetGateway
from .resources import AWSKeyPair
from .resources import AWSLaunchConfig
from .resources import AWSMachineImage
from .resources import AWSNetwork
from .resources import AWSRegion
from .resources import AWSRouter
from .resources import AWSSnapshot
from .resources import AWSSubnet
from .resources import AWSVMFirewall
from .resources import AWSVMFirewallRule
from .resources import AWSVMType
from .resources import AWSVolume
log = logging.getLogger(__name__)
class AWSSecurityService(BaseSecurityService):
    """Aggregates the AWS security-related sub-services under one facade."""

    def __init__(self, provider):
        super(AWSSecurityService, self).__init__(provider)
        # Initialize provider services
        self._key_pairs = AWSKeyPairService(provider)
        self._vm_firewalls = AWSVMFirewallService(provider)
        self._vm_firewall_rule_svc = AWSVMFirewallRuleService(provider)

    @property
    def key_pairs(self):
        """Service for managing AWS (EC2) key pairs."""
        return self._key_pairs

    @property
    def vm_firewalls(self):
        """Service for managing VM firewalls (EC2 security groups)."""
        return self._vm_firewalls

    @property
    def _vm_firewall_rules(self):
        """Internal service for individual firewall rules."""
        return self._vm_firewall_rule_svc
class AWSKeyPairService(BaseKeyPairService):
    """AWS implementation of the CloudBridge key-pair service."""

    def __init__(self, provider):
        super(AWSKeyPairService, self).__init__(provider)
        # Boto collection wrapper exposing EC2 key pairs as AWSKeyPair objects.
        self.svc = BotoEC2Service(provider=self.provider,
                                  cb_resource=AWSKeyPair,
                                  boto_collection_name='key_pairs')

    @dispatch(event="provider.security.key_pairs.get",
              priority=BaseKeyPairService.STANDARD_EVENT_PRIORITY)
    def get(self, key_pair_id):
        """Return the key pair with the given id (presumably None if
        not found -- delegated to BotoEC2Service.get)."""
        log.debug("Getting Key Pair Service %s", key_pair_id)
        return self.svc.get(key_pair_id)

    @dispatch(event="provider.security.key_pairs.list",
              priority=BaseKeyPairService.STANDARD_EVENT_PRIORITY)
    def list(self, limit=None, marker=None):
        """List key pairs, with optional pagination (limit/marker)."""
        return self.svc.list(limit=limit, marker=marker)

    @dispatch(event="provider.security.key_pairs.find",
              priority=BaseKeyPairService.STANDARD_EVENT_PRIORITY)
    def find(self, **kwargs):
        """Search key pairs; the only supported filter attribute is 'name'."""
        name = kwargs.pop('name', None)
        # All kwargs should have been popped at this time.
        if len(kwargs) > 0:
            raise InvalidParamException(
                "Unrecognised parameters for search: %s. Supported "
                "attributes: %s" % (kwargs, 'name'))
        log.debug("Searching for Key Pair %s", name)
        return self.svc.find(filters={'key-name': name})

    @dispatch(event="provider.security.key_pairs.create",
              priority=BaseKeyPairService.STANDARD_EVENT_PRIORITY)
    def create(self, name, public_key_material=None):
        """Import a key pair under ``name``.

        If no public key material is supplied, a key pair is generated
        client-side and the private half is attached to the returned
        object's ``material`` (AWS never stores it).
        Raises DuplicateResourceException if the name already exists.
        """
        AWSKeyPair.assert_valid_resource_name(name)
        private_key = None
        if not public_key_material:
            public_key_material, private_key = cb_helpers.generate_key_pair()
        try:
            kp = self.svc.create('import_key_pair', KeyName=name,
                                 PublicKeyMaterial=public_key_material)
            kp.material = private_key
            return kp
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidKeyPair.Duplicate':
                raise DuplicateResourceException(
                    'Keypair already exists with name {0}'.format(name))
            else:
                raise e

    @dispatch(event="provider.security.key_pairs.delete",
              priority=BaseKeyPairService.STANDARD_EVENT_PRIORITY)
    def delete(self, key_pair):
        """Delete a key pair given either an AWSKeyPair object or an id.

        Silently does nothing if the key pair does not exist.
        """
        key_pair = (key_pair if isinstance(key_pair, AWSKeyPair) else
                    self.get(key_pair))
        if key_pair:
            # pylint:disable=protected-access
            key_pair._key_pair.delete()
class AWSVMFirewallService(BaseVMFirewallService):
    def __init__(self, provider):
        super(AWSVMFirewallService, self).__init__(provider)
        # Boto collection wrapper exposing EC2 security groups as
        # CloudBridge AWSVMFirewall resources.
        self.svc = BotoEC2Service(provider=self.provider,
                                  cb_resource=AWSVMFirewall,
                                  boto_collection_name='security_groups')
    @dispatch(event="provider.security.vm_firewalls.get",
              priority=BaseVMFirewallService.STANDARD_EVENT_PRIORITY)
    def get(self, vm_firewall_id):
        """Return the firewall (security group) with the given id."""
        log.debug("Getting Firewall Service with the id: %s", vm_firewall_id)
        return self.svc.get(vm_firewall_id)
    @dispatch(event="provider.security.vm_firewalls.list",
              priority=BaseVMFirewallService.STANDARD_EVENT_PRIORITY)
    def list(self, limit=None, marker=None):
        """List firewalls, with optional pagination (limit/marker)."""
        return self.svc.list(limit=limit, marker=marker)
    @cb_helpers.deprecated_alias(network_id='network')
    @dispatch(event="provider.security.vm_firewalls.create",
              priority=BaseVMFirewallService.STANDARD_EVENT_PRIORITY)
    def create(self, label, network, description=None):
        """Create a security group in the given network (VPC).

        :param label: CloudBridge label; a unique AWS group name is
            derived from it with the 'cb-fw' prefix.
        :param network: a Network object or a network (VPC) id string.
        :param description: optional description applied to the resource.
        """
        AWSVMFirewall.assert_valid_resource_label(label)
        name = AWSVMFirewall._generate_name_from_label(label, 'cb-fw')
        network_id = network.id if isinstance(network, Network) else network
        # NOTE(review): the EC2 Description field is set to the generated
        # name, not to the `description` argument; that argument is applied
        # to the resource afterwards (presumably stored as a tag) -- confirm.
        obj = self.svc.create('create_security_group', GroupName=name,
                              Description=name,
                              VpcId=network_id)
        obj.label = label
        obj.description = description
        return obj
@dispatch(event="provider.security.vm_firewalls.find",
priority=BaseVMFirewallService.STANDARD_EVENT_PRIORITY)
def find(self, ** |
codesmythe/gnucash | bindings/python/example_scripts/priceDB_test.py | Python | gpl-2.0 | 1,772 | 0.009594 | #!/usr/bin/env python3
# Test file for price database stuff
# To update the price database call
# $PATH/gnucash --add-price-quotes $PATHTOFILE
# before running this.
# Adding to a calling bash script would be better
# Although calling it from here would be even better!
# OR: export PYTHONPATH=<path-to-gnucash-inst-dir>/lib/python3.7/site-packages:$PYTHONPATH
# You may have to adjust the above path to your local system (lib->lib64, python3.7->...)
# Then: ipython3
# The account file is not saved but always use a disposable copy.
# Change, FILE, CURRENCY and STOCK to those defined in your test account.
## @file
# @brief Test file for price database stuff
# @author Mike Evans
# @ingroup python_bindings_examples
f | rom gnucash import Session
# FILE should be the path to your existing gnucash file/database
# For a file, simply pass the pathname, for a database you can use
# these forms:
# mysql://user:password@host/dbname
# postgres://user:passw | ord@host[:port]/dbname (the port is optional)
#
FILE = "PATH_TO_YOUR_TEST_FILE" ## Fail is not saved but use a copy anyway
session = Session(FILE, True, False, False)
root = session.book.get_root_account()
book = session.book
pdb = book.get_price_db()
comm_table = book.get_table()
gbp = comm_table.lookup("CURRENCY", "SOME_CURRENCY")
arm = comm_table.lookup("NASDAQ", "SOME_STOCK")
latest = pdb.lookup_latest(arm,gbp) # from the table, NOT live data
value = latest.get_value()
pl = pdb.get_prices(arm,gbp)
for pr in pl:
source = pr.get_source()
time = pr.get_time64()
v=pr.get_value()
price = float(v.num)/v.denom
print(time, source, price)
if len(pl) > 0:
v0 = pl[0].get_value()
print(arm.get_fullname(), float(v0.num) / float(v0.denom ))
session.end()
session.destroy()
quit()
|
chenjun0210/tensorflow | tensorflow/contrib/learn/python/learn/estimators/state_saving_rnn_estimator_test.py | Python | apache-2.0 | 26,488 | 0.002303 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is d | istributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.estimators.state_saving_rnn_estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from te | nsorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import target_column as target_column_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import state_saving_rnn_estimator as ssre
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class PrepareInputsForRnnTest(test.TestCase):
def _test_prepare_inputs_for_rnn(self, sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected):
features_by_time = ssre._prepare_inputs_for_rnn(sequence_features,
context_features,
sequence_feature_columns,
num_unroll)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(data_flow_ops.initialize_all_tables())
features_val = sess.run(features_by_time)
self.assertAllEqual(expected, features_val)
def testPrepareInputsForRnnBatchSize1(self):
num_unroll = 3
expected = [
np.array([[11., 31., 5., 7.]]), np.array([[12., 32., 5., 7.]]),
np.array([[13., 33., 5., 7.]])
]
sequence_features = {
'seq_feature0': constant_op.constant([[11., 12., 13.]]),
'seq_feature1': constant_op.constant([[31., 32., 33.]])
}
sequence_feature_columns = [
feature_column.real_valued_column(
'seq_feature0', dimension=1),
feature_column.real_valued_column(
'seq_feature1', dimension=1),
]
context_features = {
'ctx_feature0': constant_op.constant([[5.]]),
'ctx_feature1': constant_op.constant([[7.]])
}
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnBatchSize2(self):
num_unroll = 3
expected = [
np.array([[11., 31., 5., 7.], [21., 41., 6., 8.]]),
np.array([[12., 32., 5., 7.], [22., 42., 6., 8.]]),
np.array([[13., 33., 5., 7.], [23., 43., 6., 8.]])
]
sequence_features = {
'seq_feature0':
constant_op.constant([[11., 12., 13.], [21., 22., 23.]]),
'seq_feature1':
constant_op.constant([[31., 32., 33.], [41., 42., 43.]])
}
sequence_feature_columns = [
feature_column.real_valued_column(
'seq_feature0', dimension=1),
feature_column.real_valued_column(
'seq_feature1', dimension=1),
]
context_features = {
'ctx_feature0': constant_op.constant([[5.], [6.]]),
'ctx_feature1': constant_op.constant([[7.], [8.]])
}
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnNoContext(self):
num_unroll = 3
expected = [
np.array([[11., 31.], [21., 41.]]), np.array([[12., 32.], [22., 42.]]),
np.array([[13., 33.], [23., 43.]])
]
sequence_features = {
'seq_feature0':
constant_op.constant([[11., 12., 13.], [21., 22., 23.]]),
'seq_feature1':
constant_op.constant([[31., 32., 33.], [41., 42., 43.]])
}
sequence_feature_columns = [
feature_column.real_valued_column(
'seq_feature0', dimension=1),
feature_column.real_valued_column(
'seq_feature1', dimension=1),
]
context_features = None
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnSparse(self):
num_unroll = 2
embedding_dimension = 8
expected = [
np.array([[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.]]),
np.array([[1., 1., 1., 1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2.],
[1., 1., 1., 1., 1., 1., 1., 1.]])
]
sequence_features = {
'wire_cast':
sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1],
[2, 0, 0], [2, 1, 1]],
values=[
b'marlo', b'stringer', b'omar', b'stringer', b'marlo',
b'marlo', b'omar'
],
dense_shape=[3, 2, 2])
}
wire_cast = feature_column.sparse_column_with_keys(
'wire_cast', ['marlo', 'omar', 'stringer'])
sequence_feature_columns = [
feature_column.embedding_column(
wire_cast,
dimension=embedding_dimension,
combiner='sum',
initializer=init_ops.ones_initializer())
]
context_features = None
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnSparseAndDense(self):
num_unroll = 2
embedding_dimension = 8
dense_dimension = 2
expected = [
np.array([[1., 1., 1., 1., 1., 1., 1., 1., 111., 112.],
[1., 1., 1., 1., 1., 1., 1., 1., 211., 212.],
[1., 1., 1., 1., 1., 1., 1., 1., 311., 312.]]),
np.array([[1., 1., 1., 1., 1., 1., 1., 1., 121., 122.],
[2., 2., 2., 2., 2., 2., 2., 2., 221., 222.],
[1., 1., 1., 1., 1., 1., 1., 1., 321., 322.]])
]
sequence_features = {
'wire_cast':
sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1],
[2, 0, 0], [2, 1, 1]],
values=[
b'marlo', b'stringer', b'omar', b'stringer', b'marlo',
b'marlo', b'omar'
],
dense_shape=[3, 2, 2]),
'seq_feature0':
constant_op.constant([[[111., 112.], [121., 122.]],
[[211., 212.], [221., 222.]],
|
rhinstaller/blivet-gui | tests/blivetgui_tests/utils_test.py | Python | gpl-2.0 | 5,595 | 0.003932 | # -*- coding: utf-8 -*-
import unittest
from unittest.mock import MagicMock, patch
from blivetgui.blivet_utils import BlivetUtils, FreeSpaceDevice
from blivetgui.i18n import _
from blivet.size import Size
class FreeSpaceDeviceTest(unittest.TestCase):
def test_free_basic(self):
free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[MagicMock(type="disk")], logical=True)
self.assertTrue(free.is_logical)
self.assertFalse(free.is_extended)
self.assertFalse(free.is_primary)
self.assertEqual(len(free.children), 0)
self.assertEqual(free.type, "free space")
self.assertIsNotNone(free.format)
self.assertIsNone(free.format.type)
self.assertEqual(free.disk, free.parents[0])
def test_free_type(self):
disk = MagicMock(type="disk", children=[], is_disk=True, format=MagicMock(type="disklabel"))
free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[disk])
self.assertTrue(free.is_empty_disk)
self.assertFalse(free.is_uninitialized_disk)
self.assertFalse(free.is_free_region)
disk = MagicMock(type="disk", children=[], is_disk=True, format=MagicMock(type=None))
free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[disk])
self.assertTrue(free.is_uninitialized_disk)
self.assertFalse(free.is_empty_disk)
self.assertFalse(free.is_free_region)
disk = MagicMock(type="disk", children=[MagicMock()], is_disk=True, format=MagicMock(type="disklabel"))
free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[disk])
self.assertTrue(free.is_free_region)
self.assertFalse(free.is_empty_disk)
self.assertFalse(free.is_uninitialized_disk)
def test_free_disk(self):
# free space on a disk
disk = MagicMock(type="disk", children=[], is_disk | =True, format=MagicMock(type=None))
free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[disk])
self.assertEqual(free.disk, disk)
# free space in a vg
parent = MagicMock(type="lvmvg", children=[], is_disk=False, format=MagicMock | (type=None),
parents=[MagicMock(type="partition", children=[MagicMock()], is_disk=False, parents=[disk],
format=MagicMock(type="lvmpv"))])
free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[parent])
self.assertEqual(free.disk, disk)
def test_free_protected(self):
disk = MagicMock(type="disk", children=[], is_disk=True, format=MagicMock(type=None))
free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[disk])
self.assertEqual(free.protected, disk.protected)
class BlivetUtilsTest(unittest.TestCase):
def test_resizable(self):
with patch("blivetgui.blivet_utils.BlivetUtils.blivet_reset", lambda _: True):
storage = BlivetUtils()
device = MagicMock(type="", size=Size("1 GiB"), protected=False, format_immutable=False, children=[])
device.format = MagicMock(exists=True, system_mountpoint=None)
device.format.return_value = None
# swap is not resizable
device.format.configure_mock(type="swap")
res = storage.device_resizable(device)
self.assertFalse(res.resizable)
self.assertEqual(res.error, _("Resizing of swap format is currently not supported"))
self.assertEqual(res.min_size, Size("1 MiB"))
self.assertEqual(res.max_size, Size("1 GiB"))
# mounted devices are not resizable
device.format.configure_mock(type="ext4", system_mountpoint="/")
res = storage.device_resizable(device)
self.assertFalse(res.resizable)
self.assertEqual(res.error, _("Mounted devices cannot be resized"))
self.assertEqual(res.min_size, Size("1 MiB"))
self.assertEqual(res.max_size, Size("1 GiB"))
# resizable device
device.configure_mock(resizable=True, max_size=Size("2 GiB"), min_size=Size("500 MiB"))
device.format.configure_mock(resizable=True, type="ext4", system_mountpoint=None)
res = storage.device_resizable(device)
self.assertTrue(res.resizable)
self.assertIsNone(res.error)
self.assertEqual(res.min_size, Size("500 MiB"))
self.assertEqual(res.max_size, Size("2 GiB"))
# resizable device and non-resizable format
device.configure_mock(resizable=True, max_size=Size("2 GiB"), min_size=Size("500 MiB"))
device.format.configure_mock(resizable=False, type="ext4")
res = storage.device_resizable(device)
self.assertFalse(res.resizable)
self.assertIsNone(res.error)
self.assertEqual(res.min_size, Size("1 MiB"))
self.assertEqual(res.max_size, Size("1 GiB"))
# LV with snapshot -> not resizable
with patch("blivetgui.blivet_utils.BlivetUtils._has_snapshots", lambda _, device: True):
device.configure_mock(type="lvmlv", resizable=True, max_size=Size("2 GiB"), min_size=Size("500 MiB"))
device.format.configure_mock(resizable=True, type="ext4")
res = storage.device_resizable(device)
self.assertFalse(res.resizable)
self.assertIsNotNone(res.error)
self.assertEqual(res.min_size, Size("1 MiB"))
self.assertEqual(res.max_size, Size("1 GiB"))
if __name__ == "__main__":
unittest.main()
|
thomas-sterrenburg/fingerprinting-python | src/main.py | Python | mit | 16,603 | 0.002831 | # Copyright 2017 Thomas Sterrenburg
#
# Licensed under the MIT License (the License); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at https://opensource.org/licenses/MIT#
import glob
import re
import sys
import time
from src.io.storage import get_request_items, store_fingerprint, get_number_of_malformed_requests
from static import variables
from static.arguments import parse_arguments
from static.blacklist import Blacklist
from static.logger import setup_logger, LOGNAME_START
from src.exchange.http import Request, UrlInfo, submit_string
from src.static.constants import NO_RESPONSE_CODE, DATA_NONE, LEXICAL, SEMANTIC, SYNTACTIC, DATA_LIST, KNOWN, \
SERVER_NAMES
logger = setup_logger()
global host_total
def add_characteristic(category, name, value, fingerprint, data_type=DATA_NONE):
if not fingerprint[category].has_key(name):
# TODO maybe remove data type
if data_type == 'list':
value = [value]
fingerprint[category][name] = value
return
if fingerprint[category][name] == value:
return
def get_characteristics(test_name, response, fingerprint, host, host_index, NO_RESPONSE=None):
# logger.debug("applying %s", test_name, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
response_code, response_text = response.return_code()
server_name_claimed = response.server_name()
if response_code not in [NO_RESPONSE, NO_RESPONSE_CODE]:
add_characteristic(LEXICAL, response_code, response_text, fingerprint)
add_characteristic(LEXICAL, 'SERVER_NAME_CLAIMED', server_name_claimed, fingerprint)
# nginx 404 test
# if response_code == '404':
# server_name_404 = get_server_name_404(response)
# if len(server_name_404) > 0:
# add_characteristic(LEXICAL, 'SERVER_NAME_404', server_name_404, fingerprint)
if test_name.startswith('malformed_'):
add_characteristic(SEMANTIC, test_name, response_code, fingerprint)
if response.has_header('Allow'):
data = response.header_data('Allow')
add_characteristic(SYNTACTIC, 'ALLOW_ORDER', data, fingerprint)
if response.has_header('Public'):
data = response.header_data('Public')
add_characteristic(SYNTACTIC, 'PUBLIC_ORDER', data, fingerprint)
if response.has_header('Vary'):
data = response.header_data('Vary')
add_characteristic(SYNTACTIC, 'VARY_ORDER', data, fingerprint)
if response_code not in [NO_RESPONSE_CODE, NO_RESPONSE]:
header_names = response.header_names()
add_characteristic(SYNTACTIC, 'HEADER_ORDER', header_names, fingerprint, data_type=DATA_LIST)
if response.has_header('ETag'):
data = response.header_data('ETag')
add_characteristic(SYNTACTIC, 'ETag', data, fingerprint)
elif response.has_header('Etag'):
data = response.header_data('Etag')
add_characteristic(SYNTACTIC, 'ETag', data, fingerprint)
def default_get(host, host_index, fingerprint):
request = Request(host, host_index, logger)
response = request.submit
if response.response_code == NO_RESPONSE_CODE:
raise ValueError('default_get failed')
else:
get_characteristics('default_get', response, fingerprint, host, host_index)
def default_options(host, host_index, fingerprint):
request = Request(host, host_index, logger, method='OPTIONS')
response = request.submit
get_characteristics('default_options', response, fingerprint, host, host_index)
def unknown_method(host, host_index, fingerprint):
request = Request(host, host_index, logger, method='ABCDEFG')
response = request.submit
get_characteristics('unknown_method', response, fingerprint, host, host_index)
def unauthorized_activity(host, host_index, fingerprint):
activities = ('OPTIONS', 'TRACE', 'GET', 'HEAD', 'DELETE',
'PUT', 'POST', 'COPY', 'MOVE', 'MKCOL',
'PROPFIND', 'PROPPATCH', 'LOCK', 'UNLOCK',
'SEARCH')
for activity in activities:
request = Request(host, host_index, logger, method=activity)
response = request.submit
get_characteristics('unauthorized_activity_' + activity, response, fingerprint, host, host_index)
def empty_uri(host, host_index, fingerprint):
request = Request(host, host_index, logger, local_uri='/ABCDEFG')
response = request.submit
get_characteristics('empty_uri', response, fingerprint, host, host_index)
def malformed_method(host, host_index, fingerprint):
malformed_methods = get_malformed_methods()
for index, method in enumerate(malformed_methods):
request = Request(host, host_index, logger)
request.method_line = method
response = request.submit
get_characteristic | s('MALFORMED_' + ('000' + str(index))[-3:], response, fingerprint, host, host_index)
def get_malformed_methods():
activities = 'GET', 'HEAD', 'POST', 'PUT'
malformed_methods_list = []
for activity in a | ctivities:
malformed_methods = (
activity,
activity + '/',
activity + '/1.0',
activity + ' / HTTP/123.45',
activity + ' / HTTP/999.99',
activity + ' / HTP/1.0',
activity + ' / HTT/1.0',
activity + ' / HTTP/7.Q',
activity + ' / HTTP/1.0X',
activity + ' /abcdefghijklmnopqrstuvwxyz/.. HTTP/1.0',
activity + ' /./././././././././././././././ HTTP/1.0',
activity + ' /.. HTTP/1.0',
activity + '\t/\tHTTP/1.0',
activity + '\t/\tHTTP/1.0',
activity + ' / H',
activity + ' / ' + 'HTTP/' + '1' * 1000 + '.0',
activity + ' FTP://abcdefghi HTTP/1.0',
activity + ' C:\ HTTP/1.0',
' ' * 1000 + activity + ' / HTTP/1.0',
'\n' + activity + ' / HTTP/1.0',
)
malformed_methods_list += malformed_methods
malformed_activity_independent = (
'GET GET GET',
'HELLO',
'%47%45%54 / HTTP/1.0',
'GEX\bT / HTTP/1.0'
)
malformed_methods_list += malformed_activity_independent
return malformed_methods_list
def unavailable_accept(host, host_index, fingerprint):
request = Request(host, host_index, logger)
request.add_header('Accept', 'abcd/efgh')
response = request.submit
get_characteristics('unavailable_accept', response, fingerprint, host, host_index)
def long_content_length(host, host_index, fingerprint):
request = Request(host, host_index, logger)
request.add_header('Content-Length', str(sys.maxint))
request.body = 'abcdefgh'
response = request.submit
get_characteristics('long_content_length', response, fingerprint, host, host_index)
def get_fingerprint(host, host_index, blacklist):
fingerprint = {
LEXICAL: {},
SYNTACTIC: {},
SEMANTIC: {}
}
url_info = UrlInfo(host)
request_items = get_request_items()
for name, request_string in request_items.iteritems():
try:
response = submit_string(request_string, name, url_info, host_index, logger)
get_characteristics(name, response, fingerprint, host, host_index)
except ValueError as e:
logger.warning("%s", e,
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
return fingerprint
# TODO deprecate
fingerprint_methods = [default_get, default_options, unknown_method, unauthorized_activity, empty_uri,
malformed_method, unavailable_accept, long_content_length]
for method in fingerprint_methods:
# logger.debug("processing %s", method.__name__, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
try:
logger.debug('applying method %s', method.__name__,
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
method(host, host_index, fingerprint)
except ValueErr |
RajShahCrazy/youtube-dl | youtube_dl/__main__.py | Python | unlicense | 390 | 0.002564 | #!/usr/bin/env python
# Execute with
# $ python youtube_dl/__main__.py (2.6+)
# $ pytho | n -m youtube_dl (2.7+)
import sys
if __package__ is None and not hasattr(sys, "frozen"):
# direct call of __main__.py
import os.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import youtube_dl
if __name__ == '__main__':
youtube_dl. | main()
|
MichaelAquilina/numpy | numpy/core/tests/test_multiarray.py | Python | bsd-3-clause | 220,691 | 0.001137 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two. | size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.cha | r, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dt |
jan-huiskes/Project | data/csv2json.py | Python | gpl-3.0 | 1,876 | 0.002665 | # Jan Huiskes
# 10740929
import csv
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
js | onfile = open('data.json', 'w')
csvfile3 = open('data.csv', 'r')
def p2f(x):
return float(x.strip('%'))
jsonfile.write('{"data" : {')
jsonfile.write('\n')
i = 0
for row in csvfile3:
row = row.split()
if len(row) == 12:
j = 1
else:
j = 0
jsonfile.write('"' + row[10 + j] + '": {')
if int(row[4 + j]) == 0:
jsonfile.write('"fillKey": "republican",')
jsonfile. | write('\n')
jsonfile.write('"kiesman": ' + row[7 + j] + ',')
elif int(row[7 + j]) == 0:
jsonfile.write('"fillKey": "democrat",')
jsonfile.write('\n')
jsonfile.write('"kiesman": ' + row[4 + j] + ',')
else:
jsonfile.write('"fillKey": "tot",')
jsonfile.write('\n')
jsonfile.write('"totkiesman": ' + str((int(row[4 + j]) + int(row[7 + j]))) + ',')
jsonfile.write('\n')
jsonfile.write('"Dkiesman": ' + row[4 + j] + ',')
jsonfile.write('\n')
jsonfile.write('"Rkiesman": ' + row[7 + j] + ',')
jsonfile.write('\n')
jsonfile.write('"Rvote": ' + str(p2f(row[6 + j])) + ',')
jsonfile.write('\n')
jsonfile.write('"Dvote": ' + str(p2f(row[3 + j])) + ',')
jsonfile.write('\n')
if j == 0:
jsonfile.write('"state": "' + row[0] + '",')
else:
jsonfile.write('"state": "' + row[0] + ' ' + row[1] + '",')
jsonfile.write('\n')
jsonfile.write('"Lvote": ' + str(p2f(row[9 + j])) + ',')
jsonfile.write('\n')
jsonfile.write('"Ovote": ' + str((100 - p2f(row[3 + j]) - p2f(row[6 + j]) - p2f(row[9 + j]))) + '}')
if i != 51: # dont write a comma after the last row
jsonfile.write(',\n')
else:
jsonfile.write('\n')
i += 1
jsonfile.write('\n')
jsonfile.write('}}')
jsonfile.close()
csvfile3.close()
|
rbnvrw/FRAPalyzer | test.py | Python | mit | 258 | 0.003876 | import nose
from | os import path
file_path = path.abspath(__file__)
tests_path = path.join(path.abspath(path.dirname(file_path)), "tests")
nose.main(argv=[path.abspath(__file__), "--with-coverage", "--cover-erase", "--cover-packa | ge=frapalyzer", tests_path])
|
asifr/pmquery | pmquery.py | Python | mit | 6,143 | 0.030288 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
'''
pmquery - Query PubMed and download results to text files
## Requirements
If you don't have the pip utility first install it with:
> easy_install pip
Install all the mandatory dependencies by typing:
> pip install -r requirements.txt
## Usage
Before running the script make sure you edit the config file.
The `term` and `ident` parameters indicate the search term
and a unique identifier (no spaces allowed), respectively.
To execute the script pmquery uses a Makefile, although
executing the python script will produce the same results.
1 Query the database:
> make query
or
> python pmquery.py
2. Delete all data folders, this preserves zipped archives
but removes the individual text files:
> make clean
Copyright (c) 2013— Asif Rahman
License: MIT (see LICENSE for details)
'''
__author__ = 'Asif Rahman'
__version__ = (0, 1, 0, '')
__license__ = 'MIT'
from os import path, makedirs
import requests
from xml.dom import minidom
import json
import time
from ConfigParser import RawConfigParser
import logging
import subprocess
VERSION_STRING = '%d.%d.%d%s' % __version__
# Figure out installation directory
installation_dir, _ = path.split(path.abspath(__file__))
# Set up configuration settings
config = RawConfigParser()
config.read(path.join(installation_dir, 'config'))
logging.basicConfig(
filename = config.get('log', 'filename'),
level = getattr(logging, config.get('log', 'level')),
format = config.get('log', 'format'),
datefmt = config.get('log', 'datefmt')
)
logging.getLogger("requests").setLevel(logging.WARN)
# Shared logger instance
log = logging.getLogger()
term = config.get('search', 'term')
data_dir = path.join(installation_dir, config.get('data', 'dirname'))
query_results_dir = path.join(installation_dir, config.get('data', 'dirname'), config.get('search', 'ident'))
if not path.exists(query_results_dir):
makedirs(query_results_dir)
email = 'email@yourdomain.com'
tool = 'pmquery'
database = 'pubmed'
retmax = 100
retmode = 'xml'
retstart = 0
def parse_xml(elm, idx, default):
    """Safely extract the text content of a minidom element.

    :param elm: a minidom NodeList (when ``idx`` is given) or a single
        element (when ``idx`` is ``None``).
    :param idx: index into ``elm``, or ``None`` to use ``elm`` directly.
    :param default: value returned when the element/index/text is missing.
    :returns: the text of the element's first child node, or ``default``
        on any lookup failure (missing index, no children, etc.).
    """
    try:
        if idx is not None:
            elm = elm[idx]
        return elm.childNodes[0].data
    except Exception:
        # Any structural problem (bad index, empty node, None input)
        # degrades to the caller-supplied default.
        return default
def text_output(xml,count):
    """Return a JSON string of article records parsed from E-Fetch XML.

    :param xml: XML document text returned by the E-Fetch endpoint.
    :param count: number of records to extract. If the document holds
        fewer articles than ``count``, the trailing entries are emitted
        with empty fields (``parse_xml`` falls back to its defaults).
    :returns: JSON array of objects with keys pmid, title, authors,
        journal, year, volume, issue, pages, abstract.
    """
    xmldoc = minidom.parseString(xml.encode('utf-8').strip())
    jsonout = []
    for i in range(count):
        # Per-article fields; parse_xml returns '' on any missing node.
        title = parse_xml(xmldoc.getElementsByTagName('ArticleTitle'), i, '')
        pmid = parse_xml(xmldoc.getElementsByTagName('PMID'), i, '')
        abstract = parse_xml(xmldoc.getElementsByTagName('AbstractText'), i, '')
        # Author names are rendered as "LastName, Initials"; an author
        # missing either part is recorded as an empty string.
        try:
            authors = xmldoc.getElementsByTagName('AuthorList')
            authors = authors[i].getElementsByTagName('Author')
            authorlist = []
            for author in authors:
                LastName = parse_xml(author.getElementsByTagName('LastName'), 0, '')
                Initials = parse_xml(author.getElementsByTagName('Initials'), 0, '')
                if LastName != '' and Initials != '':
                    authorlist.append('%s, %s' % (LastName, Initials))
                else:
                    authorlist.append('')
        except Exception:
            authorlist = []
            pass
        # Journal metadata lives under Journal[i]/JournalIssue[0].
        try:
            journalinfo = xmldoc.getElementsByTagName('Journal')[i]
            journalIssue = journalinfo.getElementsByTagName('JournalIssue')[0]
        except Exception:
            journalinfo = None
            journalIssue = None
            pass
        journal = ''
        year = ''
        volume = ''
        issue = ''
        pages = ''
        if journalinfo is not None:
            journal = parse_xml(journalinfo.getElementsByTagName('Title'), 0, '')
            year = parse_xml(journalIssue.getElementsByTagName('Year'), 0, '')
            volume = parse_xml(journalIssue.getElementsByTagName('Volume'), 0, '')
            issue = parse_xml(journalIssue.getElementsByTagName('Issue'), 0, '')
            # Use index i so each record gets its own pagination (the
            # previous code always read the first article's MedlinePgn).
            pages = parse_xml(xmldoc.getElementsByTagName('MedlinePgn'), i, '')
        jsonout.append({
            'pmid':pmid,
            'title':title,
            'authors':authorlist,
            'journal':journal,
            'year':year,
            'volume':volume,
            'issue':issue,
            'pages':pages,
            'abstract':abstract
        })
    return json.dumps(jsonout)
# Prepare to query E-Search
utilsparams = {
'db':database,
'tool':tool,
'email':email,
'term':term,
'usehistory':'y',
'retmax':retmax,
'retstart':retstart
}
url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?'
r = requests.get(url, params = utilsparams)
data = r.text
xmldoc = minidom.parseString(data)
ids = xmldoc.getElementsByTagName('Id')
if len(ids) == 0:
print 'QueryNotFound'
exit()
count = xmldoc.getElementsByTagName('Count')[0].childNodes[0].data
itr = int(count)/retmax
# Save some general information about this query
dest = data_dir + '/' + config.get('search','ident') + '.json'
f = open(dest, 'w+')
f.write(json.dumps({'term':term,'ident':config.get('search','ident'),'count':count,'mtime':int(time.time())}))
f.close()
# Write text files containing results from E-Fetch
for x in xrange(0,itr+1):
retstart = x*utilsparams['retmax']
utilsparams['retstart'] = retstart
url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?'
r = requests.get(url, params = utilsparams)
data = r.text
xmldoc = minidom.parseString(data)
ids = xmldoc.getElementsByTagName('Id')
id = []
for i in ids:
id.append(i.childNodes[0].data)
fetchparams = {
'db':database,
'tool':tool,
'email':email,
'id':','.join(id),
'retmode':retmode
}
url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'
r = requests.get(url, params = fetchparams)
data = r.text
s = text_output(data,retmax)
dest = query_results_dir + '/query_results_%i.json' % retstart
f = open(dest, 'w+')
f.write(s)
f.close()
# Create a zipped archive of the data
PIPE = subprocess.PIPE
pd = subprocess.Popen(['/usr/bin/zip', '-r', config.get('search','ident'), config.get('search','ident'), config.get('search','ident') + '.json'],
stdout=PIPE, stderr=PIPE, cwd=data_dir)
stdout, stderr = pd.communicate() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.