| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 values) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ujiro99/auto_logger
|
logger/merge.py
|
1
|
2373
|
import fileinput
import os
import re
from datetime import datetime as dt

from logger import log


class Merge:
    """
    Merge multiple log files.
    """

    class Parsed:
        """
        Parsed datetime and log line.
        """
        def __init__(self, time=None, line=None):
            self.time = time  # type: datetime
            self.line = line  # type: bytes

    TIME_FMT = '%H:%M:%S.%f'           # format of time stamps
    TIME_FMT_LEN = 12                  # time stamp length
    PATTERN = re.compile(r'(.+?)/.*')  # pattern to extract timestamp
    FILE_SUFFIX = '.merged.log'        # merged file's name suffix
    FILE_ENCODE = 'utf8'               # log file's encoding

    def exec(self, dir_path):
        """
        Merge files and sort lines by timestamp.
        :param str dir_path: Directory path which contains log files.
        :return: Merge result.
        :rtype bool
        """
        dir_path = dir_path.rstrip('/')
        log.i("- start merge: [%s]" % dir_path)
        lines = [self.Parsed(dt.min, b'')]
        files = list(self.__files(dir_path))
        if len(files) == 0:
            return False

        for l in list(fileinput.input(files, mode="rb")):
            p = self.__parse(l)
            if p.time is None:
                # Continuation line without a timestamp: append it to the
                # previous entry.
                lines[-1].line = lines[-1].line + p.line
                continue
            lines.append(p)

        log.i("- write merged file: [%s%s]" % (dir_path, Merge.FILE_SUFFIX))
        lines = sorted(lines, key=lambda x: x.time)
        with open(dir_path + Merge.FILE_SUFFIX, "wb") as fd:
            for l in lines:
                fd.write(l.line)
        return True

    def __files(self, dir_path):
        """
        Find files.
        :return: Iterator[str]
        """
        for root, dirs, files in os.walk(dir_path):
            for file in files:
                ret = os.path.join(root, file)
                log.d(ret)
                yield ret
        return

    def __parse(self, byte):
        """
        Parse log line.
        :param bytes byte:
        :return: Parse result. If decoding fails, the returned time is None.
        :rtype Merge.Parsed
        """
        try:
            s = byte[0:Merge.TIME_FMT_LEN].decode(Merge.FILE_ENCODE)
            t = dt.strptime(s, Merge.TIME_FMT)
        except Exception as e:
            log.d(e)
            return self.Parsed(line=byte)
        return self.Parsed(t, byte)
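

# Illustrative usage sketch (not from the original module; paths are
# hypothetical): assumes a "./logs" directory of log files whose lines start
# with an "%H:%M:%S.%f" timestamp; the merged output would then be written
# to "./logs.merged.log".
if __name__ == '__main__':
    merged = Merge().exec('./logs')
    print('merged' if merged else 'no log files found')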
|
mit
| 8,957,099,488,283,862,000
| 26.917647
| 76
| 0.52381
| false
| 3.742902
| false
| false
| false
|
wmde/jenkins-job-builder
|
jenkins_jobs/modules/publishers.py
|
1
|
140186
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2012 Varnish Software AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Publishers define actions that the Jenkins job should perform after
the build is complete.
**Component**: publishers
:Macro: publisher
:Entry Point: jenkins_jobs.publishers
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.errors import JenkinsJobsException
import logging
import sys
import random
def archive(parser, xml_parent, data):
"""yaml: archive
Archive build artifacts
:arg str artifacts: path specifier for artifacts to archive
:arg str excludes: path specifier for artifacts to exclude
:arg bool latest-only: only keep the artifacts from the latest
successful build
:arg bool allow-empty: pass the build if no artifacts are
found (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/archive001.yaml
"""
logger = logging.getLogger("%s:archive" % __name__)
archiver = XML.SubElement(xml_parent, 'hudson.tasks.ArtifactArchiver')
artifacts = XML.SubElement(archiver, 'artifacts')
artifacts.text = data['artifacts']
if 'excludes' in data:
excludes = XML.SubElement(archiver, 'excludes')
excludes.text = data['excludes']
latest = XML.SubElement(archiver, 'latestOnly')
# backward compatibility
latest_only = data.get('latest_only', False)
if 'latest_only' in data:
logger.warn('latest_only is deprecated please use latest-only')
if 'latest-only' in data:
latest_only = data['latest-only']
if latest_only:
latest.text = 'true'
else:
latest.text = 'false'
if 'allow-empty' in data:
empty = XML.SubElement(archiver, 'allowEmptyArchive')
# Default behavior is to fail the build.
empty.text = str(data.get('allow-empty', False)).lower()
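
# Illustrative sketch (not from the original module; values hypothetical):
# every publisher below follows the same (parser, xml_parent, data)
# convention, so the XML a YAML snippet produces can be inspected in
# isolation. archive() ignores the parser argument, hence the None.
def _archive_example():
    parent = XML.Element('publishers')
    archive(None, parent, {'artifacts': '**/*.tar.gz',
                           'excludes': '**/tmp/*',
                           'latest-only': True})
    return XML.tostring(parent)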
def blame_upstream(parser, xml_parent, data):
"""yaml: blame-upstream
Notify upstream committers when the build fails
Requires the Jenkins `Blame Upstream Committers Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Blame+Upstream+Committers+Plugin>`_
Example:
.. literalinclude:: /../../tests/publishers/fixtures/blame001.yaml
"""
XML.SubElement(xml_parent,
'hudson.plugins.blame__upstream__commiters.'
'BlameUpstreamCommitersPublisher')
def campfire(parser, xml_parent, data):
"""yaml: campfire
Send build notifications to Campfire rooms.
Requires the Jenkins `Campfire Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Campfire+Plugin>`_
Campfire notifications global default values must be configured for
the Jenkins instance. Default values will be used if no specific
values are specified for each job, so all config params are optional.
:arg str subdomain: override the default campfire subdomain
:arg str token: override the default API token
:arg bool ssl: override the default 'use SSL'
:arg str room: override the default room name
Example:
.. literalinclude:: /../../tests/publishers/fixtures/campfire001.yaml
"""
root = XML.SubElement(xml_parent,
'hudson.plugins.campfire.'
'CampfireNotifier')
campfire = XML.SubElement(root, 'campfire')
if ('subdomain' in data and data['subdomain']):
subdomain = XML.SubElement(campfire, 'subdomain')
subdomain.text = data['subdomain']
if ('token' in data and data['token']):
token = XML.SubElement(campfire, 'token')
token.text = data['token']
if ('ssl' in data):
ssl = XML.SubElement(campfire, 'ssl')
ssl.text = str(data['ssl']).lower()
if ('room' in data and data['room']):
room = XML.SubElement(root, 'room')
name = XML.SubElement(room, 'name')
name.text = data['room']
XML.SubElement(room, 'campfire reference="../../campfire"')
def emotional_jenkins(parser, xml_parent, data):
"""yaml: emotional-jenkins
Emotional Jenkins.
Requires the Jenkins `Emotional Jenkins Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Emotional+Jenkins+Plugin>`_
Example:
.. literalinclude:: /../../tests/publishers/fixtures/emotional-jenkins.yaml
"""
XML.SubElement(xml_parent,
'org.jenkinsci.plugins.emotional__jenkins.'
'EmotionalJenkinsPublisher')
def trigger_parameterized_builds(parser, xml_parent, data):
"""yaml: trigger-parameterized-builds
Trigger parameterized builds of other jobs.
Requires the Jenkins `Parameterized Trigger Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Parameterized+Trigger+Plugin>`_
:arg str project: name of the job to trigger
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to pass the parameters of the
current build on to the triggered job (optional)
:arg bool svn-revision: Pass svn revision to the triggered job (optional)
:arg bool git-revision: Pass git revision to the other job (optional)
:arg str condition: when to trigger the other job (default 'ALWAYS')
:arg str property-file: Use properties from file (optional)
:arg bool fail-on-missing: Blocks the triggering of the downstream jobs
if any of the files are not found in the workspace (default 'False')
:arg str restrict-matrix-project: Filter that restricts the subset
of the combinations that the downstream project will run (optional)
Example::
publishers:
- trigger-parameterized-builds:
- project: other_job, foo, bar
predefined-parameters: foo=bar
- project: other_job1, other_job2
predefined-parameters: BUILD_NUM=${BUILD_NUMBER}
property-file: version.prop
fail-on-missing: true
- project: yet_another_job
predefined-parameters: foo=bar
git-revision: true
restrict-matrix-project: label=="x86"
"""
tbuilder = XML.SubElement(xml_parent,
'hudson.plugins.parameterizedtrigger.'
'BuildTrigger')
configs = XML.SubElement(tbuilder, 'configs')
for project_def in data:
tconfig = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'BuildTriggerConfig')
tconfigs = XML.SubElement(tconfig, 'configs')
if ('predefined-parameters' in project_def
or 'git-revision' in project_def
or 'property-file' in project_def
or 'current-parameters' in project_def
or 'svn-revision' in project_def
or 'restrict-matrix-project' in project_def):
if 'predefined-parameters' in project_def:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = project_def['predefined-parameters']
if 'git-revision' in project_def and project_def['git-revision']:
params = XML.SubElement(tconfigs,
'hudson.plugins.git.'
'GitRevisionBuildParameters')
properties = XML.SubElement(params, 'combineQueuedCommits')
properties.text = 'false'
if 'property-file' in project_def and project_def['property-file']:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'FileBuildParameters')
properties = XML.SubElement(params, 'propertiesFile')
properties.text = project_def['property-file']
failOnMissing = XML.SubElement(params, 'failTriggerOnMissing')
failOnMissing.text = str(project_def.get('fail-on-missing',
False)).lower()
if ('current-parameters' in project_def
and project_def['current-parameters']):
XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'CurrentBuildParameters')
if 'svn-revision' in project_def and project_def['svn-revision']:
XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'SubversionRevisionBuildParameters')
if ('restrict-matrix-project' in project_def
and project_def['restrict-matrix-project']):
subset = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'matrix.MatrixSubsetBuildParameters')
XML.SubElement(subset, 'filter').text = \
project_def['restrict-matrix-project']
else:
tconfigs.set('class', 'java.util.Collections$EmptyList')
projects = XML.SubElement(tconfig, 'projects')
projects.text = project_def['project']
condition = XML.SubElement(tconfig, 'condition')
condition.text = project_def.get('condition', 'ALWAYS')
trigger_with_no_params = XML.SubElement(tconfig,
'triggerWithNoParameters')
trigger_with_no_params.text = 'false'
def trigger(parser, xml_parent, data):
"""yaml: trigger
Trigger non-parametrised builds of other jobs.
:arg str project: name of the job to trigger
:arg str threshold: when to trigger the other job (default 'SUCCESS'),
alternatives: SUCCESS, UNSTABLE, FAILURE
Example:
.. literalinclude:: /../../tests/publishers/fixtures/trigger_success.yaml
"""
tconfig = XML.SubElement(xml_parent, 'hudson.tasks.BuildTrigger')
childProjects = XML.SubElement(tconfig, 'childProjects')
childProjects.text = data['project']
tthreshold = XML.SubElement(tconfig, 'threshold')
threshold = data.get('threshold', 'SUCCESS')
supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']
if threshold not in supported_thresholds:
raise JenkinsJobsException("threshold must be one of %s" %
", ".join(supported_thresholds))
tname = XML.SubElement(tthreshold, 'name')
tname.text = hudson_model.THRESHOLDS[threshold]['name']
tordinal = XML.SubElement(tthreshold, 'ordinal')
tordinal.text = hudson_model.THRESHOLDS[threshold]['ordinal']
tcolor = XML.SubElement(tthreshold, 'color')
tcolor.text = hudson_model.THRESHOLDS[threshold]['color']
def clone_workspace(parser, xml_parent, data):
"""yaml: clone-workspace
Archive the workspace from builds of one project and reuse them as the SCM
source for another project.
Requires the Jenkins `Clone Workspace SCM Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Clone+Workspace+SCM+Plugin>`_
:arg str workspace-glob: Files to include in cloned workspace
:arg str workspace-exclude-glob: Files to exclude from cloned workspace
:arg str criteria: Criteria for build to be archived. Can be 'any',
'not failed', or 'successful'. (default: any )
:arg str archive-method: Choose the method to use for archiving the
workspace. Can be 'tar' or 'zip'. (default: tar)
:arg bool override-default-excludes: Override default ant excludes.
(default: false)
Minimal example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace001.yaml
Full example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace002.yaml
"""
cloneworkspace = XML.SubElement(
xml_parent,
'hudson.plugins.cloneworkspace.CloneWorkspacePublisher',
{'plugin': 'clone-workspace-scm'})
XML.SubElement(
cloneworkspace,
'workspaceGlob').text = data.get('workspace-glob', None)
if 'workspace-exclude-glob' in data:
XML.SubElement(
cloneworkspace,
'workspaceExcludeGlob').text = data['workspace-exclude-glob']
criteria_list = ['Any', 'Not Failed', 'Successful']
criteria = data.get('criteria', 'Any').title()
if 'criteria' in data and criteria not in criteria_list:
raise JenkinsJobsException(
'clone-workspace criteria must be one of: '
+ ', '.join(criteria_list))
else:
XML.SubElement(cloneworkspace, 'criteria').text = criteria
archive_list = ['TAR', 'ZIP']
archive_method = data.get('archive-method', 'TAR').upper()
if 'archive-method' in data and archive_method not in archive_list:
raise JenkinsJobsException(
'clone-workspace archive-method must be one of: '
+ ', '.join(archive_list))
else:
XML.SubElement(cloneworkspace, 'archiveMethod').text = archive_method
XML.SubElement(
cloneworkspace,
'overrideDefaultExcludes').text = str(data.get(
'override-default-excludes',
False)).lower()
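
# Illustrative sketch (not from the original module): 'criteria' and
# 'archive-method' are validated against fixed lists, so a typo fails early
# with a JenkinsJobsException instead of producing a broken job definition.
def _clone_workspace_bad_criteria():
    parent = XML.Element('publishers')
    try:
        clone_workspace(None, parent, {'criteria': 'sometimes'})
    except JenkinsJobsException as exc:
        return str(exc)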
def cloverphp(parser, xml_parent, data):
"""yaml: cloverphp
Capture code coverage reports from PHPUnit
Requires the Jenkins `Clover PHP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Clover+PHP+Plugin>`_
Your job definition should pass to PHPUnit the --coverage-clover option
pointing to a file in the workspace (ex: clover-coverage.xml). The filename
has to be filled in the `xml-location` field.
:arg str xml-location: Path to the coverage XML file generated by PHPUnit
using --coverage-clover. Relative to workspace. (required)
:arg dict html: When present, whether the plugin should generate an HTML
report. Note that PHPUnit already provides an HTML report via its
--coverage-html option, which can be set in your builder (optional):
* **dir** (str): Directory where HTML report will be generated relative
to workspace. (required in `html` dict).
* **archive** (bool): Whether to archive HTML reports (default True).
:arg list metric-targets: List of metric targets to reach, must be one of
**healthy**, **unhealthy** and **failing**. Each metric target can take
two parameters:
* **method** Target for method coverage
* **statement** Target for statements coverage
Whenever a metric target is not filled in, the Jenkins plugin can fill in
defaults for you (as of v0.3.3 of the plugin the healthy target will have
method: 70 and statement: 80 if both are left empty). Jenkins Job Builder
will mimic that feature to ensure a clean configuration diff.
Minimal example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp001.yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp002.yaml
"""
cloverphp = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.cloverphp.CloverPHPPublisher')
# The plugin requires clover XML file to parse
if 'xml-location' not in data:
raise JenkinsJobsException('xml-location must be set')
# Whether HTML publishing has been checked
html_publish = False
# By default, disableArchiving = false. Note that we use
# reversed logic.
html_archive = True
if 'html' in data:
html_publish = True
html_dir = data['html'].get('dir', None)
html_archive = data['html'].get('archive', html_archive)
if html_dir is None:
# No point in going further, the plugin would not work
raise JenkinsJobsException('htmldir is required in a html block')
XML.SubElement(cloverphp, 'publishHtmlReport').text = \
str(html_publish).lower()
if html_publish:
XML.SubElement(cloverphp, 'reportDir').text = html_dir
XML.SubElement(cloverphp, 'xmlLocation').text = data.get('xml-location')
XML.SubElement(cloverphp, 'disableArchiving').text = \
str(not html_archive).lower()
# Handle targets
# Plugin v0.3.3 will fill defaults for us whenever healthy targets are both
# blanks.
default_metrics = {
'healthy': {'method': 70, 'statement': 80}
}
allowed_metrics = ['healthy', 'unhealthy', 'failing']
metrics = data.get('metric-targets', [])
# list of dicts to dict
metrics = dict(kv for m in metrics for kv in m.iteritems())
# Populate defaults whenever nothing has been filled by user.
for default in default_metrics.keys():
if metrics.get(default, None) is None:
metrics[default] = default_metrics[default]
# The plugin would at least define empty targets so make sure
# we output them all in the XML regardless of what the user
# has or has not entered.
for target in allowed_metrics:
cur_target = XML.SubElement(cloverphp, target + 'Target')
for t_type in ['method', 'statement']:
val = metrics.get(target, {}).get(t_type)
if val is None or type(val) != int:
continue
if val < 0 or val > 100:
raise JenkinsJobsException(
"Publisher cloverphp metric target %s:%s = %s "
"is not in valid range 0-100." % (target, t_type, val))
XML.SubElement(cur_target, t_type + 'Coverage').text = str(val)
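
# Illustrative sketch (not from the original module): cloverphp() above
# flattens the 'metric-targets' list of single-key dicts into one mapping
# (the Python 2 iteritems() expression) and back-fills the healthy defaults
# only when the user supplied nothing. A self-contained equivalent:
def _flatten_metric_targets(metric_targets):
    defaults = {'healthy': {'method': 70, 'statement': 80}}
    flat = {}
    for target in metric_targets:         # e.g. [{'healthy': {'method': 80}}]
        for name, values in target.items():
            flat[name] = values
    for name, values in defaults.items():
        if flat.get(name) is None:        # same check cloverphp() performs
            flat[name] = values
    return flat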
def coverage(parser, xml_parent, data):
"""yaml: coverage
WARNING: The coverage function is deprecated. Instead, use the
cobertura function to generate a cobertura coverage report.
Requires the Jenkins `Cobertura Coverage Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Cobertura+Plugin>`_
Example::
publishers:
- coverage
"""
logger = logging.getLogger(__name__)
logger.warn("Coverage function is deprecated. Switch to cobertura.")
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
XML.SubElement(cobertura, 'coberturaReportFile').text = '**/coverage.xml'
XML.SubElement(cobertura, 'onlyStable').text = 'false'
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '70'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '80'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '80'
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
XML.SubElement(cobertura, 'sourceEncoding').text = 'ASCII'
def cobertura(parser, xml_parent, data):
"""yaml: cobertura
Generate a cobertura coverage report.
Requires the Jenkins `Cobertura Coverage Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Cobertura+Plugin>`_
:arg str report-file: This is a file name pattern that can be used
to locate the cobertura xml report files (optional)
:arg bool only-stable: Include only stable builds (default false)
:arg bool fail-no-reports: fail builds if no coverage reports are found
(default false)
:arg bool fail-unhealthy: Unhealthy projects will be failed
(default false)
:arg bool fail-unstable: Unstable projects will be failed (default false)
:arg bool health-auto-update: Auto update threshold for health on
successful build (default false)
:arg bool stability-auto-update: Auto update threshold for stability on
successful build (default false)
:arg bool zoom-coverage-chart: Zoom the coverage chart and crop area below
the minimum and above the maximum coverage
of the past reports (default false)
:arg str source-encoding: Override the source encoding (default ASCII)
:arg dict targets:
:targets: (packages, files, classes, method, line, conditional)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
* **failing** (`int`): Failing threshold (default 0)
Example::
publishers:
- cobertura:
report-file: "/reports/cobertura/coverage.xml"
only-stable: "true"
fail-no-reports: "true"
fail-unhealthy: "true"
fail-unstable: "true"
health-auto-update: "true"
stability-auto-update: "true"
zoom-coverage-chart: "true"
source-encoding: "Big5"
targets:
- files:
healthy: 10
unhealthy: 20
failing: 30
- method:
healthy: 50
unhealthy: 40
failing: 30
"""
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
XML.SubElement(cobertura, 'coberturaReportFile').text = data.get(
'report-file', '**/coverage.xml')
XML.SubElement(cobertura, 'onlyStable').text = str(
data.get('only-stable', False)).lower()
XML.SubElement(cobertura, 'failUnhealthy').text = str(
data.get('fail-unhealthy', False)).lower()
XML.SubElement(cobertura, 'failUnstable').text = str(
data.get('fail-unstable', False)).lower()
XML.SubElement(cobertura, 'autoUpdateHealth').text = str(
data.get('health-auto-update', False)).lower()
XML.SubElement(cobertura, 'autoUpdateStability').text = str(
data.get('stability-auto-update', False)).lower()
XML.SubElement(cobertura, 'zoomCoverageChart').text = str(
data.get('zoom-coverage-chart', False)).lower()
XML.SubElement(cobertura, 'failNoReports').text = str(
data.get('fail-no-reports', False)).lower()
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = item.keys()[0]
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry,
'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('healthy', 0))
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = item.keys()[0]
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('unhealthy',
0))
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = item.keys()[0]
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('failing', 0))
XML.SubElement(cobertura, 'sourceEncoding').text = data.get(
'source-encoding', 'ASCII')
def jacoco(parser, xml_parent, data):
"""yaml: jacoco
Generate a JaCoCo coverage report.
Requires the Jenkins `JaCoCo Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/JaCoCo+Plugin>`_
:arg str exec-pattern: This is a file name pattern that can be used to
locate the jacoco report files (default
``**/**.exec``)
:arg str class-pattern: This is a file name pattern that can be used
to locate class files (default ``**/classes``)
:arg str source-pattern: This is a file name pattern that can be used
to locate source files (default ``**/src/main/java``)
:arg bool update-build-status: Update the build according to the results
(default False)
:arg str inclusion-pattern: This is a file name pattern that can be used
to include certain class files (optional)
:arg str exclusion-pattern: This is a file name pattern that can be used
to exclude certain class files (optional)
:arg dict targets:
:targets: (instruction, branch, complexity, line, method, class)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
Example::
publishers:
- jacoco:
exec-pattern: "**/**.exec"
class-pattern: "**/classes"
source-pattern: "**/src/main/java"
status-update: true
targets:
- branch:
healthy: 10
unhealthy: 20
- method:
healthy: 50
unhealthy: 40
"""
jacoco = XML.SubElement(xml_parent,
'hudson.plugins.jacoco.JacocoPublisher')
XML.SubElement(jacoco, 'execPattern').text = data.get(
'exec-pattern', '**/**.exec')
XML.SubElement(jacoco, 'classPattern').text = data.get(
'class-pattern', '**/classes')
XML.SubElement(jacoco, 'sourcePattern').text = data.get(
'source-pattern', '**/src/main/java')
XML.SubElement(jacoco, 'changeBuildStatus').text = data.get(
'update-build-status', False)
XML.SubElement(jacoco, 'inclusionPattern').text = data.get(
'inclusion-pattern', '')
XML.SubElement(jacoco, 'exclusionPattern').text = data.get(
'exclusion-pattern', '')
itemsList = ['instruction',
'branch',
'complexity',
'line',
'method',
'class']
for item in data['targets']:
item_name = item.keys()[0]
if item_name not in itemsList:
raise JenkinsJobsException("item entered is not valid must be "
"one of: %s" % ",".join(itemsList))
item_values = item.get(item_name, 0)
XML.SubElement(jacoco,
'maximum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('healthy', 0))
XML.SubElement(jacoco,
'minimum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('unhealthy', 0))
def ftp(parser, xml_parent, data):
"""yaml: ftp
Upload files via FTP.
Requires the Jenkins `Publish over FTP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Publish+Over+FTP+Plugin>`_
:arg str site: name of the ftp site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (defaults to False)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (defaults to False)
:arg str source: source path specifier
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (defaults to
False).
Example::
publishers:
- ftp:
site: 'ftp.example.com'
target: 'dest/dir'
source: 'base/source/dir/**'
remove-prefix: 'base/source/dir'
excludes: '**/*.excludedfiletype'
"""
console_prefix = 'FTP: '
plugin_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisher'
transfer_tag = 'jenkins.plugins.publish__over__ftp.BapFtpTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_ftp.' \
'BapFtpPublisherPlugin'
(_, transfer_node) = base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
XML.SubElement(transfer_node, 'asciiMode').text = 'false'
def junit(parser, xml_parent, data):
"""yaml: junit
Publish JUnit test results.
:arg str results: results filename
:arg bool keep-long-stdio: Retain long standard output/error in test
results (default true).
:arg bool test-stability: Add historical information about test
results stability (default false).
Requires the Jenkins `Test stability Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/Test+stability+plugin>`_.
Minimal example using defaults:
.. literalinclude:: /../../tests/publishers/fixtures/junit001.yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/junit002.yaml
"""
junitresult = XML.SubElement(xml_parent,
'hudson.tasks.junit.JUnitResultArchiver')
XML.SubElement(junitresult, 'testResults').text = data['results']
XML.SubElement(junitresult, 'keepLongStdio').text = str(
data.get('keep-long-stdio', True)).lower()
datapublisher = XML.SubElement(junitresult, 'testDataPublishers')
if str(data.get('test-stability', False)).lower() == 'true':
XML.SubElement(datapublisher,
'de.esailors.jenkins.teststability'
'.StabilityTestDataPublisher')
def xunit(parser, xml_parent, data):
"""yaml: xunit
Publish test results. Requires the Jenkins `xUnit Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/xUnit+Plugin>`_
:arg str thresholdmode: whether thresholds represent an absolute \
number of tests or a percentage. Either 'number' or 'percent', will \
default to 'number' if omitted.
:arg dict thresholds: list containing the thresholds for both \
'failed' and 'skipped' tests. Each entry should in turn have a \
list of "threshold name: values". The threshold names are \
'unstable', 'unstablenew', 'failure', 'failurenew'. Omitting a \
value will fall back to the xUnit default value (should be 0).
:arg dict types: per framework configuration. The key should be \
one of the internal types we support:\
'aunit', 'boosttest', 'checktype', 'cpptest', 'cppunit', 'fpcunit', \
'junit', 'mstest', 'nunit', 'phpunit', 'tusar', 'unittest', 'valgrind'. \
The 'custom' type is not supported.
Each framework type can be configured using the following parameters:
:arg str pattern: An Ant pattern to look for Junit result files, \
relative to the workspace root.
:arg bool requireupdate: fail the build whenever fresh test \
results have not been found (default: true).
:arg bool deleteoutput: delete temporary JUnit files (default: true)
:arg bool stoponerror: Fail the build whenever an error occurs during \
result file processing (default: true).
Example::
publishers:
- xunit:
thresholdmode: 'percent'
thresholds:
- failed:
unstable: 0
unstablenew: 0
failure: 0
failurenew: 0
- skipped:
unstable: 0
unstablenew: 0
failure: 0
failurenew: 0
types:
- phpunit:
pattern: junit.log
- cppUnit:
pattern: cppunit.log
"""
logger = logging.getLogger(__name__)
xunit = XML.SubElement(xml_parent, 'xunit')
# Map our internal types to the XML element names used by Jenkins plugin
types_to_plugin_types = {
'aunit': 'AUnitJunitHudsonTestType',
'boosttest': 'BoostTestJunitHudsonTestType',
'checktype': 'CheckType',
'cpptest': 'CppTestJunitHudsonTestType',
'cppunit': 'CppUnitJunitHudsonTestType',
'fpcunit': 'FPCUnitJunitHudsonTestType',
'junit': 'JUnitType',
'mstest': 'MSTestJunitHudsonTestType',
'nunit': 'NUnitJunitHudsonTestType',
'phpunit': 'PHPUnitJunitHudsonTestType',
'tusar': 'TUSARJunitHudsonTestType',
'unittest': 'UnitTestJunitHudsonTestType',
'valgrind': 'ValgrindJunitHudsonTestType',
# FIXME should implement the 'custom' type
}
implemented_types = types_to_plugin_types.keys() # shortcut
# Unit framework we are going to generate xml for
supported_types = []
for configured_type in data['types']:
type_name = configured_type.keys()[0]
if type_name not in implemented_types:
logger.warn("Requested xUnit type '%s' is not yet supported" %
type_name)
else:
# Append for generation
supported_types.append(configured_type)
# Generate XML for each of the supported framework types
xmltypes = XML.SubElement(xunit, 'types')
for supported_type in supported_types:
framework_name = supported_type.keys()[0]
xmlframework = XML.SubElement(xmltypes,
types_to_plugin_types[framework_name])
XML.SubElement(xmlframework, 'pattern').text = \
supported_type[framework_name].get('pattern', '')
XML.SubElement(xmlframework, 'failIfNotNew').text = \
str(supported_type[framework_name].get(
'requireupdate', True)).lower()
XML.SubElement(xmlframework, 'deleteOutputFiles').text = \
str(supported_type[framework_name].get(
'deleteoutput', True)).lower()
XML.SubElement(xmlframework, 'stopProcessingIfError').text = \
str(supported_type[framework_name].get(
'stoponerror', True)).lower()
xmlthresholds = XML.SubElement(xunit, 'thresholds')
if 'thresholds' in data:
for t in data['thresholds']:
if not ('failed' in t or 'skipped' in t):
logger.warn(
"Unrecognized threshold, should be 'failed' or 'skipped'")
continue
elname = "org.jenkinsci.plugins.xunit.threshold.%sThreshold" \
% t.keys()[0].title()
el = XML.SubElement(xmlthresholds, elname)
for threshold_name, threshold_value in t.values()[0].items():
# Normalize and craft the element name for this threshold
elname = "%sThreshold" % threshold_name.lower().replace(
'new', 'New')
XML.SubElement(el, elname).text = threshold_value
# Whether to use percent of exact number of tests.
# Thresholdmode is either:
# - 1 : absolute (number of tests), default.
# - 2 : relative (percentage of tests)
thresholdmode = '1'
if 'percent' == data.get('thresholdmode', 'number'):
thresholdmode = '2'
XML.SubElement(xunit, 'thresholdMode').text = \
thresholdmode
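
# Illustrative note (not from the original module): the per-value threshold
# element names are derived from the YAML keys by the
# lower().replace('new', 'New') expression above, e.g. 'unstable' ->
# <unstableThreshold> and 'failurenew' -> <failureNewThreshold>.
def _xunit_threshold_element(threshold_name):
    return "%sThreshold" % threshold_name.lower().replace('new', 'New')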
def _violations_add_entry(xml_parent, name, data):
vmin = data.get('min', 10)
vmax = data.get('max', 999)
vunstable = data.get('unstable', 999)
pattern = data.get('pattern', None)
entry = XML.SubElement(xml_parent, 'entry')
XML.SubElement(entry, 'string').text = name
tconfig = XML.SubElement(entry, 'hudson.plugins.violations.TypeConfig')
XML.SubElement(tconfig, 'type').text = name
XML.SubElement(tconfig, 'min').text = str(vmin)
XML.SubElement(tconfig, 'max').text = str(vmax)
XML.SubElement(tconfig, 'unstable').text = str(vunstable)
XML.SubElement(tconfig, 'usePattern').text = 'false'
if pattern:
XML.SubElement(tconfig, 'pattern').text = pattern
else:
XML.SubElement(tconfig, 'pattern')
def violations(parser, xml_parent, data):
"""yaml: violations
Publish code style violations.
Requires the Jenkins `Violations Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Violations>`_
The violations component accepts any number of dictionaries keyed
by the name of the violations system. The dictionary has the
following values:
:arg int min: sunny threshold
:arg int max: stormy threshold
:arg int unstable: unstable threshold
:arg str pattern: report filename pattern
Any system without a dictionary provided will use default values.
Valid systems are:
checkstyle, codenarc, cpd, cpplint, csslint, findbugs, fxcop,
gendarme, jcreport, jslint, pep8, pmd, pylint, simian, stylecop
Example::
publishers:
- violations:
pep8:
min: 0
max: 1
unstable: 1
pattern: '**/pep8.txt'
"""
violations = XML.SubElement(xml_parent,
'hudson.plugins.violations.'
'ViolationsPublisher')
config = XML.SubElement(violations, 'config')
suppressions = XML.SubElement(config, 'suppressions',
{'class': 'tree-set'})
XML.SubElement(suppressions, 'no-comparator')
configs = XML.SubElement(config, 'typeConfigs')
XML.SubElement(configs, 'no-comparator')
for name in [
'checkstyle',
'codenarc',
'cpd',
'cpplint',
'csslint',
'findbugs',
'fxcop',
'gendarme',
'jcreport',
'jslint',
'pep8',
'pmd',
'pylint',
'simian',
'stylecop']:
_violations_add_entry(configs, name, data.get(name, {}))
XML.SubElement(config, 'limit').text = '100'
XML.SubElement(config, 'sourcePathPattern')
XML.SubElement(config, 'fauxProjectPath')
XML.SubElement(config, 'encoding').text = 'default'
def checkstyle(parser, xml_parent, data):
"""yaml: checkstyle
Publish trend reports with Checkstyle.
Requires the Jenkins `Checkstyle Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Checkstyle+Plugin>`_
The checkstyle component accepts a dictionary with the
following values:
:arg str pattern: report filename pattern
:arg bool canRunOnFailed: also runs for failed builds
(instead of just stable or unstable builds)
:arg bool shouldDetectModules:
:arg int healthy: sunny threshold
:arg int unHealthy: stormy threshold
:arg str healthThreshold: threshold priority for health status
(high: only high, normal: high and normal, low: all)
:arg dict thresholds:
:thresholds:
* **unstable** (`dict`)
:unstable: * **totalAll** (`int`)
* **totalHigh** (`int`)
* **totalNormal** (`int`)
* **totalLow** (`int`)
* **failed** (`dict`)
:failed: * **totalAll** (`int`)
* **totalHigh** (`int`)
* **totalNormal** (`int`)
* **totalLow** (`int`)
:arg str defaultEncoding: encoding for parsing or showing files
(empty will use platform default)
Example::
publishers:
- checkstyle:
pattern: '**/checkstyle-result.xml'
healthy: 0
unHealthy: 100
healthThreshold: 'high'
thresholds:
unstable:
totalHigh: 10
failed:
totalHigh: 1
"""
checkstyle = XML.SubElement(xml_parent,
'hudson.plugins.checkstyle.'
'CheckStylePublisher')
dval = data.get('healthy', None)
if dval:
XML.SubElement(checkstyle, 'healthy').text = str(dval)
else:
XML.SubElement(checkstyle, 'healthy')
dval = data.get('unHealthy', None)
if dval:
XML.SubElement(checkstyle, 'unHealthy').text = str(dval)
else:
XML.SubElement(checkstyle, 'unHealthy')
XML.SubElement(checkstyle, 'thresholdLimit').text = \
data.get('healthThreshold', 'low')
XML.SubElement(checkstyle, 'pluginName').text = '[CHECKSTYLE] '
XML.SubElement(checkstyle, 'defaultEncoding').text = \
data.get('defaultEncoding', '')
if data.get('canRunOnFailed', False):
XML.SubElement(checkstyle, 'canRunOnFailed').text = 'true'
else:
XML.SubElement(checkstyle, 'canRunOnFailed').text = 'false'
XML.SubElement(checkstyle, 'useStableBuildAsReference').text = 'false'
XML.SubElement(checkstyle, 'useDeltaValues').text = 'false'
dthresholds = data.get('thresholds', {})
dunstable = dthresholds.get('unstable', {})
dfailed = dthresholds.get('failed', {})
thresholds = XML.SubElement(checkstyle, 'thresholds')
dval = dunstable.get('totalAll', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalAll').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalAll')
dval = dunstable.get('totalHigh', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalHigh').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalHigh')
dval = dunstable.get('totalNormal', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalNormal').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalNormal')
dval = dunstable.get('totalLow', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalLow').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalLow')
dval = dfailed.get('totalAll', None)
if dval:
XML.SubElement(thresholds, 'failedTotalAll').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalAll')
dval = dfailed.get('totalHigh', None)
if dval:
XML.SubElement(thresholds, 'failedTotalHigh').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalHigh')
dval = dfailed.get('totalNormal', None)
if dval:
XML.SubElement(thresholds, 'failedTotalNormal').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalNormal')
dval = dfailed.get('totalLow', None)
if dval:
XML.SubElement(thresholds, 'failedTotalLow').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalLow')
if data.get('shouldDetectModules', False):
XML.SubElement(checkstyle, 'shouldDetectModules').text = 'true'
else:
XML.SubElement(checkstyle, 'shouldDetectModules').text = 'false'
XML.SubElement(checkstyle, 'dontComputeNew').text = 'true'
XML.SubElement(checkstyle, 'doNotResolveRelativePaths').text = 'false'
XML.SubElement(checkstyle, 'pattern').text = data.get('pattern', '')
def scp(parser, xml_parent, data):
"""yaml: scp
Upload files via SCP
Requires the Jenkins `SCP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/SCP+plugin>`_
:arg str site: name of the scp site
:arg str target: destination directory
:arg str source: source path specifier
:arg bool keep-hierarchy: keep the file hierarchy when uploading
(default false)
:arg bool copy-after-failure: copy files even if the job fails
(default false)
:arg bool copy-console: copy the console log (default false); if
specified, omit 'target'
Example:
.. literalinclude:: /../../tests/publishers/fixtures/scp001.yaml
"""
site = data['site']
scp = XML.SubElement(xml_parent,
'be.certipost.hudson.plugin.SCPRepositoryPublisher')
XML.SubElement(scp, 'siteName').text = site
entries = XML.SubElement(scp, 'entries')
for entry in data['files']:
entry_e = XML.SubElement(entries, 'be.certipost.hudson.plugin.Entry')
XML.SubElement(entry_e, 'filePath').text = entry['target']
XML.SubElement(entry_e, 'sourceFile').text = entry.get('source', '')
if entry.get('keep-hierarchy', False):
XML.SubElement(entry_e, 'keepHierarchy').text = 'true'
else:
XML.SubElement(entry_e, 'keepHierarchy').text = 'false'
if entry.get('copy-console', False):
XML.SubElement(entry_e, 'copyConsoleLog').text = 'true'
else:
XML.SubElement(entry_e, 'copyConsoleLog').text = 'false'
if entry.get('copy-after-failure', False):
XML.SubElement(entry_e, 'copyAfterFailure').text = 'true'
else:
XML.SubElement(entry_e, 'copyAfterFailure').text = 'false'
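
# Illustrative sketch (not from the original module; values hypothetical):
# scp() expects a top-level 'site' plus a 'files' list, with target/source
# and the boolean flags given per entry.
def _scp_example():
    parent = XML.Element('publishers')
    scp(None, parent, {
        'site': 'example.com',
        'files': [{'target': 'dest/dir',
                   'source': 'build/**',
                   'keep-hierarchy': True}],
    })
    return XML.tostring(parent)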
def ssh(parser, xml_parent, data):
"""yaml: ssh
Upload files via SCP.
Requires the Jenkins `Publish over SSH Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Publish+Over+SSH+Plugin>`_
:arg str site: name of the ssh site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (defaults to False)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (defaults to False)
:arg str source: source path specifier
:arg str command: a command to execute on the remote server (optional)
:arg int timeout: timeout in milliseconds for the Exec command (optional)
:arg bool use-pty: run the exec command in pseudo TTY (defaults to False)
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (defaults to
False).
Example::
publishers:
- ssh:
site: 'server.example.com'
target: 'dest/dir'
source: 'base/source/dir/**'
remove-prefix: 'base/source/dir'
excludes: '**/*.excludedfiletype'
use-pty: true
command: 'rm -r jenkins_$BUILD_NUMBER'
timeout: 1800000
"""
console_prefix = 'SSH: '
plugin_tag = 'jenkins.plugins.publish__over__ssh.BapSshPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__ssh.BapSshPublisher'
transfer_tag = 'jenkins.plugins.publish__over__ssh.BapSshTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_ssh.' \
'BapSshPublisherPlugin'
base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
def pipeline(parser, xml_parent, data):
"""yaml: pipeline
Specify a downstream project in a pipeline.
Requires the Jenkins `Build Pipeline Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Build+Pipeline+Plugin>`_
:arg str project: the name of the downstream project
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to pass the parameters of the
current build on to the triggered job (optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/pipeline002.yaml
You can build pipeline jobs that are re-usable in different pipelines by
using a :ref:`job-template` to define the pipeline jobs,
and variable substitution to specify the name of
the downstream job in the pipeline.
Job-specific substitutions are useful here (see :ref:`project`).
See 'samples/pipeline.yaml' for an example pipeline implementation.
"""
if 'project' in data and data['project'] != '':
pippub = XML.SubElement(xml_parent,
'au.com.centrumsystems.hudson.plugin.'
'buildpipeline.trigger.BuildPipelineTrigger')
configs = XML.SubElement(pippub, 'configs')
if 'predefined-parameters' in data:
params = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = data['predefined-parameters']
if ('current-parameters' in data
and data['current-parameters']):
XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'CurrentBuildParameters')
XML.SubElement(pippub, 'downstreamProjectNames').text = data['project']
def email(parser, xml_parent, data):
"""yaml: email
Email notifications on build failure.
:arg str recipients: Recipient email addresses
:arg bool notify-every-unstable-build: Send an email for every
unstable build (default true)
:arg bool send-to-individuals: Send an email to the individual
who broke the build (default false)
Example::
publishers:
- email:
recipients: breakage@example.com
"""
# TODO: raise exception if this is applied to a maven job
mailer = XML.SubElement(xml_parent,
'hudson.tasks.Mailer')
XML.SubElement(mailer, 'recipients').text = data['recipients']
# Note the logic reversal (included here to match the GUI)
if data.get('notify-every-unstable-build', True):
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'false'
else:
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'true'
XML.SubElement(mailer, 'sendToIndividuals').text = str(
data.get('send-to-individuals', False)).lower()
def claim_build(parser, xml_parent, data):
"""yaml: claim-build
Claim build failures
Requires the Jenkins `Claim Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Claim+plugin>`_
Example::
publishers:
- claim-build
"""
XML.SubElement(xml_parent, 'hudson.plugins.claim.ClaimPublisher')
def base_email_ext(parser, xml_parent, data, ttype):
trigger = XML.SubElement(xml_parent,
'hudson.plugins.emailext.plugins.trigger.'
+ ttype)
email = XML.SubElement(trigger, 'email')
XML.SubElement(email, 'recipientList').text = ''
XML.SubElement(email, 'subject').text = '$PROJECT_DEFAULT_SUBJECT'
XML.SubElement(email, 'body').text = '$PROJECT_DEFAULT_CONTENT'
XML.SubElement(email, 'sendToDevelopers').text = 'false'
XML.SubElement(email, 'sendToRequester').text = 'false'
XML.SubElement(email, 'includeCulprits').text = 'false'
XML.SubElement(email, 'sendToRecipientList').text = 'true'
def email_ext(parser, xml_parent, data):
"""yaml: email-ext
Extend Jenkins' built-in email notification
Requires the Jenkins `Email-ext Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Email-ext+plugin>`_
:arg str recipients: Comma separated list of emails
:arg str reply-to: Comma separated list of emails that should be in
the Reply-To header for this project (default is $DEFAULT_RECIPIENTS)
:arg str content-type: The content type of the emails sent. If not set, the
Jenkins plugin uses the value set on the main configuration page.
Possible values: 'html', 'text' or 'default' (default 'default')
:arg str subject: Subject for the email, can include variables like
${BUILD_NUMBER} or even groovy or javascript code
:arg str body: Content for the body of the email, can include variables
like ${BUILD_NUMBER}, but the real magic is using groovy or
javascript to hook into the Jenkins API itself
:arg bool attach-build-log: Include build log in the email (default false)
:arg str attachments: pattern of files to include as attachment (optional)
:arg bool unstable: Send an email for an unstable result (default false)
:arg bool first-failure: Send an email for just the first failure
(default false)
:arg bool not-built: Send an email if not built (default false)
:arg bool aborted: Send an email if the build is aborted (default false)
:arg bool regression: Send an email if there is a regression
(default false)
:arg bool failure: Send an email if the build fails (default true)
:arg bool improvement: Send an email if the build improves (default false)
:arg bool still-failing: Send an email if the build is still failing
(default false)
:arg bool success: Send an email for a successful build (default false)
:arg bool fixed: Send an email if the build is fixed (default false)
:arg bool still-unstable: Send an email if the build is still unstable
(default false)
:arg bool pre-build: Send an email before the build (default false)
:arg str matrix-trigger: If using matrix projects, when to trigger
:matrix-trigger values:
* **both**
* **only-parent**
* **only-configurations**
Example:
.. literalinclude:: /../../tests/publishers/fixtures/email-ext001.yaml
"""
emailext = XML.SubElement(xml_parent,
'hudson.plugins.emailext.ExtendedEmailPublisher')
if 'recipients' in data:
XML.SubElement(emailext, 'recipientList').text = data['recipients']
else:
XML.SubElement(emailext, 'recipientList').text = '$DEFAULT_RECIPIENTS'
ctrigger = XML.SubElement(emailext, 'configuredTriggers')
if data.get('unstable', False):
base_email_ext(parser, ctrigger, data, 'UnstableTrigger')
if data.get('first-failure', False):
base_email_ext(parser, ctrigger, data, 'FirstFailureTrigger')
if data.get('not-built', False):
base_email_ext(parser, ctrigger, data, 'NotBuiltTrigger')
if data.get('aborted', False):
base_email_ext(parser, ctrigger, data, 'AbortedTrigger')
if data.get('regression', False):
base_email_ext(parser, ctrigger, data, 'RegressionTrigger')
if data.get('failure', True):
base_email_ext(parser, ctrigger, data, 'FailureTrigger')
if data.get('improvement', False):
base_email_ext(parser, ctrigger, data, 'ImprovementTrigger')
if data.get('still-failing', False):
base_email_ext(parser, ctrigger, data, 'StillFailingTrigger')
if data.get('success', False):
base_email_ext(parser, ctrigger, data, 'SuccessTrigger')
if data.get('fixed', False):
base_email_ext(parser, ctrigger, data, 'FixedTrigger')
if data.get('still-unstable', False):
base_email_ext(parser, ctrigger, data, 'StillUnstableTrigger')
if data.get('pre-build', False):
base_email_ext(parser, ctrigger, data, 'PreBuildTrigger')
content_type_mime = {
'text': 'text/plain',
'html': 'text/html',
'default': 'default',
}
ctype = data.get('content-type', 'default')
if ctype not in content_type_mime:
raise JenkinsJobsException('email-ext content type must be one of: %s'
% ', '.join(content_type_mime.keys()))
XML.SubElement(emailext, 'contentType').text = content_type_mime[ctype]
XML.SubElement(emailext, 'defaultSubject').text = data.get(
'subject', '$DEFAULT_SUBJECT')
XML.SubElement(emailext, 'defaultContent').text = data.get(
'body', '$DEFAULT_CONTENT')
XML.SubElement(emailext, 'attachmentsPattern').text = data.get(
'attachments', '')
XML.SubElement(emailext, 'presendScript').text = ''
XML.SubElement(emailext, 'attachBuildLog').text = \
str(data.get('attach-build-log', False)).lower()
XML.SubElement(emailext, 'replyTo').text = data.get('reply-to',
'$DEFAULT_RECIPIENTS')
matrix_dict = {'both': 'BOTH',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
matrix_trigger = data.get('matrix-trigger', None)
# If no matrix-trigger is defined, do not create the entry
if matrix_trigger is not None:
if matrix_trigger not in matrix_dict:
raise JenkinsJobsException("matrix-trigger entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(emailext, 'matrixTriggerMode').text = \
matrix_dict.get(matrix_trigger)
def fingerprint(parser, xml_parent, data):
"""yaml: fingerprint
Fingerprint files to track them across builds
:arg str files: files to fingerprint, follows the @includes of Ant fileset
(default is blank)
:arg bool record-artifacts: fingerprint all archived artifacts
(default false)
Example::
publishers:
- fingerprint:
files: builddir/test*.xml
record-artifacts: false
"""
finger = XML.SubElement(xml_parent, 'hudson.tasks.Fingerprinter')
XML.SubElement(finger, 'targets').text = data.get('files', '')
XML.SubElement(finger, 'recordBuildArtifacts').text = str(data.get(
'record-artifacts', False)).lower()
def aggregate_tests(parser, xml_parent, data):
"""yaml: aggregate-tests
Aggregate downstream test results
:arg bool include-failed-builds: whether to include failed builds
Example::
publishers:
- aggregate-tests:
include-failed-builds: true
"""
agg = XML.SubElement(xml_parent,
'hudson.tasks.test.AggregatedTestResultPublisher')
XML.SubElement(agg, 'includeFailedBuilds').text = str(data.get(
'include-failed-builds', False)).lower()
def cppcheck(parser, xml_parent, data):
"""yaml: cppcheck
Cppcheck result publisher
Requires the Jenkins `Cppcheck Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Cppcheck+Plugin>`_
:arg str pattern: file pattern for cppcheck xml report
for more optional parameters see the example
Example::
publishers:
- cppcheck:
pattern: "**/cppcheck.xml"
# the rest is optional
# build status (new) error count thresholds
thresholds:
unstable: 5
new-unstable: 5
failure: 7
new-failure: 3
# severities which count towards the threshold, default all true
severity:
error: true
warning: true
information: false
graph:
xysize: [500, 200]
# which errors to display, default only sum
display:
sum: false
error: true
"""
cppextbase = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.cppcheck.'
'CppcheckPublisher')
cppext = XML.SubElement(cppextbase, 'cppcheckConfig')
XML.SubElement(cppext, 'pattern').text = data['pattern']
XML.SubElement(cppext, 'ignoreBlankFiles').text = \
str(data.get('ignoreblankfiles', False)).lower()
csev = XML.SubElement(cppext, 'configSeverityEvaluation')
thrsh = data.get('thresholds', {})
XML.SubElement(csev, 'threshold').text = str(thrsh.get('unstable', ''))
XML.SubElement(csev, 'newThreshold').text = \
str(thrsh.get('new-unstable', ''))
XML.SubElement(csev, 'failureThreshold').text = \
str(thrsh.get('failure', ''))
XML.SubElement(csev, 'newFailureThreshold').text = \
str(thrsh.get('new-failure', ''))
XML.SubElement(csev, 'healthy').text = str(thrsh.get('healthy', ''))
XML.SubElement(csev, 'unHealthy').text = str(thrsh.get('unhealthy', ''))
sev = thrsh.get('severity', {})
XML.SubElement(csev, 'severityError').text = \
str(sev.get('error', True)).lower()
XML.SubElement(csev, 'severityWarning').text = \
str(sev.get('warning', True)).lower()
XML.SubElement(csev, 'severityStyle').text = \
str(sev.get('style', True)).lower()
XML.SubElement(csev, 'severityPerformance').text = \
str(sev.get('performance', True)).lower()
XML.SubElement(csev, 'severityInformation').text = \
str(sev.get('information', True)).lower()
graph = data.get('graph', {})
cgraph = XML.SubElement(cppext, 'configGraph')
x, y = graph.get('xysize', [500, 200])
XML.SubElement(cgraph, 'xSize').text = str(x)
XML.SubElement(cgraph, 'ySize').text = str(y)
gdisplay = graph.get('display', {})
XML.SubElement(cgraph, 'displayAllErrors').text = \
str(gdisplay.get('sum', True)).lower()
XML.SubElement(cgraph, 'displayErrorSeverity').text = \
str(gdisplay.get('error', False)).lower()
XML.SubElement(cgraph, 'displayWarningSeverity').text = \
str(gdisplay.get('warning', False)).lower()
XML.SubElement(cgraph, 'displayStyleSeverity').text = \
str(gdisplay.get('style', False)).lower()
XML.SubElement(cgraph, 'displayPerformanceSeverity').text = \
str(gdisplay.get('performance', False)).lower()
XML.SubElement(cgraph, 'displayInformationSeverity').text = \
str(gdisplay.get('information', False)).lower()
def logparser(parser, xml_parent, data):
"""yaml: logparser
Requires the Jenkins `Log Parser Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Log+Parser+Plugin>`_
:arg str parse-rules: full path to parse rules
:arg bool unstable-on-warning: mark build unstable on warning
:arg bool fail-on-error: mark build failed on error
Example::
publishers:
- logparser:
parse-rules: "/path/to/parserules"
unstable-on-warning: true
fail-on-error: true
"""
clog = XML.SubElement(xml_parent,
'hudson.plugins.logparser.LogParserPublisher')
XML.SubElement(clog, 'unstableOnWarning').text = \
str(data.get('unstable-on-warning', False)).lower()
XML.SubElement(clog, 'failBuildOnError').text = \
str(data.get('fail-on-error', False)).lower()
# v1.08: this must be the full path, the name of the rules is not enough
XML.SubElement(clog, 'parsingRulesPath').text = data.get('parse-rules', '')
def copy_to_master(parser, xml_parent, data):
"""yaml: copy-to-master
Copy files to master from slave
Requires the Jenkins `Copy To Slave Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Copy+To+Slave+Plugin>`_
:arg list includes: list of file patterns to copy
:arg list excludes: list of file patterns to exclude
:arg string destination: absolute path into which the files will be copied.
If left blank they will be copied into the
workspace of the current job
Example::
publishers:
- copy-to-master:
includes:
- file1
- file2*.txt
excludes:
- file2bad.txt
"""
p = 'com.michelin.cio.hudson.plugins.copytoslave.CopyToMasterNotifier'
cm = XML.SubElement(xml_parent, p)
XML.SubElement(cm, 'includes').text = ','.join(data.get('includes', ['']))
XML.SubElement(cm, 'excludes').text = ','.join(data.get('excludes', ['']))
XML.SubElement(cm, 'destinationFolder').text = \
data.get('destination', '')
if data.get('destination', ''):
XML.SubElement(cm, 'overrideDestinationFolder').text = 'true'
def jira(parser, xml_parent, data):
"""yaml: jira
Update relevant JIRA issues
Requires the Jenkins `JIRA Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/JIRA+Plugin>`_
Example::
publishers:
- jira
"""
XML.SubElement(xml_parent, 'hudson.plugins.jira.JiraIssueUpdater')
def groovy_postbuild(parser, xml_parent, data):
"""yaml: groovy-postbuild
Execute a groovy script.
Requires the Jenkins `Groovy Postbuild Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/Groovy+Postbuild+Plugin>`_
:Parameter: the groovy script to execute
Example::
publishers:
- groovy-postbuild: "manager.buildFailure()"
"""
root_tag = 'org.jvnet.hudson.plugins.groovypostbuild.'\
'GroovyPostbuildRecorder'
groovy = XML.SubElement(xml_parent, root_tag)
XML.SubElement(groovy, 'groovyScript').text = data
def base_publish_over(xml_parent, data, console_prefix,
plugin_tag, publisher_tag,
transferset_tag, reference_plugin_tag):
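    # Helper shared by the publish-over-* publishers (e.g. cifs). It builds
    # the common delegate/publishers/transfers XML structure and returns the
    # outer plugin element together with the transfer set element so that
    # callers can append any plugin specific options.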
outer = XML.SubElement(xml_parent, plugin_tag)
XML.SubElement(outer, 'consolePrefix').text = console_prefix
delegate = XML.SubElement(outer, 'delegate')
publishers = XML.SubElement(delegate, 'publishers')
inner = XML.SubElement(publishers, publisher_tag)
XML.SubElement(inner, 'configName').text = data['site']
XML.SubElement(inner, 'verbose').text = 'true'
transfers = XML.SubElement(inner, 'transfers')
transfersset = XML.SubElement(transfers, transferset_tag)
XML.SubElement(transfersset, 'remoteDirectory').text = data['target']
XML.SubElement(transfersset, 'sourceFiles').text = data['source']
if 'command' in data:
XML.SubElement(transfersset, 'execCommand').text = data['command']
if 'timeout' in data:
XML.SubElement(transfersset, 'execTimeout').text = str(data['timeout'])
if 'use-pty' in data:
XML.SubElement(transfersset, 'usePty').text = \
str(data.get('use-pty', False)).lower()
XML.SubElement(transfersset, 'excludes').text = data.get('excludes', '')
XML.SubElement(transfersset, 'removePrefix').text = \
data.get('remove-prefix', '')
XML.SubElement(transfersset, 'remoteDirectorySDF').text = \
str(data.get('target-is-date-format', False)).lower()
XML.SubElement(transfersset, 'flatten').text = 'false'
XML.SubElement(transfersset, 'cleanRemote').text = \
str(data.get('clean-remote', False)).lower()
XML.SubElement(inner, 'useWorkspaceInPromotion').text = 'false'
XML.SubElement(inner, 'usePromotionTimestamp').text = 'false'
XML.SubElement(delegate, 'continueOnError').text = 'false'
XML.SubElement(delegate, 'failOnError').text = \
str(data.get('fail-on-error', False)).lower()
XML.SubElement(delegate, 'alwaysPublishFromMaster').text = 'false'
XML.SubElement(delegate, 'hostConfigurationAccess',
{'class': reference_plugin_tag,
'reference': '../..'})
return (outer, transfersset)
def cifs(parser, xml_parent, data):
"""yaml: cifs
Upload files via CIFS.
Requires the Jenkins `Publish over CIFS Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Publish+Over+CIFS+Plugin>`_
:arg str site: name of the cifs site/share
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (defaults to False)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (defaults to False)
:arg str source: source path specifier
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (defaults to
False).
Example::
publishers:
- cifs:
site: 'cifs.share'
target: 'dest/dir'
source: 'base/source/dir/**'
remove-prefix: 'base/source/dir'
excludes: '**/*.excludedfiletype'
"""
console_prefix = 'CIFS: '
plugin_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisher'
transfer_tag = 'jenkins.plugins.publish__over__cifs.CifsTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_cifs.' \
'CifsPublisherPlugin'
base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
def sonar(parser, xml_parent, data):
"""yaml: sonar
Sonar plugin support.
Requires the Jenkins `Sonar Plugin.
<http://docs.codehaus.org/pages/viewpage.action?pageId=116359341>`_
:arg str jdk: JDK to use (inherited from the job if omitted). (optional)
:arg str branch: branch onto which the analysis will be posted (optional)
:arg str language: source code language (optional)
:arg str maven-opts: options given to maven (optional)
:arg str additional-properties: sonar analysis parameters (optional)
:arg dict skip-global-triggers:
:Triggers: * **skip-when-scm-change** (`bool`): skip analysis when
build triggered by scm
* **skip-when-upstream-build** (`bool`): skip analysis when
build triggered by an upstream build
* **skip-when-envvar-defined** (`str`): skip analysis when
the specified environment variable is set to true
    This publisher supports the post-build action exposed by the Jenkins
    Sonar Plugin, which triggers a Sonar analysis with Maven.
Example::
publishers:
- sonar:
jdk: MyJdk
branch: myBranch
language: java
maven-opts: -DskipTests
additional-properties: -DsonarHostURL=http://example.com/
skip-global-triggers:
skip-when-scm-change: true
skip-when-upstream-build: true
skip-when-envvar-defined: SKIP_SONAR
"""
sonar = XML.SubElement(xml_parent, 'hudson.plugins.sonar.SonarPublisher')
if 'jdk' in data:
XML.SubElement(sonar, 'jdk').text = data['jdk']
XML.SubElement(sonar, 'branch').text = data.get('branch', '')
XML.SubElement(sonar, 'language').text = data.get('language', '')
XML.SubElement(sonar, 'mavenOpts').text = data.get('maven-opts', '')
XML.SubElement(sonar, 'jobAdditionalProperties').text = \
data.get('additional-properties', '')
if 'skip-global-triggers' in data:
data_triggers = data['skip-global-triggers']
triggers = XML.SubElement(sonar, 'triggers')
XML.SubElement(triggers, 'skipScmCause').text = \
str(data_triggers.get('skip-when-scm-change', False)).lower()
XML.SubElement(triggers, 'skipUpstreamCause').text = \
str(data_triggers.get('skip-when-upstream-build', False)).lower()
XML.SubElement(triggers, 'envVar').text = \
data_triggers.get('skip-when-envvar-defined', '')
def performance(parser, xml_parent, data):
"""yaml: performance
Publish performance test results from jmeter and junit.
Requires the Jenkins `Performance Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Performance+Plugin>`_
:arg int failed-threshold: Specify the error percentage threshold that
set the build failed. A negative value means
don't use this threshold (default 0)
:arg int unstable-threshold: Specify the error percentage threshold that
set the build unstable. A negative value means
don't use this threshold (default 0)
:arg dict report:
:(jmeter or junit): (`dict` or `str`): Specify a custom report file
(optional; jmeter default \**/*.jtl, junit default **/TEST-\*.xml)
Examples::
publishers:
- performance:
failed-threshold: 85
unstable-threshold: -1
report:
- jmeter: "/special/file.jtl"
- junit: "/special/file.xml"
publishers:
- performance:
failed-threshold: 85
unstable-threshold: -1
report:
- jmeter
- junit
publishers:
- performance:
failed-threshold: 85
unstable-threshold: -1
report:
- jmeter: "/special/file.jtl"
- junit: "/special/file.xml"
- jmeter
- junit
"""
logger = logging.getLogger(__name__)
perf = XML.SubElement(xml_parent, 'hudson.plugins.performance.'
'PerformancePublisher')
XML.SubElement(perf, 'errorFailedThreshold').text = str(data.get(
'failed-threshold', 0))
XML.SubElement(perf, 'errorUnstableThreshold').text = str(data.get(
'unstable-threshold', 0))
parsers = XML.SubElement(perf, 'parsers')
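    # Each 'report' entry is either a bare string ('jmeter' or 'junit'),
    # which selects that parser with its default glob, or a single-key dict
    # mapping the parser name to a custom report file pattern.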
for item in data['report']:
if isinstance(item, dict):
item_name = item.keys()[0]
item_values = item.get(item_name, None)
if item_name == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = str(item_values)
elif item_name == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = str(item_values)
else:
logger.fatal("You have not specified jmeter or junit, or "
"you have incorrectly assigned the key value.")
sys.exit(1)
elif isinstance(item, str):
if item == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = '**/*.jtl'
elif item == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = '**/TEST-*.xml'
else:
logger.fatal("You have not specified jmeter or junit, or "
"you have incorrectly assigned the key value.")
sys.exit(1)
def join_trigger(parser, xml_parent, data):
"""yaml: join-trigger
Trigger a job after all the immediate downstream jobs have completed
:arg list projects: list of projects to trigger
Example::
publishers:
- join-trigger:
projects:
- project-one
- project-two
"""
jointrigger = XML.SubElement(xml_parent, 'join.JoinTrigger')
# Simple Project List
joinProjectsText = ','.join(data.get('projects', ['']))
XML.SubElement(jointrigger, 'joinProjects').text = joinProjectsText
def jabber(parser, xml_parent, data):
"""yaml: jabber
Integrates Jenkins with the Jabber/XMPP instant messaging protocol
Requires the Jenkins `Jabber Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Jabber+Plugin>`_
:arg bool notify-on-build-start: Whether to send notifications
to channels when a build starts (default false)
:arg bool notify-scm-committers: Whether to send notifications
to the users that are suspected of having broken this build
(default false)
:arg bool notify-scm-culprits: Also send notifications to 'culprits'
from previous unstable/failed builds (default false)
:arg bool notify-upstream-committers: Whether to send notifications to
upstream committers if no committers were found for a broken build
(default false)
:arg bool notify-scm-fixers: Whether to send notifications to the users
that have fixed a broken build (default false)
:arg list group-targets: List of group targets to notify
:arg list individual-targets: List of individual targets to notify
:arg dict strategy: When to send notifications (default all)
:strategy values:
* **all** -- Always
* **failure** -- On any failure
* **failure-fixed** -- On failure and fixes
* **change** -- Only on state change
:arg dict message: Channel notification message (default summary-scm)
:message values:
* **summary-scm** -- Summary + SCM changes
* **summary** -- Just summary
* **summary-build** -- Summary and build parameters
* **summary-scm-fail** -- Summary, SCM changes, and failed tests
Example::
publishers:
- jabber:
notify-on-build-start: true
group-targets:
- "foo-room@conference-2-fooserver.foo.com"
individual-targets:
- "foo-user@conference-2-fooserver.foo.com"
strategy: all
message: summary-scm
"""
j = XML.SubElement(xml_parent, 'hudson.plugins.jabber.im.transport.'
'JabberPublisher')
t = XML.SubElement(j, 'targets')
if 'group-targets' in data:
for group in data['group-targets']:
gcimt = XML.SubElement(t, 'hudson.plugins.im.'
'GroupChatIMMessageTarget')
XML.SubElement(gcimt, 'name').text = group
XML.SubElement(gcimt, 'notificationOnly').text = 'false'
if 'individual-targets' in data:
for individual in data['individual-targets']:
dimt = XML.SubElement(t, 'hudson.plugins.im.'
'DefaultIMMessageTarget')
XML.SubElement(dimt, 'value').text = individual
strategy = data.get('strategy', 'all')
strategydict = {'all': 'ALL',
'failure': 'ANY_FAILURE',
'failure-fixed': 'FAILURE_AND_FIXED',
'change': 'STATECHANGE_ONLY'}
if strategy not in strategydict:
raise JenkinsJobsException("Strategy entered is not valid, must be " +
"one of: all, failure, failure-fixed, or "
"change")
XML.SubElement(j, 'strategy').text = strategydict[strategy]
XML.SubElement(j, 'notifyOnBuildStart').text = str(
data.get('notify-on-build-start', False)).lower()
XML.SubElement(j, 'notifySuspects').text = str(
data.get('notify-scm-committers', False)).lower()
XML.SubElement(j, 'notifyCulprits').text = str(
data.get('notify-scm-culprits', False)).lower()
XML.SubElement(j, 'notifyFixers').text = str(
data.get('notify-scm-fixers', False)).lower()
XML.SubElement(j, 'notifyUpstreamCommitters').text = str(
data.get('notify-upstream-committers', False)).lower()
message = data.get('message', 'summary-scm')
messagedict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-build': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
if message not in messagedict:
raise JenkinsJobsException("Message entered is not valid, must be one "
"of: summary-scm, summary, summary-build "
"or summary-scm-fail")
XML.SubElement(j, 'buildToChatNotifier', {
'class': 'hudson.plugins.im.build_notify.' + messagedict[message]})
XML.SubElement(j, 'matrixMultiplier').text = 'ONLY_CONFIGURATIONS'
def workspace_cleanup(parser, xml_parent, data):
"""yaml: workspace-cleanup (post-build)
Requires the Jenkins `Workspace Cleanup Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Workspace+Cleanup+Plugin>`_
The pre-build workspace-cleanup is available as a wrapper.
:arg list include: list of files to be included
:arg list exclude: list of files to be excluded
:arg bool dirmatch: Apply pattern to directories too (default: false)
:arg list clean-if: clean depending on build status
:clean-if values:
* **success** (`bool`) (default: true)
* **unstable** (`bool`) (default: true)
* **failure** (`bool`) (default: true)
* **aborted** (`bool`) (default: true)
* **not-built** (`bool`) (default: true)
:arg bool fail-build: Fail the build if the cleanup fails (default: true)
:arg bool clean-parent: Cleanup matrix parent workspace (default: false)
Example::
publishers:
- workspace-cleanup:
include:
- "*.zip"
clean-if:
- success: true
- not-built: false
"""
p = XML.SubElement(xml_parent,
'hudson.plugins.ws__cleanup.WsCleanup')
p.set("plugin", "ws-cleanup@0.14")
if "include" in data or "exclude" in data:
patterns = XML.SubElement(p, 'patterns')
for inc in data.get("include", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = inc
XML.SubElement(ptrn, 'type').text = "INCLUDE"
for exc in data.get("exclude", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = exc
XML.SubElement(ptrn, 'type').text = "EXCLUDE"
XML.SubElement(p, 'deleteDirs').text = \
str(data.get("dirmatch", False)).lower()
XML.SubElement(p, 'cleanupMatrixParent').text = \
str(data.get("clean-parent", False)).lower()
mask = {'success': 'cleanWhenSuccess', 'unstable': 'cleanWhenUnstable',
'failure': 'cleanWhenFailure', 'not-built': 'cleanWhenNotBuilt',
'aborted': 'cleanWhenAborted'}
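    # 'clean-if' is a list of single-key dicts; merge them into one dict and
    # default every build status that is not listed to true.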
clean = data.get('clean-if', [])
cdict = dict()
for d in clean:
cdict.update(d)
for k, v in mask.iteritems():
XML.SubElement(p, v).text = str(cdict.pop(k, True)).lower()
if len(cdict) > 0:
raise ValueError('clean-if must be one of: %r' % list(mask.keys()))
if str(data.get("fail-build", False)).lower() == 'false':
XML.SubElement(p, 'notFailBuild').text = 'true'
else:
XML.SubElement(p, 'notFailBuild').text = 'false'
def maven_deploy(parser, xml_parent, data):
"""yaml: maven-deploy
Deploy artifacts to Maven repository.
:arg str id: Repository ID
:arg str url: Repository URL
:arg bool unique-version: Assign unique versions to snapshots
(default true)
:arg bool deploy-unstable: Deploy even if the build is unstable
(default false)
Example::
publishers:
- maven-deploy:
id: example
url: http://repo.example.com/maven2/
unique-version: true
deploy-unstable: false
"""
p = XML.SubElement(xml_parent, 'hudson.maven.RedeployPublisher')
if 'id' in data:
XML.SubElement(p, 'id').text = data['id']
XML.SubElement(p, 'url').text = data['url']
XML.SubElement(p, 'uniqueVersion').text = str(
data.get('unique-version', True)).lower()
XML.SubElement(p, 'evenIfUnstable').text = str(
data.get('deploy-unstable', False)).lower()
def text_finder(parser, xml_parent, data):
"""yaml: text-finder
    This plugin lets you search for keywords in the specified files and
    mark the build as failed, unstable or successful based on the result
Requires the Jenkins `Text-finder Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Text-finder+Plugin>`_
:arg str regexp: Specify a regular expression
:arg str fileset: Specify the path to search
:arg bool also-check-console-output:
Search the console output (default False)
:arg bool succeed-if-found:
Force a build to succeed if a string was found (default False)
:arg bool unstable-if-found:
Set build unstable instead of failing the build (default False)
Example::
publishers:
- text-finder:
regexp: "some string"
fileset: "file.txt"
also-check-console-output: true
succeed-if-found: false
unstable-if-found: false
"""
finder = XML.SubElement(xml_parent,
'hudson.plugins.textfinder.TextFinderPublisher')
if ('fileset' in data):
XML.SubElement(finder, 'fileSet').text = data['fileset']
XML.SubElement(finder, 'regexp').text = data['regexp']
check_output = str(data.get('also-check-console-output', False)).lower()
XML.SubElement(finder, 'alsoCheckConsoleOutput').text = check_output
succeed_if_found = str(data.get('succeed-if-found', False)).lower()
XML.SubElement(finder, 'succeedIfFound').text = succeed_if_found
unstable_if_found = str(data.get('unstable-if-found', False)).lower()
XML.SubElement(finder, 'unstableIfFound').text = unstable_if_found
def html_publisher(parser, xml_parent, data):
"""yaml: html-publisher
This plugin publishes HTML reports.
Requires the Jenkins `HTML Publisher Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/HTML+Publisher+Plugin>`_
:arg str name: Report name
:arg str dir: HTML directory to archive
:arg str files: Specify the pages to display
:arg bool keep-all: keep HTML reports for each past build (Default False)
:arg bool allow-missing: Allow missing HTML reports (Default False)
Example::
publishers:
- html-publisher:
name: "some name"
dir: "path/"
files: "index.html"
keep-all: true
allow-missing: true
"""
reporter = XML.SubElement(xml_parent, 'htmlpublisher.HtmlPublisher')
targets = XML.SubElement(reporter, 'reportTargets')
ptarget = XML.SubElement(targets, 'htmlpublisher.HtmlPublisherTarget')
XML.SubElement(ptarget, 'reportName').text = data['name']
XML.SubElement(ptarget, 'reportDir').text = data['dir']
XML.SubElement(ptarget, 'reportFiles').text = data['files']
keep_all = str(data.get('keep-all', False)).lower()
XML.SubElement(ptarget, 'keepAll').text = keep_all
allow_missing = str(data.get('allow-missing', False)).lower()
XML.SubElement(ptarget, 'allowMissing').text = allow_missing
XML.SubElement(ptarget, 'wrapperName').text = "htmlpublisher-wrapper.html"
def tap(parser, xml_parent, data):
"""yaml: tap
    Adds support for TAP test result files
Requires the Jenkins `TAP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/TAP+Plugin>`_
:arg str results: TAP test result files
:arg bool fail-if-no-results: Fail if no result (default False)
:arg bool failed-tests-mark-build-as-failure:
Mark build as failure if test fails (default False)
:arg bool output-tap-to-console: Output tap to console (default True)
:arg bool enable-subtests: Enable subtests (Default True)
:arg bool discard-old-reports: Discard old reports (Default False)
    :arg bool todo-is-failure: Handle TODOs as failures (Default True)
Example::
publishers:
- tap:
results: puiparts.tap
todo-is-failure: false
"""
tap = XML.SubElement(xml_parent, 'org.tap4j.plugin.TapPublisher')
XML.SubElement(tap, 'testResults').text = data['results']
XML.SubElement(tap, 'failIfNoResults').text = str(
data.get('fail-if-no-results', False)).lower()
XML.SubElement(tap, 'failedTestsMarkBuildAsFailure').text = str(
data.get('failed-tests-mark-build-as-failure', False)).lower()
XML.SubElement(tap, 'outputTapToConsole').text = str(
data.get('output-tap-to-console', True)).lower()
XML.SubElement(tap, 'enableSubtests').text = str(
data.get('enable-subtests', True)).lower()
XML.SubElement(tap, 'discardOldReports').text = str(
data.get('discard-old-reports', False)).lower()
XML.SubElement(tap, 'todoIsFailure').text = str(
data.get('todo-is-failure', True)).lower()
def post_tasks(parser, xml_parent, data):
"""yaml: post-tasks
    Adds support for the Post Build Task plugin
Requires the Jenkins `Post Build Task plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Post+build+task>`_
:arg dict task: Post build task definition
:arg list task[matches]: list of matches when to run the task
:arg dict task[matches][*]: match definition
:arg str task[matches][*][log-text]: text to match against the log
:arg str task[matches][*][operator]: operator to apply with the next match
:task[matches][*][operator] values (default 'AND'):
* **AND**
* **OR**
:arg bool task[escalate-status]: Escalate the task status to the job
(default 'false')
:arg bool task[run-if-job-successful]: Run only if the job was successful
(default 'false')
:arg str task[script]: Shell script to run (default '')
Example::
publishers:
- post-tasks:
- matches:
- log-text: line to match
operator: AND
- log-text: line to match
operator: OR
- log-text: line to match
operator: AND
escalate-status: false
              run-if-job-successful: false
script: |
echo "Here goes the task script"
"""
pb_xml = XML.SubElement(xml_parent,
'hudson.plugins.postbuildtask.PostbuildTask')
tasks_xml = XML.SubElement(pb_xml, 'tasks')
for task in data:
task_xml = XML.SubElement(
tasks_xml,
'hudson.plugins.postbuildtask.TaskProperties')
matches_xml = XML.SubElement(task_xml, 'logTexts')
for match in task.get('matches', []):
lt_xml = XML.SubElement(
matches_xml,
'hudson.plugins.postbuildtask.LogProperties')
XML.SubElement(lt_xml, 'logText').text = str(
match.get('log-text', ''))
XML.SubElement(lt_xml, 'operator').text = str(
match.get('operator', 'AND')).upper()
XML.SubElement(task_xml, 'EscalateStatus').text = str(
task.get('escalate-status', False)).lower()
XML.SubElement(task_xml, 'RunIfJobSuccessful').text = str(
task.get('run-if-job-successful', False)).lower()
XML.SubElement(task_xml, 'script').text = str(
task.get('script', ''))
def xml_summary(parser, xml_parent, data):
"""yaml: xml-summary
Adds support for the Summary Display Plugin
Requires the Jenkins `Summary Display Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Summary+Display+Plugin>`_
:arg str files: Files to parse (default '')
Example::
publishers:
- xml-summary:
files: '*_summary_report.xml'
"""
summary = XML.SubElement(xml_parent,
'hudson.plugins.summary__report.'
'ACIPluginPublisher')
XML.SubElement(summary, 'name').text = data['files']
def robot(parser, xml_parent, data):
"""yaml: robot
Adds support for the Robot Framework Plugin
Requires the Jenkins `Robot Framework Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Robot+Framework+Plugin>`_
:arg str output-path: Path to directory containing robot xml and html files
relative to build workspace. (default '')
:arg str log-file-link: Name of log or report file to be linked on jobs
front page (default '')
:arg str report-html: Name of the html file containing robot test report
(default 'report.html')
:arg str log-html: Name of the html file containing detailed robot test log
(default 'log.html')
:arg str output-xml: Name of the xml file containing robot output
(default 'output.xml')
:arg str pass-threshold: Minimum percentage of passed tests to consider
the build successful (default 0.0)
    :arg str unstable-threshold: Minimum percentage of passed tests to
consider the build as not failed (default 0.0)
:arg bool only-critical: Take only critical tests into account when
checking the thresholds (default true)
:arg list other-files: list other files to archive (default '')
Example::
- publishers:
- robot:
output-path: reports/robot
log-file-link: report.html
report-html: report.html
log-html: log.html
output-xml: output.xml
pass-threshold: 80.0
unstable-threshold: 60.0
only-critical: false
other-files:
- extra-file1.html
- extra-file2.txt
"""
parent = XML.SubElement(xml_parent, 'hudson.plugins.robot.RobotPublisher')
XML.SubElement(parent, 'outputPath').text = data['output-path']
XML.SubElement(parent, 'logFileLink').text = str(
data.get('log-file-link', ''))
XML.SubElement(parent, 'reportFileName').text = str(
data.get('report-html', 'report.html'))
XML.SubElement(parent, 'logFileName').text = str(
data.get('log-html', 'log.html'))
XML.SubElement(parent, 'outputFileName').text = str(
data.get('output-xml', 'output.xml'))
XML.SubElement(parent, 'passThreshold').text = str(
data.get('pass-threshold', 0.0))
XML.SubElement(parent, 'unstableThreshold').text = str(
data.get('unstable-threshold', 0.0))
XML.SubElement(parent, 'onlyCritical').text = str(
data.get('only-critical', True)).lower()
other_files = XML.SubElement(parent, 'otherFiles')
    for other_file in data.get('other-files', []):
XML.SubElement(other_files, 'string').text = str(other_file)
def warnings(parser, xml_parent, data):
"""yaml: warnings
Generate trend report for compiler warnings in the console log or
in log files. Requires the Jenkins `Warnings Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Warnings+Plugin>`_
:arg list console-log-parsers: The parser to use to scan the console
log (default '')
:arg dict workspace-file-scanners:
:workspace-file-scanners:
* **file-pattern** (`str`) -- Fileset 'includes' setting that
specifies the files to scan for warnings
* **scanner** (`str`) -- The parser to use to scan the files
provided in workspace-file-pattern (default '')
:arg str files-to-include: Comma separated list of regular
expressions that specifies the files to include in the report
(based on their absolute filename). By default all files are
included
:arg str files-to-ignore: Comma separated list of regular expressions
that specifies the files to exclude from the report (based on their
absolute filename). (default '')
:arg bool run-always: By default, this plug-in runs only for stable or
unstable builds, but not for failed builds. Set to true if the
plug-in should run even for failed builds. (default false)
:arg bool detect-modules: Determines if Ant or Maven modules should be
detected for all files that contain warnings. Activating this
option may increase your build time since the detector scans
the whole workspace for 'build.xml' or 'pom.xml' files in order
to assign the correct module names. (default false)
:arg bool resolve-relative-paths: Determines if relative paths in
warnings should be resolved using a time expensive operation that
scans the whole workspace for matching files. Deactivate this
option if you encounter performance problems. (default false)
:arg int health-threshold-high: The upper threshold for the build
health. If left empty then no health report is created. If
the actual number of warnings is between the provided
thresholds then the build health is interpolated (default '')
:arg int health-threshold-low: The lower threshold for the build
health. See health-threshold-high. (default '')
:arg dict health-priorities: Determines which warning priorities
should be considered when evaluating the build health (default
all-priorities)
:health-priorities values:
* **priority-high** -- Only priority high
* **high-and-normal** -- Priorities high and normal
* **all-priorities** -- All priorities
:arg dict total-thresholds: If the number of total warnings is greater
than one of these thresholds then a build is considered as unstable
or failed, respectively. (default '')
:total-thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
:arg dict new-thresholds: If the specified number of new warnings exceeds
one of these thresholds then a build is considered as unstable or
failed, respectively. (default '')
:new-thresholds:
* **unstable** (`dict`)
:unstable: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
                         * **new-low** (`int`)
:arg bool use-delta-for-new-warnings: If set then the number of new
warnings is calculated by subtracting the total number of warnings
of the current build from the reference build. This may lead to wrong
results if you have both fixed and new warnings in a build. If not set,
then the number of new warnings is calculated by an asymmetric set
difference of the warnings in the current and reference build. This
will find all new warnings even if the number of total warnings is
decreasing. However, sometimes false positives will be reported due
        to minor changes in a warning (refactoring of variable or method
names, etc.) (default false)
:arg bool only-use-stable-builds-as-reference: The number of new warnings
will be calculated based on the last stable build, allowing reverts
of unstable builds where the number of warnings was decreased.
(default false)
    :arg str default-encoding: Default encoding when parsing or showing files.
        Leave empty to use the default encoding of the platform (default '')
Example::
publishers:
- warnings:
console-log-parsers:
- FxCop
- CodeAnalysis
workspace-file-scanners:
- file-pattern: '**/*.out'
                scanner: 'AcuCobol Compiler'
- file-pattern: '**/*.warnings'
scanner: FxCop
files-to-include: '[a-zA-Z]\.java,[a-zA-Z]\.cpp'
files-to-ignore: '[a-zA-Z]\.html,[a-zA-Z]\.js'
run-always: true
detect-modules: true
resolve-relative-paths: true
health-threshold-high: 50
health-threshold-low: 25
health-priorities: high-and-normal
total-thresholds:
unstable:
total-all: 90
total-high: 90
total-normal: 40
total-low: 30
failed:
total-all: 100
total-high: 100
total-normal: 50
total-low: 40
new-thresholds:
unstable:
new-all: 100
new-high: 50
new-normal: 30
new-low: 10
failed:
new-all: 100
new-high: 60
new-normal: 50
new-low: 40
use-delta-for-new-warnings: true
only-use-stable-builds-as-reference: true
default-encoding: ISO-8859-9
"""
warnings = XML.SubElement(xml_parent,
'hudson.plugins.warnings.'
'WarningsPublisher')
console = XML.SubElement(warnings, 'consoleParsers')
for parser in data.get('console-log-parsers', []):
console_parser = XML.SubElement(console,
'hudson.plugins.warnings.'
'ConsoleParser')
XML.SubElement(console_parser, 'parserName').text = parser
workspace = XML.SubElement(warnings, 'parserConfigurations')
for wfs in data.get('workspace-file-scanners', []):
workspace_pattern = XML.SubElement(workspace,
'hudson.plugins.warnings.'
'ParserConfiguration')
XML.SubElement(workspace_pattern, 'pattern').text = \
wfs['file-pattern']
XML.SubElement(workspace_pattern, 'parserName').text = \
wfs['scanner']
warnings_to_include = data.get('files-to-include', '')
XML.SubElement(warnings, 'includePattern').text = warnings_to_include
warnings_to_ignore = data.get('files-to-ignore', '')
XML.SubElement(warnings, 'excludePattern').text = warnings_to_ignore
run_always = str(data.get('run-always', False)).lower()
XML.SubElement(warnings, 'canRunOnFailed').text = run_always
detect_modules = str(data.get('detect-modules', False)).lower()
XML.SubElement(warnings, 'shouldDetectModules').text = detect_modules
#Note the logic reversal (included here to match the GUI)
XML.SubElement(warnings, 'doNotResolveRelativePaths').text = \
str(not data.get('resolve-relative-paths', False)).lower()
health_threshold_high = str(data.get('health-threshold-high', ''))
XML.SubElement(warnings, 'healthy').text = health_threshold_high
health_threshold_low = str(data.get('health-threshold-low', ''))
XML.SubElement(warnings, 'unHealthy').text = health_threshold_low
prioritiesDict = {'priority-high': 'high',
'high-and-normal': 'normal',
'all-priorities': 'low'}
priority = data.get('health-priorities', 'all-priorities')
if priority not in prioritiesDict:
raise JenkinsJobsException("Health-Priority entered is not valid must "
"be one of: %s" %
",".join(prioritiesDict.keys()))
XML.SubElement(warnings, 'thresholdLimit').text = prioritiesDict[priority]
td = XML.SubElement(warnings, 'thresholds')
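    # The plugin expects threshold elements named <statusBaseLevel>, e.g.
    # <unstableTotalAll> or <failedNewHigh>; generate one for every
    # status/base/level combination.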
for base in ["total", "new"]:
thresholds = data.get("%s-thresholds" % base, {})
for status in ["unstable", "failed"]:
bystatus = thresholds.get(status, {})
for level in ["all", "high", "normal", "low"]:
val = str(bystatus.get("%s-%s" % (base, level), ''))
XML.SubElement(td, "%s%s%s" % (status,
base.capitalize(), level.capitalize())
).text = val
if data.get('new-thresholds'):
XML.SubElement(warnings, 'dontComputeNew').text = 'false'
delta = data.get('use-delta-for-new-warnings', False)
XML.SubElement(warnings, 'useDeltaValues').text = str(delta).lower()
use_stable_builds = data.get('only-use-stable-builds-as-reference',
False)
XML.SubElement(warnings, 'useStableBuildAsReference').text = str(
use_stable_builds).lower()
else:
XML.SubElement(warnings, 'dontComputeNew').text = 'true'
XML.SubElement(warnings, 'useStableBuildAsReference').text = 'false'
XML.SubElement(warnings, 'useDeltaValues').text = 'false'
encoding = data.get('default-encoding', '')
XML.SubElement(warnings, 'defaultEncoding').text = encoding
def sloccount(parser, xml_parent, data):
"""yaml: sloccount
Generates the trend report for SLOCCount
Requires the Jenkins `SLOCCount Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/SLOCCount+Plugin>`_
:arg str report-files: Setting that specifies the generated raw
SLOCCount report files.
Be sure not to include any non-report files into
this pattern. The report files must have been
generated by sloccount using the
"--wide --details" options.
(default: '\*\*/sloccount.sc')
:arg str charset: The character encoding to be used to read the SLOCCount
result files. (default: 'UTF-8')
Example::
publishers:
- sloccount:
report-files: sloccount.sc
charset: UTF-8
"""
top = XML.SubElement(xml_parent,
'hudson.plugins.sloccount.SloccountPublisher')
XML.SubElement(top, 'pattern').text = data.get('report-files',
'**/sloccount.sc')
XML.SubElement(top, 'encoding').text = data.get('charset', 'UTF-8')
def ircbot(parser, xml_parent, data):
"""yaml: ircbot
ircbot enables Jenkins to send build notifications via IRC and lets you
interact with Jenkins via an IRC bot.
Requires the Jenkins `IRC Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/IRC+Plugin>`_
:arg string strategy: When to send notifications
:strategy values:
* **all** always (default)
            * **any-failure** on any failure
* **failure-and-fixed** on failure and fixes
* **new-failure-and-fixed** on new failure and fixes
* **statechange-only** only on state change
:arg bool notify-start: Whether to send notifications to channels when a
build starts
(default: false)
:arg bool notify-committers: Whether to send notifications to the users
that are suspected of having broken this build
(default: false)
:arg bool notify-culprits: Also send notifications to 'culprits' from
previous unstable/failed builds
(default: false)
:arg bool notify-upstream: Whether to send notifications to upstream
committers if no committers were found for a
broken build
(default: false)
:arg bool notify-fixers: Whether to send notifications to the users that
have fixed a broken build
(default: false)
:arg string message-type: Channel Notification Message.
:message-type values:
* **summary-scm** for summary and SCM changes (default)
* **summary** for summary only
* **summary-params** for summary and build parameters
            * **summary-scm-fail** for summary, SCM changes, and failures
    :arg list channels: list of channel definitions
If empty, it takes channel from Jenkins configuration.
(default: empty)
WARNING: the IRC plugin requires the channel to be
configured in the system wide configuration or the jobs
will fail to emit notifications to the channel
:Channel: * **name** (`str`) Channel name
* **password** (`str`) Channel password (optional)
* **notify-only** (`bool`) Set to true if you want to
disallow bot commands (default: false)
:arg string matrix-notifier: notify for matrix projects
instant-messaging-plugin injects an additional
field in the configuration form whenever the
project is a multi-configuration project
:matrix-notifier values:
* **all**
* **only-configurations** (default)
* **only-parent**
Example::
publishers:
- ircbot:
strategy: all
notify-start: false
notify-committers: false
notify-culprits: false
notify-upstream: false
notify-fixers: false
message-type: summary-scm
channels:
- name: '#jenkins-channel1'
password: secrete
notify-only: false
- name: '#jenkins-channel2'
notify-only: true
matrix-notifier: only-configurations
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.ircbot.IrcPublisher')
message_dict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-params': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
message = data.get('message-type', 'summary-scm')
if message not in message_dict:
raise JenkinsJobsException("message-type entered is not valid, must "
"be one of: %s" %
", ".join(message_dict.keys()))
message = "hudson.plugins.im.build_notify." + message_dict.get(message)
XML.SubElement(top, 'buildToChatNotifier', attrib={'class': message})
strategy_dict = {'all': 'ALL',
'any-failure': 'ANY_FAILURE',
'failure-and-fixed': 'FAILURE_AND_FIXED',
'new-failure-and-fixed': 'NEW_FAILURE_AND_FIXED',
'statechange-only': 'STATECHANGE_ONLY'}
strategy = data.get('strategy', 'all')
if strategy not in strategy_dict:
raise JenkinsJobsException("strategy entered is not valid, must be "
"one of: %s" %
", ".join(strategy_dict.keys()))
XML.SubElement(top, 'strategy').text = strategy_dict.get(strategy)
targets = XML.SubElement(top, 'targets')
channels = data.get('channels', [])
for channel in channels:
sub = XML.SubElement(targets,
'hudson.plugins.im.GroupChatIMMessageTarget')
XML.SubElement(sub, 'name').text = channel.get('name')
XML.SubElement(sub, 'password').text = channel.get('password')
XML.SubElement(sub, 'notificationOnly').text = str(
channel.get('notify-only', False)).lower()
XML.SubElement(top, 'notifyOnBuildStart').text = str(
data.get('notify-start', False)).lower()
XML.SubElement(top, 'notifySuspects').text = str(
data.get('notify-committers', False)).lower()
XML.SubElement(top, 'notifyCulprits').text = str(
data.get('notify-culprits', False)).lower()
XML.SubElement(top, 'notifyFixers').text = str(
data.get('notify-fixers', False)).lower()
XML.SubElement(top, 'notifyUpstreamCommitters').text = str(
data.get('notify-upstream', False)).lower()
matrix_dict = {'all': 'ALL',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
    matrix = data.get('matrix-notifier', 'only-configurations')
if matrix not in matrix_dict:
raise JenkinsJobsException("matrix-notifier entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(top, 'matrixMultiplier').text = matrix_dict.get(matrix)
def plot(parser, xml_parent, data):
"""yaml: plot
Plot provides generic plotting (or graphing).
Requires the Jenkins `Plot Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Plot+Plugin>`_
:arg str title: title for the graph
(default: '')
:arg str yaxis: title of Y axis
:arg str group: name of the group to which the plot belongs
:arg int num-builds: number of builds to plot across
(default: plot all builds)
:arg str style: Specifies the graph style of the plot
Can be: area, bar, bar3d, line, line3d, stackedArea,
stackedbar, stackedbar3d, waterfall
(default: 'line')
:arg bool use-description: When false, the X-axis labels are formed
using build numbers and dates, and the
corresponding tooltips contain the build
descriptions. When enabled, the contents of
the labels and tooltips are swapped, with the
descriptions used as X-axis labels and the
build number and date used for tooltips.
(default: False)
    :arg str csv-file-name: Name of the file in which the plot data will be
                            persisted. If none is specified, a random name
                            is generated, as done by the Jenkins Plot
                            plugin.
(default: random generated .csv filename, same
behaviour as the Jenkins Plot plugin)
    :arg list series: list of data series definitions
        :Series: * **file** (`str`) : files to include
* **inclusion-flag** filtering mode for CSV files. Possible
values are:
* **off** (default)
* **include-by-string**
* **exclude-by-string**
* **include-by-column**
* **exclude-by-column**
* **exclude** (`str`) : exclude pattern for CSV file.
* **url** (`str`) : for 'csv' and 'xml' file types
used when you click on a point (default: empty)
* **display-table** (`bool`) : for 'csv' file type
if true, original CSV will be shown above plot (default: False)
* **label** (`str`) : used by 'properties' file type
Specifies the legend label for this data series.
(default: empty)
                * **format** (`str`) : Type of file the data is read from.
Can be: properties, csv, xml
* **xpath-type** (`str`) : The result type of the expression must
be supplied due to limitations in the java.xml.xpath parsing.
The result can be: node, nodeset, boolean, string, or number.
Strings and numbers will be converted to double. Boolean will
be converted to 1 for true, and 0 for false. (default: 'node')
* **xpath** (`str`) : used by 'xml' file type
Xpath which selects the values that should be plotted.
Example::
publishers:
- plot:
- title: MyPlot
yaxis: Y
group: PlotGroup
num-builds: ''
style: line
use-description: false
series:
- file: graph-me-second.properties
label: MyLabel
format: properties
- file: graph-me-first.csv
url: 'http://srv1'
inclusion-flag: 'off'
display-table: true
format: csv
- title: MyPlot2
yaxis: Y
group: PlotGroup
style: line
use-description: false
series:
- file: graph-me-third.xml
url: 'http://srv2'
format: xml
xpath-type: 'node'
xpath: '/*'
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.plot.PlotPublisher')
plots = XML.SubElement(top, 'plots')
format_dict = {'properties': 'hudson.plugins.plot.PropertiesSeries',
'csv': 'hudson.plugins.plot.CSVSeries',
'xml': 'hudson.plugins.plot.XMLSeries'}
xpath_dict = {'nodeset': 'NODESET', 'node': 'NODE', 'string': 'STRING',
'boolean': 'BOOLEAN', 'number': 'NUMBER'}
inclusion_dict = {'off': 'OFF',
'include-by-string': 'INCLUDE_BY_STRING',
'exclude-by-string': 'EXCLUDE_BY_STRING',
'include-by-column': 'INCLUDE_BY_COLUMN',
'exclude-by-column': 'EXCLUDE_BY_COLUMN'}
for plot in data:
plugin = XML.SubElement(plots, 'hudson.plugins.plot.Plot')
XML.SubElement(plugin, 'title').text = plot.get('title', '')
XML.SubElement(plugin, 'yaxis').text = plot['yaxis']
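        # Fall back to a randomly generated file name when csv-file-name is
        # not given, mirroring the behaviour of the Jenkins Plot plugin.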
XML.SubElement(plugin, 'csvFileName').text = \
plot.get('csv-file-name', '%s.csv' % random.randrange(2 << 32))
topseries = XML.SubElement(plugin, 'series')
series = plot['series']
for serie in series:
format_data = serie.get('format')
if format_data not in format_dict:
raise JenkinsJobsException("format entered is not valid, must "
"be one of: %s" %
" , ".join(format_dict.keys()))
subserie = XML.SubElement(topseries, format_dict.get(format_data))
XML.SubElement(subserie, 'file').text = serie.get('file')
if format_data == 'properties':
XML.SubElement(subserie, 'label').text = serie.get('label', '')
if format_data == 'csv':
inclusion_flag = serie.get('inclusion-flag', 'off')
if inclusion_flag not in inclusion_dict:
raise JenkinsJobsException("Inclusion flag result entered "
"is not valid, must be one of: "
"%s"
% ", ".join(inclusion_dict))
XML.SubElement(subserie, 'inclusionFlag').text = \
inclusion_dict.get(inclusion_flag)
XML.SubElement(subserie, 'exclusionValues').text = \
serie.get('exclude', '')
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'displayTableFlag').text = \
                str(serie.get('display-table', False)).lower()
if format_data == 'xml':
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'xpathString').text = \
serie.get('xpath')
xpathtype = serie.get('xpath-type', 'node')
if xpathtype not in xpath_dict:
raise JenkinsJobsException("XPath result entered is not "
"valid, must be one of: %s" %
", ".join(xpath_dict))
XML.SubElement(subserie, 'nodeTypeString').text = \
xpath_dict.get(xpathtype)
XML.SubElement(subserie, 'fileType').text = serie.get('format')
XML.SubElement(plugin, 'group').text = plot['group']
XML.SubElement(plugin, 'useDescr').text = \
str(plot.get('use-description', False)).lower()
        XML.SubElement(plugin, 'numBuilds').text = \
            str(plot.get('num-builds', ''))
style_list = ['area', 'bar', 'bar3d', 'line', 'line3d', 'stackedArea',
'stackedbar', 'stackedbar3d', 'waterfall']
style = plot.get('style', 'line')
if style not in style_list:
raise JenkinsJobsException("style entered is not valid, must be "
"one of: %s" % ", ".join(style_list))
XML.SubElement(plugin, 'style').text = style
def git(parser, xml_parent, data):
"""yaml: git
This plugin will configure the Jenkins Git plugin to
push merge results, tags, and/or branches to
remote repositories after the job completes.
Requires the Jenkins `Git Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Git+Plugin>`_
:arg bool push-merge: push merges back to the origin specified in the
pre-build merge options (Default: False)
:arg bool push-only-if-success: Only push to remotes if the build succeeds
- otherwise, nothing will be pushed.
(Default: True)
:arg list tags: tags to push at the completion of the build
:tag: * **remote** (`str`) remote repo name to push to
(Default: 'origin')
* **name** (`str`) name of tag to push
* **message** (`str`) message content of the tag
* **create-tag** (`bool`) whether or not to create the tag
after the build, if this is False then the tag needs to
exist locally (Default: False)
* **update-tag** (`bool`) whether to overwrite a remote tag
or not (Default: False)
:arg list branches: branches to push at the completion of the build
:branch: * **remote** (`str`) remote repo name to push to
(Default: 'origin')
* **name** (`str`) name of remote branch to push to
:arg list notes: notes to push at the completion of the build
:note: * **remote** (`str`) remote repo name to push to
(Default: 'origin')
* **message** (`str`) content of the note
* **namespace** (`str`) namespace of the note
(Default: master)
* **replace-note** (`bool`) whether to overwrite a note or not
(Default: False)
Example::
publishers:
- git:
push-merge: true
push-only-if-success: false
tags:
- tag:
remote: tagremotename
name: tagname
message: "some tag message"
create-tag: true
update-tag: true
branches:
- branch:
remote: branchremotename
name: "some/branch"
notes:
- note:
remote: remotename
message: "some note to push"
namespace: commits
replace-note: true
"""
mappings = [('push-merge', 'pushMerge', False),
('push-only-if-success', 'pushOnlyIfSuccess', True)]
tag_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'tagName', None),
('message', 'tagMessage', ''),
('create-tag', 'createTag', False),
('update-tag', 'updateTag', False)]
branch_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'branchName', None)]
note_mappings = [('remote', 'targetRepoName', 'origin'),
('message', 'noteMsg', None),
('namespace', 'noteNamespace', 'master'),
('replace-note', 'noteReplace', False)]
def handle_entity_children(entity, entity_xml, child_mapping):
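        # child_mapping entries are (yaml-option, xml-element, default)
        # tuples; a default of None marks the option as required.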
for prop in child_mapping:
opt, xmlopt, default_val = prop[:3]
val = entity.get(opt, default_val)
if val is None:
raise JenkinsJobsException('Required option missing: %s' % opt)
if type(val) == bool:
val = str(val).lower()
XML.SubElement(entity_xml, xmlopt).text = val
top = XML.SubElement(xml_parent, 'hudson.plugins.git.GitPublisher')
XML.SubElement(top, 'configVersion').text = '2'
handle_entity_children(data, top, mappings)
tags = data.get('tags', [])
if tags:
xml_tags = XML.SubElement(top, 'tagsToPush')
for tag in tags:
xml_tag = XML.SubElement(
xml_tags,
'hudson.plugins.git.GitPublisher_-TagToPush')
handle_entity_children(tag['tag'], xml_tag, tag_mappings)
branches = data.get('branches', [])
if branches:
xml_branches = XML.SubElement(top, 'branchesToPush')
for branch in branches:
xml_branch = XML.SubElement(
xml_branches,
'hudson.plugins.git.GitPublisher_-BranchToPush')
handle_entity_children(branch['branch'], xml_branch,
branch_mappings)
notes = data.get('notes', [])
if notes:
xml_notes = XML.SubElement(top, 'notesToPush')
for note in notes:
xml_note = XML.SubElement(
xml_notes,
'hudson.plugins.git.GitPublisher_-NoteToPush')
handle_entity_children(note['note'], xml_note, note_mappings)
def github_notifier(parser, xml_parent, data):
"""yaml: github-notifier
Set build status on Github commit.
Requires the Jenkins `Github Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/GitHub+Plugin>`_
Example:
.. literalinclude:: /../../tests/publishers/fixtures/github-notifier.yaml
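    A minimal illustrative configuration (this publisher takes no options)::
      publishers:
        - github-notifier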
"""
XML.SubElement(xml_parent,
'com.cloudbees.jenkins.GitHubCommitNotifier')
def build_publisher(parser, xml_parent, data):
"""yaml: build-publisher
This plugin allows records from one Jenkins to be published
on another Jenkins.
Requires the Jenkins `Build Publisher Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Build+Publisher+Plugin>`_
    :arg str name: Name of the Build Publisher server to publish records to
Example::
publishers:
- build-publisher:
name: servername
publish-unstable-builds: true
publish-failed-builds: true
days-to-keep: -1
num-to-keep: -1
artifact-days-to-keep: -1
artifact-num-to-keep: -1
"""
reporter = XML.SubElement(
xml_parent,
'hudson.plugins.build__publisher.BuildPublisher')
XML.SubElement(reporter, 'serverName').text = data['name']
XML.SubElement(reporter, 'publishUnstableBuilds').text = \
str(data.get('publish-unstable-builds', True)).lower()
XML.SubElement(reporter, 'publishFailedBuilds').text = \
str(data.get('publish-failed-builds', True)).lower()
logrotator = XML.SubElement(reporter, 'logRotator')
XML.SubElement(logrotator, 'daysToKeep').text = \
str(data.get('days-to-keep', -1))
XML.SubElement(logrotator, 'numToKeep').text = \
str(data.get('num-to-keep', -1))
XML.SubElement(logrotator, 'artifactDaysToKeep').text = \
str(data.get('artifact-days-to-keep', -1))
XML.SubElement(logrotator, 'artifactNumToKeep').text = \
str(data.get('artifact-num-to-keep', -1))
def stash(parser, xml_parent, data):
"""yaml: stash
This plugin will configure the Jenkins Stash Notifier plugin to
notify Atlassian Stash after job completes.
Requires the Jenkins `StashNotifier Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/StashNotifier+Plugin>`_
:arg string url: Base url of Stash Server (Default: "")
:arg string username: Username of Stash Server (Default: "")
:arg string password: Password of Stash Server (Default: "")
:arg bool ignore-ssl: Ignore unverified SSL certificate (Default: False)
:arg string commit-sha1: Commit SHA1 to notify (Default: "")
:arg bool include-build-number: Include build number in key
(Default: False)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/stash001.yaml
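    An illustrative configuration; the server, user and password values
    below are placeholders, not plugin defaults::
      publishers:
        - stash:
            url: 'https://stash.example.com'
            username: 'jenkins'
            password: 'secret'
            ignore-ssl: false
            include-build-number: true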
"""
top = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.stashNotifier.StashNotifier')
XML.SubElement(top, 'stashServerBaseUrl').text = data.get('url', '')
XML.SubElement(top, 'stashUserName').text = data.get('username', '')
XML.SubElement(top, 'stashUserPassword').text = data.get('password', '')
XML.SubElement(top, 'ignoreUnverifiedSSLPeer').text = str(
data.get('ignore-ssl', False)).lower()
XML.SubElement(top, 'commitSha1').text = data.get('commit-sha1', '')
XML.SubElement(top, 'includeBuildNumberInKey').text = str(
data.get('include-build-number', False)).lower()
def description_setter(parser, xml_parent, data):
"""yaml: description-setter
This plugin sets the description for each build,
based upon a RegEx test of the build log file.
Requires the Jenkins `Description Setter Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Description+Setter+Plugin>`_
:arg str regexp: A RegEx which is used to scan the build log file
:arg str regexp-for-failed: A RegEx which is used for failed builds
(optional)
:arg str description: The description to set on the build (optional)
:arg str description-for-failed: The description to set on
the failed builds (optional)
:arg bool set-for-matrix: Also set the description on
a multi-configuration build (Default False)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/description-setter001.yaml
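    An illustrative configuration; the regular expressions and descriptions
    below are placeholders::
      publishers:
        - description-setter:
            regexp: "some regexp to match"
            regexp-for-failed: "some regexp for failed builds"
            description: "some description"
            description-for-failed: "failed description"
            set-for-matrix: false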
"""
descriptionsetter = XML.SubElement(
xml_parent,
'hudson.plugins.descriptionsetter.DescriptionSetterPublisher')
XML.SubElement(descriptionsetter, 'regexp').text = data.get('regexp', '')
XML.SubElement(descriptionsetter, 'regexpForFailed').text = \
data.get('regexp-for-failed', '')
if 'description' in data:
XML.SubElement(descriptionsetter, 'description').text = \
data['description']
if 'description-for-failed' in data:
XML.SubElement(descriptionsetter, 'descriptionForFailed').text = \
data['description-for-failed']
for_matrix = str(data.get('set-for-matrix', False)).lower()
XML.SubElement(descriptionsetter, 'setForMatrix').text = for_matrix
def sitemonitor(parser, xml_parent, data):
"""yaml: sitemonitor
    This plugin checks the availability of a URL.
It requires the `sitemonitor plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/SiteMonitor+Plugin>`_
:arg list sites: List of URLs to check
Example:
.. literalinclude:: /../../tests/publishers/fixtures/sitemonitor001.yaml
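    An illustrative configuration; the URLs below are placeholders::
      publishers:
        - sitemonitor:
            sites:
              - url: http://example.com
              - url: https://example.com/status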
"""
mon = XML.SubElement(xml_parent,
'hudson.plugins.sitemonitor.SiteMonitorRecorder')
if data.get('sites'):
sites = XML.SubElement(mon, 'mSites')
for siteurl in data.get('sites'):
site = XML.SubElement(sites,
'hudson.plugins.sitemonitor.model.Site')
XML.SubElement(site, 'mUrl').text = siteurl['url']
def testng(parser, xml_parent, data):
"""yaml: testng
This plugin publishes TestNG test reports.
Requires the Jenkins `TestNG Results Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/testng-plugin>`_
:arg str pattern: filename pattern to locate the TestNG XML report files
:arg bool escape-test-description: escapes the description string
associated with the test method while displaying test method details
(Default True)
:arg bool escape-exception-msg: escapes the test method's exception
messages. (Default True)
    Example:
.. literalinclude::
/../../tests/publishers/fixtures/testng001.yaml
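    An illustrative configuration; the report pattern below is a
    placeholder, not a plugin default::
      publishers:
        - testng:
            pattern: '**/testng-results.xml'
            escape-test-description: true
            escape-exception-msg: true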
"""
reporter = XML.SubElement(xml_parent, 'hudson.plugins.testng.Publisher')
if not data['pattern']:
raise JenkinsJobsException("A filename pattern must be specified.")
XML.SubElement(reporter, 'reportFilenamePattern').text = data['pattern']
XML.SubElement(reporter, 'escapeTestDescp').text = str(data.get(
'escape-test-description', True))
XML.SubElement(reporter, 'escapeExceptionMsg').text = str(data.get(
'escape-exception-msg', True))
def artifact_deployer(parser, xml_parent, data):
"""yaml: artifact-deployer
This plugin makes it possible to copy artifacts to remote locations.
Requires the Jenkins `ArtifactDeployer Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/ArtifactDeployer+Plugin>`_
:arg list entries:
:entries:
* **files** (`str`) - files to deploy
            * **basedir** (`str`) - the dir from which files are deployed
* **excludes** (`str`) - the mask to exclude files
* **remote** (`str`) - a remote output directory
* **flatten** (`bool`) - ignore the source directory structure
(Default: False)
* **delete-remote** (`bool`) - clean-up remote directory
before deployment (Default: False)
* **delete-remote-artifacts** (`bool`) - delete remote artifacts
when the build is deleted (Default: False)
* **fail-no-files** (`bool`) - fail build if there are no files
(Default: False)
* **groovy-script** (`str`) - execute a Groovy script
before a build is deleted
:arg bool deploy-if-fail: Deploy if the build is failed (Default: False)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/artifact-dep.yaml
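    An illustrative configuration; the paths and patterns below are
    placeholders::
      publishers:
        - artifact-deployer:
            entries:
              - files: '*.tar.gz'
                basedir: 'dist'
                excludes: '*-debug.tar.gz'
                remote: '/var/www/artifacts'
                flatten: true
                delete-remote: false
                fail-no-files: true
            deploy-if-fail: false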
"""
deployer = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.artifactdeployer.'
'ArtifactDeployerPublisher')
if data is None or 'entries' not in data:
        raise JenkinsJobsException('entries field is missing')
elif data.get('entries', None) is None:
entries = XML.SubElement(deployer, 'entries', {'class': 'empty-list'})
else:
entries = XML.SubElement(deployer, 'entries')
for entry in data.get('entries'):
deployer_entry = XML.SubElement(
entries,
'org.jenkinsci.plugins.artifactdeployer.ArtifactDeployerEntry')
XML.SubElement(deployer_entry, 'includes').text = \
entry.get('files')
XML.SubElement(deployer_entry, 'basedir').text = \
entry.get('basedir')
XML.SubElement(deployer_entry, 'excludes').text = \
entry.get('excludes')
XML.SubElement(deployer_entry, 'remote').text = entry.get('remote')
XML.SubElement(deployer_entry, 'flatten').text = \
str(entry.get('flatten', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemote').text = \
str(entry.get('delete-remote', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemoteArtifacts').text = \
str(entry.get('delete-remote-artifacts', False)).lower()
XML.SubElement(deployer_entry, 'failNoFilesDeploy').text = \
str(entry.get('fail-no-files', False)).lower()
XML.SubElement(deployer_entry, 'groovyExpression').text = \
entry.get('groovy-script')
deploy_if_fail = str(data.get('deploy-if-fail', False)).lower()
XML.SubElement(deployer, 'deployEvenBuildFail').text = deploy_if_fail
class Publishers(jenkins_jobs.modules.base.Base):
sequence = 70
component_type = 'publisher'
component_list_type = 'publishers'
def gen_xml(self, parser, xml_parent, data):
publishers = XML.SubElement(xml_parent, 'publishers')
for action in data.get('publishers', []):
self.registry.dispatch('publisher', parser, publishers, action)
|
apache-2.0
| -4,559,542,569,043,464,700
| 40.622922
| 79
| 0.600895
| false
| 4.133573
| true
| false
| false
|
joshwalawender/POCS
|
pocs/focuser/focuser.py
|
1
|
16806
|
from .. import PanBase
from ..utils import images
from ..utils import current_time
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting
import numpy as np
import os
from threading import Event, Thread
class AbstractFocuser(PanBase):
"""
Base class for all focusers
"""
def __init__(self,
name='Generic Focuser',
model='simulator',
port=None,
camera=None,
initial_position=None,
autofocus_range=None,
autofocus_step=None,
autofocus_seconds=None,
autofocus_size=None,
autofocus_merit_function=None,
autofocus_merit_function_kwargs=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.model = model
self.port = port
self.name = name
self._connected = False
self._serial_number = 'XXXXXX'
self._position = initial_position
if autofocus_range:
self.autofocus_range = (int(autofocus_range[0]), int(autofocus_range[1]))
else:
self.autofocus_range = None
if autofocus_step:
self.autofocus_step = (int(autofocus_step[0]), int(autofocus_step[1]))
else:
self.autofocus_step = None
self.autofocus_seconds = autofocus_seconds
self.autofocus_size = autofocus_size
self.autofocus_merit_function = autofocus_merit_function
self.autofocus_merit_function_kwargs = autofocus_merit_function_kwargs
self._camera = camera
self.logger.debug('Focuser created: {} on {}'.format(self.name, self.port))
##################################################################################################
# Properties
##################################################################################################
@property
def uid(self):
""" A serial number for the focuser """
return self._serial_number
@property
def is_connected(self):
""" Is the focuser available """
return self._connected
@property
def position(self):
""" Current encoder position of the focuser """
return self._position
@position.setter
def position(self, position):
""" Move focusser to new encoder position """
self.move_to(position)
@property
def camera(self):
"""
Reference to the Camera object that the Focuser is assigned to, if any. A Focuser
should only ever be assigned to one or zero Cameras!
"""
return self._camera
@camera.setter
def camera(self, camera):
if self._camera:
self.logger.warning("{} already assigned to camera {}, skipping attempted assignment to {}!".format(
self, self.camera, camera))
else:
self._camera = camera
@property
def min_position(self):
""" Get position of close limit of focus travel, in encoder units """
raise NotImplementedError
@property
def max_position(self):
""" Get position of far limit of focus travel, in encoder units """
raise NotImplementedError
##################################################################################################
# Methods
##################################################################################################
def move_to(self, position):
""" Move focusser to new encoder position """
raise NotImplementedError
def move_by(self, increment):
""" Move focusser by a given amount """
raise NotImplementedError
def autofocus(self,
seconds=None,
focus_range=None,
focus_step=None,
thumbnail_size=None,
merit_function=None,
merit_function_kwargs=None,
coarse=False,
plots=True,
blocking=False,
*args, **kwargs):
"""
Focuses the camera using the specified merit function. Optionally performs a coarse focus first before
performing the default fine focus. The expectation is that coarse focus will only be required for first use
        of an optic to establish the approximate position of infinity focus and after updating the initial focus
position in the config only fine focus will be required.
Args:
seconds (scalar, optional): Exposure time for focus exposures, if not specified will use value from config
focus_range (2-tuple, optional): Coarse & fine focus sweep range, in encoder units. Specify to override
values from config
            focus_step (2-tuple, optional): Coarse & fine focus sweep steps, in encoder units. Specify to override
values from config
thumbnail_size (int, optional): Size of square central region of image to use, default 500 x 500 pixels
merit_function (str/callable, optional): Merit function to use as a focus metric, default vollath_F4
merit_function_kwargs (dict, optional): Dictionary of additional keyword arguments for the merit function
coarse (bool, optional): Whether to begin with coarse focusing, default False
            plots (bool, optional): Whether to write focus plots to images folder, default True.
blocking (bool, optional): Whether to block until autofocus complete, default False
Returns:
threading.Event: Event that will be set when autofocusing is complete
"""
assert self._camera.is_connected, self.logger.error("Camera must be connected for autofocus!")
assert self.is_connected, self.logger.error("Focuser must be connected for autofocus!")
if not focus_range:
if self.autofocus_range:
focus_range = self.autofocus_range
else:
raise ValueError("No focus_range specified, aborting autofocus of {}!".format(self._camera))
if not focus_step:
if self.autofocus_step:
focus_step = self.autofocus_step
else:
raise ValueError("No focus_step specified, aborting autofocus of {}!".format(self._camera))
if not seconds:
if self.autofocus_seconds:
seconds = self.autofocus_seconds
else:
raise ValueError("No focus exposure time specified, aborting autofocus of {}!".format(self._camera))
if not thumbnail_size:
if self.autofocus_size:
thumbnail_size = self.autofocus_size
else:
raise ValueError("No focus thumbnail size specified, aborting autofocus of {}!".format(self._camera))
if not merit_function:
if self.autofocus_merit_function:
merit_function = self.autofocus_merit_function
else:
merit_function = 'vollath_F4'
if not merit_function_kwargs:
if self.autofocus_merit_function_kwargs:
merit_function_kwargs = self.autofocus_merit_function_kwargs
else:
merit_function_kwargs = {}
if coarse:
coarse_event = Event()
coarse_thread = Thread(target=self._autofocus,
args=args,
kwargs={'seconds': seconds,
'focus_range': focus_range,
'focus_step': focus_step,
'thumbnail_size': thumbnail_size,
'merit_function': merit_function,
'merit_function_kwargs': merit_function_kwargs,
'coarse': coarse,
'plots': plots,
'start_event': None,
'finished_event': coarse_event,
**kwargs})
coarse_thread.start()
else:
coarse_event = None
fine_event = Event()
fine_thread = Thread(target=self._autofocus,
args=args,
kwargs={'seconds': seconds,
'focus_range': focus_range,
'focus_step': focus_step,
'thumbnail_size': thumbnail_size,
'merit_function': merit_function,
'merit_function_kwargs': merit_function_kwargs,
'coarse': coarse,
'plots': plots,
'start_event': coarse_event,
'finished_event': fine_event,
**kwargs})
fine_thread.start()
if blocking:
fine_event.wait()
return fine_event
def _autofocus(self, seconds, focus_range, focus_step, thumbnail_size, merit_function,
merit_function_kwargs, coarse, plots, start_event, finished_event, *args, **kwargs):
# If passed a start_event wait until Event is set before proceeding (e.g. wait for coarse focus
# to finish before starting fine focus).
if start_event:
start_event.wait()
initial_focus = self.position
if coarse:
self.logger.debug("Beginning coarse autofocus of {} - initial focus position: {}".format(self._camera,
initial_focus))
else:
self.logger.debug("Beginning autofocus of {} - initial focus position: {}".format(self._camera,
initial_focus))
# Set up paths for temporary focus files, and plots if requested.
image_dir = self.config['directories']['images']
start_time = current_time(flatten=True)
file_path = "{}/{}/{}/{}.{}".format(image_dir,
'focus',
self._camera.uid,
start_time,
self._camera.file_extension)
if plots:
# Take an image before focusing, grab a thumbnail from the centre and add it to the plot
thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size)
fig = plt.figure(figsize=(9, 18), tight_layout=True)
ax1 = fig.add_subplot(3, 1, 1)
im1 = ax1.imshow(thumbnail, interpolation='none', cmap='cubehelix')
fig.colorbar(im1)
ax1.set_title('Initial focus position: {}'.format(initial_focus))
# Set up encoder positions for autofocus sweep, truncating at focus travel limits if required.
if coarse:
focus_range = focus_range[1]
focus_step = focus_step[1]
else:
focus_range = focus_range[0]
focus_step = focus_step[0]
focus_positions = np.arange(max(initial_focus - focus_range / 2, self.min_position),
min(initial_focus + focus_range / 2, self.max_position) + 1,
focus_step, dtype=np.int)
n_positions = len(focus_positions)
metric = np.empty((n_positions))
for i, position in enumerate(focus_positions):
# Move focus, updating focus_positions with actual encoder position after move.
focus_positions[i] = self.move_to(position)
# Take exposure
thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size)
# Calculate Vollath F4 focus metric
metric[i] = images.focus_metric(thumbnail, merit_function, **merit_function_kwargs)
self.logger.debug("Focus metric at position {}: {}".format(position, metric[i]))
# Find maximum values
imax = metric.argmax()
if imax == 0 or imax == (n_positions - 1):
# TODO: have this automatically switch to coarse focus mode if this happens
self.logger.warning("Best focus outside sweep range, aborting autofocus on {}!".format(self._camera))
best_focus = focus_positions[imax]
elif not coarse:
# Fit to data around the max value to determine best focus position. Lorentz function seems to fit OK
# provided you only fit in the immediate vicinity of the max value.
# Initialise models
fit = models.Lorentz1D(x_0=focus_positions[imax], amplitude=metric.max())
# Initialise fitter
fitter = fitting.LevMarLSQFitter()
# Select data range for fitting. Tries to use 2 points either side of max, if in range.
fitting_indices = (max(imax - 2, 0), min(imax + 2, n_positions - 1))
# Fit models to data
fit = fitter(fit,
focus_positions[fitting_indices[0]:fitting_indices[1] + 1],
metric[fitting_indices[0]:fitting_indices[1] + 1])
best_focus = fit.x_0.value
# Guard against fitting failures, force best focus to stay within sweep range
if best_focus < focus_positions[0]:
self.logger.warning("Fitting failure: best focus {} below sweep limit {}".format(best_focus,
focus_positions[0]))
best_focus = focus_positions[0]
if best_focus > focus_positions[-1]:
self.logger.warning("Fitting failure: best focus {} above sweep limit {}".format(best_focus,
focus_positions[-1]))
best_focus = focus_positions[-1]
else:
# Coarse focus, just use max value.
best_focus = focus_positions[imax]
if plots:
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(focus_positions, metric, 'bo', label='{}'.format(merit_function))
if not (imax == 0 or imax == (n_positions - 1)) and not coarse:
fs = np.arange(focus_positions[fitting_indices[0]], focus_positions[fitting_indices[1]] + 1)
ax2.plot(fs, fit(fs), 'b-', label='Lorentzian fit')
ax2.set_xlim(focus_positions[0] - focus_step / 2, focus_positions[-1] + focus_step / 2)
u_limit = 1.10 * metric.max()
l_limit = min(0.95 * metric.min(), 1.05 * metric.min())
ax2.set_ylim(l_limit, u_limit)
ax2.vlines(initial_focus, l_limit, u_limit, colors='k', linestyles=':',
label='Initial focus')
ax2.vlines(best_focus, l_limit, u_limit, colors='k', linestyles='--',
label='Best focus')
ax2.set_xlabel('Focus position')
ax2.set_ylabel('Focus metric')
if coarse:
ax2.set_title('{} coarse focus at {}'.format(self._camera, start_time))
else:
ax2.set_title('{} fine focus at {}'.format(self._camera, start_time))
ax2.legend(loc='best')
final_focus = self.move_to(best_focus)
if plots:
thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size)
ax3 = fig.add_subplot(3, 1, 3)
im3 = ax3.imshow(thumbnail, interpolation='none', cmap='cubehelix')
fig.colorbar(im3)
ax3.set_title('Final focus position: {}'.format(final_focus))
plot_path = os.path.splitext(file_path)[0] + '.png'
fig.savefig(plot_path)
plt.close(fig)
if coarse:
self.logger.info('Coarse focus plot for camera {} written to {}'.format(self._camera, plot_path))
else:
self.logger.info('Fine focus plot for camera {} written to {}'.format(self._camera, plot_path))
self.logger.debug('Autofocus of {} complete - final focus position: {}'.format(self._camera, final_focus))
if finished_event:
finished_event.set()
return initial_focus, final_focus
def __str__(self):
return "{} ({}) on {}".format(self.name, self.uid, self.port)
|
mit
| -576,820,583,751,011,800
| 41.872449
| 118
| 0.529751
| false
| 4.636138
| false
| false
| false
|
danielrenechaparro/Python
|
Clases.py
|
1
|
4406
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Clases.py
#
# Copyright 2016 Daniel Rene <danielrenechaparro@openmailbox.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Normal class, without inheritance
class MyClase:
"""Esto es la documentacion de la clase"""
#Contructor de la clase, inicializa los atributos, siempre debe tener self
def __init__(self, valInicial):
self.valInicial = valInicial
print("Inicialice la variable en el constructor")
    # Every method of the class must always take self as its first parameter
def metodoNormal(self):
"Estos metodos pueden llevar documentacion"
print("El valor inicial es: " + self.valInicial)
def __del__(self):
print("destructor")
def __str__(self):
return "metodo equivalente a toString"
    # Method called when this object is compared with another: <, <=, >, >=
def __cmp__(self, otroObjetoAComparar) :
        if otroObjetoAComparar > 0:  # the passed object compares as greater than this one; return a positive number
            return 1
        if otroObjetoAComparar < 0:  # the passed object compares as smaller than this one; return a negative number
            return -1
        else:
            return 0  # the objects compare as equal; return zero
def __len__(self):
return 777
# Inheritance
class ClaseDerivada1(MyClase):
"Esta clase hereda y no crea ni modifica nada del padre"
pass
class ClaseDerivada2(MyClase):
"En esta clase si se modifica el init, haciendo uso del init de la super clase, pero agregandole otras cosas"
def __init__(self, valInicial):
MyClase.__init__(self, valInicial)
print("Inicializado desde la clase hija")
class Clase2:
"Esta clase sive xa mostrar la herencia multiple"
def __init__(self, valInicial):
self.valInicial = valInicial
print("Cargue el valor desde la clase2")
def metodo2(self, mensaje):
print("este es el otro mensaje: "+mensaje)
    # defining the getters and setters
def getValInicial(self):
return self.valInicial
def setValInicial(self, valInicial):
self.valInicial = valInicial
class ClaseDerivada3(MyClase, Clase2):
"Esta clase hereda de MyClase y de Clase2, si las clases tienen metodos q se llaman igual y reciben los mismos parametros, se sobre escriben, prevaleciendo los metodos de la clase mas a la izquierda, en este caso MyClase"
def metodoPublico(self):
print("Publico")
def __metodoPrivado(self):
print("Privado")
# Another way to define the getters and setters, as a property of the attribute
class Clase3:
def setDia(self, dia):
if dia > 0 and dia < 32:
self.__dia = dia
else:
print("error")
def getDia(self):
return self.__dia
dia = property(getDia, setDia)
def main():
objeto = ClaseDerivada3("cadena de texto")
objeto.metodoNormal()
objeto.metodo2("Kenicito")
objeto.metodoPublico()
#objeto.__metodoPrivado()
    # When a method name begins with __ the interpreter actually renames it using the class name, so to access it you must use the class name together with the method name
objeto._ClaseDerivada3__metodoPrivado()
print("El valor pasado a la clase fue: "+objeto.getValInicial())
p = Clase3()
p.dia = 33
print(p.dia)
q = MyClase(10)
print("Longitud: ")
print(len(q))
print("comparacion: ")
print(cmp(q,10))
print(q)
cadenaLarga = ("se puede partir una cadena"
"en varias lineas sin problema"
"gracias al uso de los parentesis, corcheteso llaves")
cadenaLarga2 = """con el uso de 3 comillas
se pueden partir las cadenas
sin problemas"""
print(cadenaLarga2)
return 0
# This statement lets the interpreter know where to start the program
if __name__ == '__main__':
main()
|
gpl-3.0
| 7,002,834,793,237,624,000
| 28.178808
| 222
| 0.715161
| false
| 2.684948
| false
| false
| false
|
MartinPyka/Parametric-Anatomical-Modeling
|
pam/connection_mapping.py
|
1
|
19453
|
"""This module contains the functions and classes needed for mapping points between layers"""
import logging
import mathutils
import random
import numpy
from .mesh import *
from .constants import *
logger = logging.getLogger(__package__)
def computePoint(v1, v2, v3, v4, x1, x2):
"""Interpolates point on a quad
:param v1, v2, v3, v4: Vertices of the quad
:type v1, v2, v3, v4: mathutils.Vector
:param x1, x2: The interpolation values
:type x1, x2: float [0..1]"""
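    # Worked example: with x1 = x2 = 0.5 the result is the centroid of the
    # quad, (v1.co + v2.co + v3.co + v4.co) / 4.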
mv12_co = v1.co * x1 + v2.co * (1 - x1)
mv34_co = v3.co * (1 - x1) + v4.co * x1
mv_co = mv12_co * x2 + mv34_co * (1 - x2)
return mv_co
def selectRandomPoint(obj):
"""Selects a random point on the mesh of an object
:param obj: The object from which to select
:type obj: bpy.types.Object"""
# select a random polygon
p_select = random.random() * obj['area_sum']
polygon = obj.data.polygons[
numpy.nonzero(numpy.array(obj['area_cumsum']) > p_select)[0][0]]
# define position on the polygon
vert_inds = polygon.vertices[:]
poi = computePoint(obj.data.vertices[vert_inds[0]],
obj.data.vertices[vert_inds[1]],
obj.data.vertices[vert_inds[2]],
obj.data.vertices[vert_inds[3]],
random.random(), random.random())
p, n, f = obj.closest_point_on_mesh(poi)
return p, n, f
class MappingException(Exception):
def __init__(self):
pass
def __str__(self):
return "MappingException"
class Mapping():
"""Based on a list of layers, connections-properties and distance-properties,
this class can compute the 3d-point, the 2d-uv-point and the distance from a given
point on the first layer to the corresponding point on the last layer
"""
def __init__(self, layers, connections, distances, debug = False):
""":param list layers: layers connecting the pre-synaptic layer with the synaptic layer
:param list connections: values determining the type of layer-mapping
:param list distances: values determining the calculation of the distances between layers
:param bool debug: if true, compute mapping returns a list of layers that it was able
to pass. Helps to debug the mapping-definitions in order to figure
                           out where exactly the mapping stops"""
self.layers = layers
self.connections = connections
self.distances = distances
self.debug = debug
self.initFunctions()
def initFunctions(self):
"""Initializes the function lists from the connections and distances lists.
Needs to be called after connections or distances have changed"""
self.mapping_functions = [connection_dict[i] for i in self.connections]
self.distance_functions = [distance_dict[self.connections[i]][self.distances[i]] for i in range(len(self.distances))]
self.distance_functions[-1] = distance_dict_syn[self.connections[-1]][self.distances[-1]]
def computeMapping(self, point):
"""Compute the mapping of a single point
:param mathutils.Vector point: vector for which the mapping should be calculated
Return values
-------------
p3d list of 3d-vector of the neuron position on all layers until the last
last position before the synapse. Note, that this might be before the
synapse layer!!! This depends on the distance-property.
p2d 2d-vector of the neuron position on the UV map of the last layer
d distance between neuron position on the first layer and last position before
the synapse! This is not the distance to the p3d point! This is either the
distance to the 3d-position of the last but one layer or, in case
euclidean-uv-distance was used, the distance to the position of the last
                            layer determined by euclidean-distance. Functions like computeConnectivity()
add the distance to the synapse to value d in order to retrieve
the complete distance from the pre- or post-synaptic neuron
to the synapse
"""
self.p3d = [point]
for i in range(0, len(self.connections)):
layer = self.layers[i]
layer_next = self.layers[i + 1]
con_func = self.mapping_functions[i]
dis_func = self.distance_functions[i]
try:
p3d_n = con_func(self, layer, layer_next, dis_func)
except MappingException:
if self.debug:
return self.p3d, i, None
else:
return None, None, None
# for the synaptic layer, compute the uv-coordinates
p2d = layer_next.map3dPointToUV(p3d_n)
return self.p3d, p2d, compute_path_length(self.p3d)
"""
The following functions are the combinations of all distance and mapping types.
If you wish to add a new mapping or distance type, you will have to create a
function for each possible combination. If you add a mapping function, you also
have to add a function that computes the point on the next layer (p3d_n),
prefix "con_", and pass it down to the distance function. You also have to add
your functions to the lists below.
"""
"""Euclidean mapping"""
def con_euclid(self, layer, layer_next, dis_func):
p3d_n = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def euclid_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_jump_uv(self, p3d_n, layer, layer_next):
        self.p3d.append(p3d_n)
def euclid_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def euclid_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def euclid_dis_uv_normal(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
# If before the synaptic layer
def euclid_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def euclid_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def euclid_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def euclid_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""Normal Mapping"""
def con_normal(self, layer, layer_next, dis_func):
# compute normal on layer for the last point
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_n = layer_next.map3dPointTo3d(layer_next, p, n)
# if there is no intersection, abort
if p3d_n is None:
raise MappingException()
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def normal_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def normal_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def normal_dis_normal_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_uv_normal(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def normal_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def normal_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def normal_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""Random Mapping"""
def con_random(self, layer, layer_next, dis_func):
p3d_n, _, _ = selectRandomPoint(layer_next.obj)
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def random_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def random_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def random_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def random_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def random_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def random_dis_uv_normal(self, p3d_n, layer, layer_next):
p, n, f = layer_next.closest_point_on_mesh(p3d_n)
p3d_t = layer.map3dPointTo3d(layer, p, n)
if p3d_t is None:
raise MappingException()
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_t)
self.p3d.append(p3d_n)
def random_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def random_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def random_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def random_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def random_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def random_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""Topological mapping"""
def con_top(self, layer, layer_next, dis_func):
p3d_n = layer.map3dPointTo3d(layer_next, self.p3d[-1])
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def top_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def top_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def top_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def top_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def top_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def top_dis_uv_normal(self, p3d_n, layer, layer_next):
p, n, f = layer_next.closest_point_on_mesh(p3d_n)
p3d_t = layer.map3dPointTo3d(layer, p, n)
if p3d_t is None:
raise MappingException()
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_t)
self.p3d.append(p3d_n)
def top_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def top_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def top_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def top_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def top_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def top_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""UV mapping"""
def con_uv(self, layer, layer_next, dis_func):
p2d_t = layer.map3dPointToUV(self.p3d[-1])
p3d_n = layer_next.mapUVPointTo3d([p2d_t])
if p3d_n == []:
raise MappingException()
p3d_n = p3d_n[0]
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def uv_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def uv_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def uv_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def uv_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def uv_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def uv_dis_uv_normal(self, p3d_n, layer, layer_next):
p, n, f = layer_next.closest_point_on_mesh(p3d_n)
p3d_t = layer.map3dPointTo3d(layer, p, n)
if p3d_t is None:
raise MappingException()
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_t)
self.p3d.append(p3d_n)
def uv_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def uv_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def uv_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def uv_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def uv_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def uv_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
def con_mask3d(self, layer, layer_next, dis_func):
if not checkPointInObject(layer_next.obj, self.p3d[-1]):
raise MappingException()
else:
p3d_n = self.p3d[-1]
self.p3d.append(p3d_n)
return p3d_n
connection_dict = {
MAP_euclid: con_euclid,
MAP_normal: con_normal,
MAP_random: con_random,
MAP_top: con_top,
MAP_uv: con_uv,
MAP_mask3D: con_mask3d
}
distance_dict = {
MAP_euclid: {
DIS_euclid: euclid_dis_euclid,
DIS_euclidUV: euclid_dis_euclid_uv,
DIS_jumpUV: euclid_dis_jump_uv,
DIS_UVjump: euclid_dis_uv_jump,
DIS_normalUV: euclid_dis_normal_uv,
DIS_UVnormal: euclid_dis_uv_normal
},
MAP_normal: {
DIS_euclid: normal_dis_euclid,
DIS_euclidUV: normal_dis_euclid_uv,
DIS_jumpUV: normal_dis_jump_uv,
DIS_UVjump: normal_dis_uv_jump,
DIS_normalUV: normal_dis_normal_uv,
DIS_UVnormal: normal_dis_uv_normal
},
MAP_random: {
DIS_euclid: random_dis_euclid,
DIS_euclidUV: random_dis_euclid_uv,
DIS_jumpUV: random_dis_jump_uv,
DIS_UVjump: random_dis_uv_jump,
DIS_normalUV: random_dis_normal_uv,
DIS_UVnormal: random_dis_uv_normal
},
MAP_top: {
DIS_euclid: top_dis_euclid,
DIS_euclidUV: top_dis_euclid_uv,
DIS_jumpUV: top_dis_jump_uv,
DIS_UVjump: top_dis_uv_jump,
DIS_normalUV: top_dis_normal_uv,
DIS_UVnormal: top_dis_uv_normal
},
MAP_uv: {
DIS_euclid: uv_dis_euclid,
DIS_euclidUV: uv_dis_euclid_uv,
DIS_jumpUV: uv_dis_jump_uv,
DIS_UVjump: uv_dis_uv_jump,
DIS_normalUV: uv_dis_normal_uv,
DIS_UVnormal: uv_dis_uv_normal
},
MAP_mask3D: {
DIS_euclid: None,
DIS_euclidUV: None,
DIS_jumpUV: None,
DIS_UVjump: None,
DIS_normalUV: None,
DIS_UVnormal: None
},
}
distance_dict_syn = {
MAP_euclid: {
DIS_euclid: euclid_dis_euclid_syn,
DIS_euclidUV: euclid_dis_euclid_uv_syn,
DIS_jumpUV: euclid_dis_jump_uv_syn,
DIS_UVjump: euclid_dis_uv_jump_syn,
DIS_normalUV: euclid_dis_normal_uv_syn,
DIS_UVnormal: euclid_dis_uv_normal_syn
},
MAP_normal: {
DIS_euclid: normal_dis_euclid_syn,
DIS_euclidUV: normal_dis_euclid_uv_syn,
DIS_jumpUV: normal_dis_jump_uv_syn,
DIS_UVjump: normal_dis_uv_jump_syn,
DIS_normalUV: normal_dis_normal_uv_syn,
DIS_UVnormal: normal_dis_uv_normal_syn
},
MAP_random: {
DIS_euclid: random_dis_euclid_syn,
DIS_euclidUV: random_dis_euclid_uv_syn,
DIS_jumpUV: random_dis_jump_uv_syn,
DIS_UVjump: random_dis_uv_jump_syn,
DIS_normalUV: random_dis_normal_uv_syn,
DIS_UVnormal: random_dis_uv_normal_syn
},
MAP_top: {
DIS_euclid: top_dis_euclid_syn,
DIS_euclidUV: top_dis_euclid_uv_syn,
DIS_jumpUV: top_dis_jump_uv_syn,
DIS_UVjump: top_dis_uv_jump_syn,
DIS_normalUV: top_dis_normal_uv_syn,
DIS_UVnormal: top_dis_uv_normal_syn
},
MAP_uv: {
DIS_euclid: uv_dis_euclid_syn,
DIS_euclidUV: uv_dis_euclid_uv_syn,
DIS_jumpUV: uv_dis_jump_uv_syn,
DIS_UVjump: uv_dis_uv_jump_syn,
DIS_normalUV: uv_dis_normal_uv_syn,
DIS_UVnormal: uv_dis_uv_normal_syn
},
MAP_mask3D: {
DIS_euclid: None,
DIS_euclidUV: None,
DIS_jumpUV: None,
DIS_UVjump: None,
DIS_normalUV: None,
DIS_UVnormal: None
},
}
|
gpl-2.0
| 2,615,298,735,748,952,000
| 37.370809
| 125
| 0.637331
| false
| 2.675791
| false
| false
| false
|
CroatianMeteorNetwork/CMN-codes
|
HMM_radio2txt/HMM_radio2txt.py
|
1
|
1416
|
# Copyright 2014 Denis Vida, denis.vida@gmail.com
# The HMM_radio2txt is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2.
# The HMM_radio2txt is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with the HMM_radio2txt ; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from scipy.io.wavfile import read
import numpy as np
import datetime, math
numchunks = 6000
file_name = "RAD_BEDOUR_20111007_0135_BEUCCL_SYS001.wav" #WAV source file name
samprate, wavdata = read(file_name) #Get data from WAV file (samprate = samples/sec), wavdata contains raw levels data
chunks = np.array_split(wavdata, numchunks) #Split array into chunks
dbs = [np.mean(chunk) for chunk in chunks] #Calculate dB values from the mean of the chunks
print samprate
data_file = open('wav_data.txt', 'w')
data_file.write('Sample rate: '+str(samprate)+' samples/sec reduced to '+str(numchunks)+' chunks\n')
for no, line in enumerate(dbs):
data_file.write(str(no+1)+' '+str(line)+'\n')
data_file.close()
|
gpl-2.0
| 3,662,256,188,912,551,000
| 34.4
| 118
| 0.750706
| false
| 3.38756
| false
| false
| false
|
dstufft/potpie
|
potpie/pseudo/splitters.py
|
1
|
5418
|
# -*- coding: utf-8 -*-
import re
from polib import unescape
class ValidationError(Exception):
pass
class BaseValidator(object):
"""Base class for validators.
Implements the decorator pattern.
"""
def __init__(self, source_language=None, target_language=None, rule=None):
self.slang = source_language
self.tlang = target_language
self.rule = rule
def __call__(self, old, new):
"""Validate the `new` translation against the `old` one.
No checks are needed for deleted translations
Args:
old: The old translation.
new: The new translation.
Raises:
A ValidationError with an appropriate message.
"""
if not new or not self.precondition():
return
self.validate(old, new)
def precondition(self):
"""Check whether this validator is applicable to the situation."""
return True
def validate(self, old, new):
"""Actual validation method.
Subclasses must override this method.
Args:
old: The old translation.
new: The new translation.
Raises:
A ValidationError with an appropriate message.
"""
pass
class PrintfValidator(BaseValidator):
"""Validator that checks that the number of printf formats specifiers
is the same in the translation.
This is valid only if the plurals in the two languages are the same.
"""
printf_re = re.compile(
'%((?:(?P<ord>\d+)\$|\((?P<key>\w+)\))?(?P<fullvar>[+#-]*(?:\d+)?'\
'(?:\.\d+)?(hh\|h\|l\|ll)?(?P<type>[\w%])))'
)
def precondition(self):
"""Check if the number of plurals in the two languages is the same."""
return self.tlang.nplurals == self.slang.nplurals and \
super(PrintfValidator, self).precondition()
def validate(self, old, new):
old = unescape(old)
new = unescape(new)
old_matches = list(self.printf_re.finditer(old))
new_matches = list(self.printf_re.finditer(new))
if len(old_matches) != len(new_matches):
raise ValidationError("The number of arguments seems to differ "
"between the source string and the translation."
)
def next_splitter_or_func(string, splitters, func, pseudo_type):
"""
Helper for doing the next splitter check.
If the list is not empty, call the next splitter decorator appropriately,
otherwise call the decorated function.
"""
if splitters:
return splitters[0](string, splitters[1:])(func)(pseudo_type,
string)
else:
return func(pseudo_type, string)
class SplitterDecorators(object):
"""
A class decorator that receives a list of splitter decorator classes and
calls the first splitter from the list passing the decorated function as
an argument as well as the list of splitters without the called splitter.
In case the list of splitters is empty, it calls the decorated function
right away.
    This decorator must only be used with methods of classes inheriting from
``transifex.resources.formats.pseudo.PseudoTypeMixin``.
"""
def __init__(self, splitters):
self.splitters = splitters
def __call__(self, func):
def _wrapper(pseudo_type, string):
return next_splitter_or_func(string, self.splitters, func,
pseudo_type)
return _wrapper
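    # Usage sketch (hedged; ``compile`` is an illustrative method name on a
    # PseudoTypeMixin subclass):
    #   @SplitterDecorators([PrintfSplitter, TagSplitter])
    #   def compile(self, string):
    #       ...
    # Printf placeholders and tags are passed through untouched while the
    # text between them is handed to the decorated method.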
class BaseSplitter(object):
"""
Base class decorator for splitting a given string based on a regex and
call the subsequent splitter class available in the ``splitters`` var or
the decorated method.
"""
REGEX = r''
def __init__(self, string, splitters):
self.string = string
self.splitters = splitters
def __call__(self, func):
def _wrapped(pseudo_type, string, **kwargs):
text = []
keys = [l.group() for l in self._regex_matches(string)]
nkeys = len(keys)
i = 0
for key in keys:
t = string.split(key, 1)
string = t[0]
string = next_splitter_or_func(string, self.splitters,
func, pseudo_type)
text.extend([string, key])
i += 1
string = t[1]
string = next_splitter_or_func(string, self.splitters,
func, pseudo_type)
text.append(string)
return "".join(text)
return _wrapped
@classmethod
def _regex_matches(cls, string):
return re.finditer(cls.REGEX, string)
class PrintfSplitter(BaseSplitter):
"""
Split the string on printf placeholders, such as %s, %d, %i, %(foo)s, etc.
"""
# Lets reuse the printf regex from the validators
REGEX = PrintfValidator.printf_re
class TagSplitter(BaseSplitter):
"""
Split the string on XML/HTML tags, such as <b>, </b>, <a href="">, etc.
"""
    REGEX = r'(<|&lt;)(.|\n)*?(>|&gt;)'
class EscapedCharsSplitter(BaseSplitter):
"""
Split the string on escaped chars, such as \\\\n, \\\\t, etc.
"""
REGEX = r'(\\\\[\w]{1})'
class HTMLSpecialEntitiesSplitter(BaseSplitter):
"""
    Splits the string on HTML special entities, such as &lt;, &amp;, etc.
"""
REGEX = r'&[a-zA-Z]+;'
|
gpl-2.0
| 4,022,284,664,336,294,400
| 28.769231
| 82
| 0.591362
| false
| 4.117021
| false
| false
| false
|
spasmilo/electrum
|
scripts/authenticator.py
|
1
|
11237
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import android
import sys
import os
import imp
import base64
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(script_dir, 'packages'))
import qrcode
imp.load_module('electrum', *imp.find_module('lib'))
from electrum import SimpleConfig, Wallet, WalletStorage, format_satoshis
from electrum import util
from electrum.transaction import Transaction
from electrum.bitcoin import base_encode, base_decode
def modal_dialog(title, msg = None):
droid.dialogCreateAlert(title,msg)
droid.dialogSetPositiveButtonText('OK')
droid.dialogShow()
droid.dialogGetResponse()
droid.dialogDismiss()
def modal_input(title, msg, value = None, etype=None):
droid.dialogCreateInput(title, msg, value, etype)
droid.dialogSetPositiveButtonText('OK')
droid.dialogSetNegativeButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result
droid.dialogDismiss()
if result is None:
return modal_input(title, msg, value, etype)
if result.get('which') == 'positive':
return result.get('value')
def modal_question(q, msg, pos_text = 'OK', neg_text = 'Cancel'):
droid.dialogCreateAlert(q, msg)
droid.dialogSetPositiveButtonText(pos_text)
droid.dialogSetNegativeButtonText(neg_text)
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result
droid.dialogDismiss()
if result is None:
return modal_question(q, msg, pos_text, neg_text)
return result.get('which') == 'positive'
def make_layout(s):
content = """
<LinearLayout
android:id="@+id/zz"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:background="#ff222222">
<TextView
android:id="@+id/textElectrum"
android:text="Electrum Authenticator"
android:textSize="7pt"
android:textColor="#ff4444ff"
android:gravity="left"
android:layout_height="wrap_content"
android:layout_width="match_parent"
/>
</LinearLayout>
%s """%s
return """<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:id="@+id/background"
android:orientation="vertical"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:background="#ff000022">
%s
</LinearLayout>"""%content
def qr_layout(title):
title_view= """
<TextView android:id="@+id/addrTextView"
android:layout_width="match_parent"
android:layout_height="50"
android:text="%s"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>"""%title
image_view="""
<ImageView
android:id="@+id/qrView"
android:gravity="center"
android:layout_width="match_parent"
android:antialias="false"
android:src=""
/>
"""
return make_layout(title_view + image_view)
def add_menu():
droid.clearOptionsMenu()
droid.addOptionsMenuItem("Seed", "seed", None,"")
droid.addOptionsMenuItem("Public Key", "xpub", None,"")
droid.addOptionsMenuItem("Transaction", "scan", None,"")
droid.addOptionsMenuItem("Password", "password", None,"")
def make_bitmap(data):
# fixme: this is highly inefficient
import qrcode
from electrum import bmp
qr = qrcode.QRCode()
qr.add_data(data)
bmp.save_qrcode(qr,"/sdcard/sl4a/qrcode.bmp")
droid = android.Android()
wallet = None
class Authenticator:
def __init__(self):
global wallet
self.qr_data = None
storage = WalletStorage({'wallet_path':'/sdcard/electrum/authenticator'})
if not storage.file_exists:
action = self.restore_or_create()
if not action:
exit()
password = droid.dialogGetPassword('Choose a password').result
if password:
password2 = droid.dialogGetPassword('Confirm password').result
if password != password2:
modal_dialog('Error', 'Passwords do not match')
exit()
else:
password = None
if action == 'create':
wallet = Wallet(storage)
seed = wallet.make_seed()
modal_dialog('Your seed is:', seed)
elif action == 'import':
seed = self.seed_dialog()
if not seed:
exit()
if not Wallet.is_seed(seed):
exit()
wallet = Wallet.from_seed(seed, storage)
else:
exit()
wallet.add_seed(seed, password)
wallet.create_master_keys(password)
wallet.create_main_account(password)
else:
wallet = Wallet(storage)
def restore_or_create(self):
droid.dialogCreateAlert("Seed not found", "Do you want to create a new seed, or to import it?")
droid.dialogSetPositiveButtonText('Create')
droid.dialogSetNeutralButtonText('Import')
droid.dialogSetNegativeButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse().result
droid.dialogDismiss()
if not response: return
if response.get('which') == 'negative':
return
return 'import' if response.get('which') == 'neutral' else 'create'
def seed_dialog(self):
if modal_question("Enter your seed", "Input method", 'QR Code', 'mnemonic'):
code = droid.scanBarcode()
r = code.result
if r:
seed = r['extras']['SCAN_RESULT']
else:
return
else:
seed = modal_input('Mnemonic', 'Please enter your seed phrase')
return str(seed)
def show_qr(self, data):
path = "/sdcard/sl4a/qrcode.bmp"
if data:
droid.dialogCreateSpinnerProgress("please wait")
droid.dialogShow()
try:
make_bitmap(data)
finally:
droid.dialogDismiss()
else:
with open(path, 'w') as f: f.write('')
droid.fullSetProperty("qrView", "src", 'file://'+path)
self.qr_data = data
def show_title(self, title):
droid.fullSetProperty("addrTextView","text", title)
def get_password(self):
if wallet.use_encryption:
password = droid.dialogGetPassword('Password').result
try:
wallet.check_password(password)
except:
return False
return password
def main(self):
add_menu()
welcome = 'Use the menu to scan a transaction.'
droid.fullShow(qr_layout(welcome))
while True:
event = droid.eventWait().result
if not event:
continue
elif event["name"] == "key":
if event["data"]["key"] == '4':
if self.qr_data:
self.show_qr(None)
self.show_title(welcome)
else:
break
elif event["name"] == "seed":
password = self.get_password()
if password is False:
modal_dialog('Error','incorrect password')
continue
seed = wallet.get_mnemonic(password)
modal_dialog('Your seed is', seed)
elif event["name"] == "password":
self.change_password_dialog()
elif event["name"] == "xpub":
mpk = wallet.get_master_public_key()
self.show_qr(mpk)
self.show_title('master public key')
elif event["name"] == "scan":
r = droid.scanBarcode()
r = r.result
if not r:
continue
data = r['extras']['SCAN_RESULT']
data = base_decode(data.encode('utf8'), None, base=43)
data = ''.join(chr(ord(b)) for b in data).encode('hex')
tx = Transaction.deserialize(data)
#except:
# modal_dialog('Error', 'Cannot parse transaction')
# continue
if not wallet.can_sign(tx):
modal_dialog('Error', 'Cannot sign this transaction')
continue
lines = map(lambda x: x[0] + u'\t\t' + format_satoshis(x[1]) if x[1] else x[0], tx.get_outputs())
if not modal_question('Sign?', '\n'.join(lines)):
continue
password = self.get_password()
if password is False:
modal_dialog('Error','incorrect password')
continue
droid.dialogCreateSpinnerProgress("Signing")
droid.dialogShow()
wallet.sign_transaction(tx, password)
droid.dialogDismiss()
data = base_encode(str(tx).decode('hex'), base=43)
self.show_qr(data)
self.show_title('Signed Transaction')
droid.makeToast("Bye!")
def change_password_dialog(self):
if wallet.use_encryption:
password = droid.dialogGetPassword('Your seed is encrypted').result
if password is None:
return
else:
password = None
try:
wallet.check_password(password)
except Exception:
modal_dialog('Error', 'Incorrect password')
return
new_password = droid.dialogGetPassword('Choose a password').result
if new_password == None:
return
if new_password != '':
password2 = droid.dialogGetPassword('Confirm new password').result
if new_password != password2:
modal_dialog('Error', 'passwords do not match')
return
wallet.update_password(password, new_password)
if new_password:
modal_dialog('Password updated', 'Your seed is encrypted')
else:
modal_dialog('No password', 'Your seed is not encrypted')
if __name__ == "__main__":
a = Authenticator()
a.main()
|
gpl-3.0
| 6,965,951,914,015,369,000
| 30.47619
| 113
| 0.575331
| false
| 4.119135
| false
| false
| false
|
nschloe/quadpy
|
src/quadpy/t2/_dunavant/__init__.py
|
1
|
2732
|
import pathlib
from sympy import Rational as frac
from ...helpers import article
from .._helpers import T2Scheme, _read, register
source = article(
authors=["D.A. Dunavant"],
title="High Degree Efficient Symmetrical Gaussian Quadrature Rules for the Triangle",
journal="Article in International Journal for Numerical Methods in Engineering",
volume="21",
number="6",
pages="1129-1148",
month="jun",
year="1985",
url="https://doi.org/10.1002/nme.1620210612",
)
this_dir = pathlib.Path(__file__).resolve().parent
def dunavant_01():
d = {"centroid": [[1]]}
return T2Scheme("Dunavant 1", d, 1, source, 7.850e-17)
def dunavant_02():
d = {"d3_aa": [[frac(1, 3)], [frac(1, 6)]]}
return T2Scheme("Dunavant 2", d, 2, source, 2.220e-16)
def dunavant_03():
d = {"centroid": [[-frac(9, 16)]], "d3_aa": [[frac(25, 48)], [frac(1, 5)]]}
return T2Scheme("Dunavant 3", d, 3, source, 6.661e-16)
def dunavant_04():
return _read(this_dir / "dunavant_04.json", source)
def dunavant_05():
return _read(this_dir / "dunavant_05.json", source)
def dunavant_06():
return _read(this_dir / "dunavant_06.json", source)
def dunavant_07():
return _read(this_dir / "dunavant_07.json", source)
def dunavant_08():
return _read(this_dir / "dunavant_08.json", source)
def dunavant_09():
# DUP equals TRIEX 19
return _read(this_dir / "dunavant_09.json", source)
def dunavant_10():
return _read(this_dir / "dunavant_10.json", source)
def dunavant_11():
return _read(this_dir / "dunavant_11.json", source)
def dunavant_12():
return _read(this_dir / "dunavant_12.json", source)
def dunavant_13():
return _read(this_dir / "dunavant_13.json", source)
def dunavant_14():
return _read(this_dir / "dunavant_14.json", source)
def dunavant_15():
return _read(this_dir / "dunavant_15.json", source)
def dunavant_16():
return _read(this_dir / "dunavant_16.json", source)
def dunavant_17():
return _read(this_dir / "dunavant_17.json", source)
def dunavant_18():
return _read(this_dir / "dunavant_18.json", source)
def dunavant_19():
return _read(this_dir / "dunavant_19.json", source)
def dunavant_20():
return _read(this_dir / "dunavant_20.json", source)
register(
[
dunavant_01,
dunavant_02,
dunavant_03,
dunavant_04,
dunavant_05,
dunavant_06,
dunavant_07,
dunavant_08,
dunavant_09,
dunavant_10,
dunavant_11,
dunavant_12,
dunavant_13,
dunavant_14,
dunavant_15,
dunavant_16,
dunavant_17,
dunavant_18,
dunavant_19,
dunavant_20,
]
)
|
mit
| 5,399,588,959,320,617,000
| 19.984615
| 89
| 0.61327
| false
| 2.625602
| false
| false
| false
|
oswjk/py-mklink-wrapper
|
mklink.py
|
1
|
2540
|
"""
A wrapper script for ln.exe to make it look like the MKLINK utility
found from Windows Vista onwards. To fully utilise this, you should
also have a batch script that should look something like this:
@ECHO OFF
python %~dp0mklink.py %*
Name the file "mklink.cmd" and put it in PATH. Now you can use the
fake mklink utility like you would use the real one.
You can find instructions for installing ln.exe from
http://schinagl.priv.at/nt/hardlinkshellext/hardlinkshellext.html#symboliclinksforwindowsxp
"""
import argparse
import subprocess
import sys
def MyFormatter(raw):
"""Make the help output look a little bit more like the real deal
(i.e., this omits the "usage: " part in the beginning of the help).
"""
class MyFormatter_(argparse.HelpFormatter):
def format_help(self):
return raw
return MyFormatter_
usage_str = """Creates a symbolic link.
MKLINK [[/D] | [/H] | [/J]] Link Target
/D Creates a directory symbolic link. Default is a file
symbolic link.
/H Creates a hard link instead of a symbolic link.
/J Creates a Directory Junction.
Link specifies the new symbolic link name.
Target specifies the path (relative or absolute) that the new link
refers to.
"""
parser = argparse.ArgumentParser(prog='MKLINK', prefix_chars='/',
usage=usage_str, add_help=False,
formatter_class=MyFormatter(raw=usage_str))
parser.add_argument('/?', dest='help', action='help')
group = parser.add_mutually_exclusive_group()
group.add_argument('/D', dest='symlink', default=False, action='store_true')
group.add_argument('/d', dest='symlink', default=False, action='store_true')
group.add_argument('/H', dest='hardlink', default=False, action='store_true')
group.add_argument('/h', dest='hardlink', default=False, action='store_true')
group.add_argument('/J', dest='junction', default=False, action='store_true')
group.add_argument('/j', dest='junction', default=False, action='store_true')
parser.add_argument('link')
parser.add_argument('target')
args = parser.parse_args()
if (not args.symlink) and (not args.hardlink) and (not args.junction):
args.symlink = True
if args.symlink:
sys.exit(subprocess.call(['ln.exe', '-s', args.target, args.link]))
elif args.hardlink:
sys.exit(subprocess.call(['ln.exe', args.target, args.link]))
elif args.junction:
sys.exit(subprocess.call(['ln.exe', '-j', args.target, args.link]))
else:
print("invalid options!")
sys.exit(1)
|
mit
| 7,813,175,411,467,521,000
| 34.277778
| 95
| 0.688189
| false
| 3.527778
| false
| false
| false
|
openstack/octavia
|
octavia/common/utils.py
|
1
|
5621
|
# Copyright 2011, VMware, Inc., 2014 A10 Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
import base64
import hashlib
import re
import socket
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from stevedore import driver as stevedore_driver
from octavia.common import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def get_hostname():
return socket.gethostname()
def base64_sha1_string(string_to_hash):
"""Get a b64-encoded sha1 hash of a string. Not intended to be secure!"""
# TODO(rm_work): applying nosec here because this is not intended to be
# secure, it's just a way to get a consistent ID. Changing this would
# break backwards compatibility with existing loadbalancers.
hash_str = hashlib.sha1(string_to_hash.encode('utf-8')).digest() # nosec
b64_str = base64.b64encode(hash_str, str.encode('_-', 'ascii'))
b64_sha1 = b64_str.decode('UTF-8')
# https://github.com/haproxy/haproxy/issues/644
return re.sub(r"^-", "x", b64_sha1)
def get_amphora_driver():
amphora_driver = stevedore_driver.DriverManager(
namespace='octavia.amphora.drivers',
name=CONF.controller_worker.amphora_driver,
invoke_on_load=True
).driver
return amphora_driver
def get_network_driver():
CONF.import_group('controller_worker', 'octavia.common.config')
network_driver = stevedore_driver.DriverManager(
namespace='octavia.network.drivers',
name=CONF.controller_worker.network_driver,
invoke_on_load=True
).driver
return network_driver
def is_ipv4(ip_address):
"""Check if ip address is IPv4 address."""
ip = netaddr.IPAddress(ip_address)
return ip.version == 4
def is_ipv6(ip_address):
"""Check if ip address is IPv6 address."""
ip = netaddr.IPAddress(ip_address)
return ip.version == 6
def is_cidr_ipv6(cidr):
"""Check if CIDR is IPv6 address with subnet prefix."""
ip = netaddr.IPNetwork(cidr)
return ip.version == 6
def is_ipv6_lla(ip_address):
"""Check if ip address is IPv6 link local address."""
ip = netaddr.IPAddress(ip_address)
return ip.version == 6 and ip.is_link_local()
def ip_port_str(ip_address, port):
"""Return IP port as string representation depending on address family."""
ip = netaddr.IPAddress(ip_address)
if ip.version == 4:
return "{ip}:{port}".format(ip=ip, port=port)
return "[{ip}]:{port}".format(ip=ip, port=port)
def netmask_to_prefix(netmask):
return netaddr.IPAddress(netmask).netmask_bits()
def ip_netmask_to_cidr(ip, netmask):
net = netaddr.IPNetwork("0.0.0.0/0")
if ip and netmask:
net = netaddr.IPNetwork(
"{ip}/{netmask}".format(ip=ip, netmask=netmask)
)
return "{ip}/{netmask}".format(ip=net.network, netmask=net.prefixlen)
def get_vip_security_group_name(loadbalancer_id):
if loadbalancer_id:
return constants.VIP_SECURITY_GROUP_PREFIX + loadbalancer_id
return None
def get_compatible_value(value):
if isinstance(value, str):
value = value.encode('utf-8')
return value
def get_compatible_server_certs_key_passphrase():
key = CONF.certificates.server_certs_key_passphrase
if isinstance(key, str):
key = key.encode('utf-8')
return base64.urlsafe_b64encode(key)
def subnet_ip_availability(nw_ip_avail, subnet_id, req_num_ips):
for subnet in nw_ip_avail.subnet_ip_availability:
if subnet['subnet_id'] == subnet_id:
return subnet['total_ips'] - subnet['used_ips'] >= req_num_ips
return None
def b(s):
return s.encode('utf-8')
def expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
codes = re.split(', *', codes)
for code in codes:
if not code:
continue
if '-' in code:
low, hi = code.split('-')[:2]
retval.update(
str(i) for i in range(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
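# Illustrative sketch (not part of the original module): the docstring cases above,
# written out as assertions. The function name is hypothetical.
def _example_expand_expected_codes():
    assert expand_expected_codes('200-204') == {'200', '201', '202', '203', '204'}
    assert expand_expected_codes('200, 203') == {'200', '203'}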
class exception_logger(object):
"""Wrap a function and log raised exception
:param logger: the logger to log the exception default is LOG.exception
:returns: origin value if no exception raised; re-raise the exception if
any occurred
"""
def __init__(self, logger=None):
self.logger = logger
def __call__(self, func):
if self.logger is None:
_LOG = logging.getLogger(func.__module__)
self.logger = _LOG.exception
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
self.logger(e)
return None
return call
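# Illustrative sketch (not part of the original module): typical decorator usage.
# The wrapped function below is hypothetical; an exception raised inside it is
# logged via the module logger and then re-raised unchanged.
@exception_logger()
def _example_risky_call():
    raise ValueError('boom')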
|
apache-2.0
| -6,552,113,401,978,608,000
| 28.124352
| 78
| 0.652019
| false
| 3.664276
| false
| false
| false
|
ramunasd/mapgang
|
mapgang/metatile.py
|
1
|
1671
|
#!/usr/bin/python
import os
import struct
from cStringIO import StringIO
from mapgang.constants import METATILE, META_MAGIC
class MetaTile():
def __init__(self, style, x, y, z):
self.style = style
self.x = x
self.y = y
self.z = z
self.content = StringIO()
m2 = METATILE * METATILE
# space for header
self.content.write(struct.pack("4s4i", META_MAGIC, m2, 0, 0, 0))
# space for offset/size table
self.content.write(struct.pack("2i", 0, 0) * m2)
self.sizes = {}
self.offsets = {}
def get_header(self):
return struct.pack("4s4i", META_MAGIC, METATILE * METATILE, self.x, self.y, self.z)
def write_header(self):
self.content.seek(0)
# write header
self.content.write(self.get_header())
# Write out the offset/size table
for n in range(0, METATILE * METATILE):
if n in self.sizes:
self.content.write(struct.pack("2i", self.offsets[n], self.sizes[n]))
else:
self.content.write(struct.pack("2i", 0, 0))
def write_tile(self, x, y, tile):
mask = METATILE - 1
n = (x & mask) * METATILE + (y & mask)
# seek to end
self.content.seek(0, os.SEEK_END)
# mark offset
self.offsets[n] = self.content.tell()
# write content
self.content.write(tile)
# mark size
self.sizes[n] = len(tile)
def getvalue(self):
self.write_header()
return self.content.getvalue()
def to_string(self):
return "%s/%d/%d/%d" % (self.style, self.z, self.x, self.y)
|
lgpl-2.1
| 4,790,900,869,442,645,000
| 29.944444
| 91
| 0.552962
| false
| 3.348697
| false
| false
| false
|
hakril/PythonForWindows
|
windows/winproxy/apis/dbghelp.py
|
1
|
8510
|
import ctypes
import windows.generated_def as gdef
from windows.pycompat import int_types
from ..apiproxy import ApiProxy, NeededParameter
from ..error import fail_on_zero
class DbgHelpProxy(ApiProxy):
APIDLL = "dbghelp"
default_error_check = staticmethod(fail_on_zero)
# We keep the simple definition where callback UserContext is a PVOID.
# But we want to be able to pass arbitrary python objects (list/dict),
# so some ctypes magic does the py_object->pvoid transformation.
# !! this code loses a ref to obj.
# Should still work as our calling-caller method keeps a ref
def transform_pyobject_to_pvoid(obj):
if obj is None or isinstance(obj, int_types):
return obj
return ctypes.POINTER(gdef.PVOID)(ctypes.py_object(obj))[0]
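# Illustrative note (added comments, not original code): thanks to the shim above,
# the enumeration wrappers below accept an arbitrary Python object as UserContext,
# e.g.
#   hits = []
#   SymEnumSymbols(hproc, base, "*", callback, UserContext=hits)
# where `hits`, `hproc`, `base` and `callback` are hypothetical caller-side names.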
@DbgHelpProxy()
def SymInitialize(hProcess, UserSearchPath, fInvadeProcess):
return SymInitialize.ctypes_function(hProcess, UserSearchPath, fInvadeProcess)
@DbgHelpProxy()
def SymCleanup(hProcess):
return SymCleanup.ctypes_function(hProcess)
@DbgHelpProxy()
def SymLoadModuleExA(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags):
return SymLoadModuleExA.ctypes_function(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags)
@DbgHelpProxy()
def SymLoadModuleExW(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags):
return SymLoadModuleExW.ctypes_function(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags)
@DbgHelpProxy()
def SymUnloadModule64(hProcess, BaseOfDll):
return SymUnloadModule64.ctypes_function(hProcess, BaseOfDll)
@DbgHelpProxy()
def SymFromAddr(hProcess, Address, Displacement, Symbol):
return SymFromAddr.ctypes_function(hProcess, Address, Displacement, Symbol)
@DbgHelpProxy()
def SymGetModuleInfo64(hProcess, dwAddr, ModuleInfo):
return SymGetModuleInfo64.ctypes_function(hProcess, dwAddr, ModuleInfo)
@DbgHelpProxy()
def SymFromName(hProcess, Name, Symbol):
return SymFromName.ctypes_function(hProcess, Name, Symbol)
@DbgHelpProxy()
def SymLoadModuleEx(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags):
return SymLoadModuleEx.ctypes_function(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags)
@DbgHelpProxy(error_check=None)
def SymSetOptions(SymOptions):
return SymSetOptions.ctypes_function(SymOptions)
@DbgHelpProxy(error_check=None)
def SymGetOptions():
return SymGetOptions.ctypes_function()
@DbgHelpProxy()
def SymGetSearchPath(hProcess, SearchPath, SearchPathLength=None):
if SearchPath and SearchPathLength is None:
SearchPathLength = len(SearchPath)
return SymGetSearchPath.ctypes_function(hProcess, SearchPath, SearchPathLength)
@DbgHelpProxy()
def SymGetSearchPathW(hProcess, SearchPath, SearchPathLength=None):
if SearchPath and SearchPathLength is None:
SearchPathLength = len(SearchPath)
return SymGetSearchPathW.ctypes_function(hProcess, SearchPath, SearchPathLength)
@DbgHelpProxy()
def SymSetSearchPath(hProcess, SearchPath):
return SymSetSearchPath.ctypes_function(hProcess, SearchPath)
@DbgHelpProxy()
def SymSetSearchPathW(hProcess, SearchPath):
return SymSetSearchPathW.ctypes_function(hProcess, SearchPath)
@DbgHelpProxy()
def SymGetTypeInfo(hProcess, ModBase, TypeId, GetType, pInfo):
return SymGetTypeInfo.ctypes_function(hProcess, ModBase, TypeId, GetType, pInfo)
@DbgHelpProxy()
def SymEnumSymbols(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbols.ctypes_function(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumSymbolsEx(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext=None, Options=NeededParameter):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbolsEx.ctypes_function(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext, Options)
@DbgHelpProxy()
def SymEnumSymbolsForAddr(hProcess, Address, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbolsForAddr.ctypes_function(hProcess, Address, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumSymbolsForAddrW(hProcess, Address, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbolsForAddrW.ctypes_function(hProcess, Address, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumTypes(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumTypes.ctypes_function(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumTypesByName(hProcess, BaseOfDll, mask, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumTypesByName.ctypes_function(hProcess, BaseOfDll, mask, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumerateModules64(hProcess, EnumModulesCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumerateModules64.ctypes_function(hProcess, EnumModulesCallback, UserContext)
@DbgHelpProxy()
def SymGetTypeFromName(hProcess, BaseOfDll, Name, Symbol):
return SymGetTypeFromName.ctypes_function(hProcess, BaseOfDll, Name, Symbol)
@DbgHelpProxy()
def SymSearch(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymSearch.ctypes_function(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options)
@DbgHelpProxy()
def SymSearchW(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymSearchW.ctypes_function(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options)
@DbgHelpProxy()
def SymRefreshModuleList(hProcess):
return SymRefreshModuleList.ctypes_function(hProcess)
# Helpers
@DbgHelpProxy()
def SymFunctionTableAccess(hProcess, AddrBase):
return SymFunctionTableAccess.ctypes_function(hProcess, AddrBase)
@DbgHelpProxy()
def SymFunctionTableAccess64(hProcess, AddrBase):
return SymFunctionTableAccess64.ctypes_function(hProcess, AddrBase)
@DbgHelpProxy()
def SymGetModuleBase(hProcess, dwAddr):
return SymGetModuleBase.ctypes_function(hProcess, dwAddr)
@DbgHelpProxy()
def SymGetModuleBase64(hProcess, qwAddr):
return SymGetModuleBase64.ctypes_function(hProcess, qwAddr)
@DbgHelpProxy()
def SymEnumProcesses(EnumProcessesCallback, UserContext=None):
return SymEnumProcesses.ctypes_function(EnumProcessesCallback, UserContext)
## Sym callback
@DbgHelpProxy()
def SymRegisterCallback(hProcess, CallbackFunction, UserContext=None):
return SymRegisterCallback.ctypes_function(hProcess, CallbackFunction, UserContext)
@DbgHelpProxy()
def SymRegisterCallback64(hProcess, CallbackFunction, UserContext=0):
return SymRegisterCallback64.ctypes_function(hProcess, CallbackFunction, UserContext)
@DbgHelpProxy()
def SymRegisterCallbackW64(hProcess, CallbackFunction, UserContext=0):
return SymRegisterCallbackW64.ctypes_function(hProcess, CallbackFunction, UserContext)
# Stack walk
@DbgHelpProxy()
def StackWalk64(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress):
return StackWalk64.ctypes_function(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress)
@DbgHelpProxy()
def StackWalkEx(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress, Flags):
return StackWalkEx.ctypes_function(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress, Flags)
@DbgHelpProxy()
def StackWalk(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress):
return StackWalk.ctypes_function(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress)
|
bsd-3-clause
| 2,012,260,460,677,575,700
| 43.093264
| 191
| 0.809166
| false
| 3.539933
| false
| false
| false
|
hydroshare/hydroshare
|
hs_tracking/tests/test_dashboard.py
|
1
|
3060
|
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_tracking.models import Variable
from hs_core import hydroshare
from rest_framework import status
import socket
from django.test import Client
class TestDashboard(TestCase):
def setUp(self):
self.hostname = socket.gethostname()
self.resource_url = "/resource/{res_id}/"
self.client = Client(HTTP_USER_AGENT='Mozilla/5.0') # fake use of a real browser
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.dog = hydroshare.create_account(
'dog@gmail.com',
username='dog',
password='foobar',
first_name='a little arfer',
last_name='last_name_dog',
superuser=False,
groups=[]
)
# set up a logged-in session
# self.client.force_authenticate(user=self.dog)
self.client.login(username='dog', password='foobar')
self.resources_to_delete = []
self.groups_to_delete = []
self.holes = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='all about dog holes',
metadata=[],
)
self.resources_to_delete.append(self.holes.short_id)
self.squirrels = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='where to find squirrels',
metadata=[],
)
self.resources_to_delete.append(self.squirrels.short_id)
def tearDown(self):
for r in self.resources_to_delete:
hydroshare.delete_resource(r)
for g in self.groups_to_delete:
g.delete()
self.dog.delete()
def test_blank(self):
""" nothing in tracking database at beginning """
stuff = Variable.recent_resources(self.dog)
self.assertEqual(stuff.count(), 0)
def test_view(self):
""" a view gets recorded """
response = self.client.get(self.resource_url.format(res_id=self.holes.short_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
stuff = Variable.recent_resources(self.dog)
self.assertEqual(stuff.count(), 1)
r = stuff[0]
self.assertEqual(r.short_id, self.holes.short_id)
self.assertEqual(r.public, False)
self.assertEqual(r.published, False)
self.assertEqual(r.discoverable, False)
# there's only one record!
stuff = Variable.objects.filter(resource=self.holes)
one = stuff[0]
# the record describes the request above
self.assertEqual(one.last_resource_id, self.holes.short_id)
self.assertEqual(one.landing, True)
self.assertEqual(one.rest, False)
|
bsd-3-clause
| -1,793,111,076,543,336,400
| 30.875
| 89
| 0.603595
| false
| 3.979194
| true
| false
| false
|
vendelin8/serverApplet
|
main.py
|
1
|
12324
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Main module of serverApplet.
# Copyright (C) 2015 Gergely Bódi
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gettext, locale, logging, os, shelve, signal, sys
from collections import OrderedDict
from datetime import datetime
from enum import Enum
from functools import partial
from PyQt4 import QtCore, QtGui
from queue import Queue, Empty
from time import sleep
from threading import Thread
from serverApplet.plugin import globalPluginFunctions, localPluginFunctions
from serverApplet.down import HideDialog
logger = logging.getLogger('main')
logging.getLogger('requests').setLevel(logging.WARNING)
ACCOUNTS = 'accounts'
Action = Enum('Action', 'load update')
currentDir = os.path.dirname(os.path.realpath(__file__))
class MainApp(QtGui.QApplication):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.schedulerQueue = Queue()
self.periodic = []
def waiting(self, title, function, functionArgs, callback=None, *callbackArgs):
'''
        Shows a progress dialog for actions that the user has to wait for.
:param title: the progress dialog title (inside waiting for ...)
:param function: the long processing function
:param functionArgs: arguments for that function as a list
:param callback: called when done, if it is not None
:param *callbackArgs: optional arguments for the callback function
'''
logger.info('title: {}, function: {}, functionArgs: {}, callback: {}, callbackArgs: {}'.format(title, function, functionArgs, callback, callbackArgs))
bar = QtGui.QProgressDialog(_('Waiting for: {}...').format(title), None, 0, 0)
bar.forceShow()
q = Queue()
functionArgs.append(q) # the function must put the result to the queue
thread = Thread(target=function, args=functionArgs)
thread.start()
while True:
try: # check if the function has finished
result = q.get(True, 0.05)
logger.info('result: {}'.format(result))
break
except Empty:
self.processEvents()
thread.join()
bar.cancel()
if callback is not None:
callback(result, *callbackArgs)
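    # Illustrative sketch (added comments, not original code): a worker passed to
    # waiting() must push its result onto the queue it receives as its last
    # argument, e.g. (hypothetical names):
    #   def slow_login(login, password, q):
    #       q.put(do_network_call(login, password))
    #   app.waiting(_('testing'), slow_login, [login, password], on_done)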
def updateCallback(self, params):
        '''Called to update an account's params, typically with a newer lastAccess.'''
self.schedulerQueue.put((Action.update, params))
def addPeriodic(self, params):
        '''Adds a new function to call periodically, typically finishing a process with one or more desktop notifications.'''
self.periodic.append(params)
def addAccountToMenu(self, modulename, login, loadThread=False):
'''Adds account to the system tray icon menu.'''
newMenu = menu.addMenu('{}:{}'.format(modulename, login))
accSubMenu = menu.insertMenu(subMenuAction, newMenu)
moduleImport = pluginDict[modulename]['moduleImport']
for name, value in pluginDict[modulename]['pluginFunctions'].items():
action = newMenu.addAction(name)
action.triggered.connect(partial(getattr(moduleImport, value), login))
params = shelf[ACCOUNTS][(modulename, login)]
moduleImport.load(login, params)
if loadThread:
self.schedulerQueue.put((Action.load, {'login': login, 'modulename': modulename, 'params': params}))
def mainPeriodic(self):
        '''Called periodically to check if the scheduler thread has changed something.'''
try:
params = shelfQueue.get(True, 0.05)
shelf[ACCOUNTS][(params['modulename'], params['login'])] = params['params']
except Empty:
pass
        if len(self.periodic) > 0: # there might be plugin results to show as desktop notifications
for index, periodic in enumerate(list(self.periodic)): # status: True: success, None: partial success, False: fail
status, result = periodic['function'](*periodic['args']) # result: result string to show or None
if status is True:
title = _('Success!')
elif status is None:
title = _('In progress...')
else:
title = _('Fail!')
if result is not None:
try:
from os import system
system('notify-send "{}" "{}" --icon=dialog-information'.format(title, result))
except:
trayIcon.showMessage(title, result)
if status is not None: # success or fail is final: the periodic function can be deleted
del self.periodic[index]
def newAccount(self, modulename):
        '''Shows the new account dialog. Clears the previous one if it already exists.'''
if hasattr(self, 'newAccountDialog'):
self.newAccountDialog.clear(modulename)
else:
self.newAccountDialog = NewAccount(w, modulename)
self.newAccountDialog.show()
class NewAccount(HideDialog):
'''New account dialog with login and password field, and test login button.'''
def __init__(self, w, modulename):
super().__init__(w)
self.modulename = modulename
lytMain = QtGui.QVBoxLayout(self)
lytForm = QtGui.QFormLayout()
self.fldLogin = QtGui.QLineEdit(self)
lytForm.addRow(_('Login'), self.fldLogin)
self.fldPassword = QtGui.QLineEdit(self)
self.fldPassword.setEchoMode(QtGui.QLineEdit.Password)
lytForm.addRow(_('Password'), self.fldPassword)
lytMain.addLayout(lytForm)
lytMain.addStretch(1)
lytButtons = QtGui.QHBoxLayout()
lytButtons.addStretch(1)
btnLogin = QtGui.QPushButton('Login', self)
btnLogin.clicked.connect(self.loginClicked)
lytButtons.addWidget(btnLogin)
lytMain.addLayout(lytButtons)
def loginClicked(self):
app.waiting(_('testing'), pluginDict[self.modulename]['moduleImport'].testLogin, [self.fldLogin.text(), self.fldPassword.text()], self.doLogin)
def clear(self, modulename):
'''Reusing the dialog for a new account.'''
self.modulename = modulename
self.fldLogin.setText('')
self.fldPassword.setText('')
self.fldLogin.setFocus()
def doLogin(self, result):
'''
Callback after checking login.
:param result: True for success, False or error string for fail
'''
if result is True:
login = self.fldLogin.text()
params = {'password': self.fldPassword.text()}
shelf[ACCOUNTS][(self.modulename, login)] = params
app.addAccountToMenu(self.modulename, login, True)
self.hide()
else:
if result is False:
result = _('Login test failed.')
QtGui.QMessageBox.critical(self, _('Error'), result)
class Scheduler(Thread):
'''Scheduler thread for cron jobs. Daemon thread with rare wake ups.'''
def __init__(self, schedulerQueue, shelfQueue, accounts):
'''
:param schedulerQueue: queue for updating values up here
:param shelfQueue: queue for updating values down there
        :param accounts: the state of the accounts before starting the scheduler; after this point, synchronization is required
'''
self.schedulerQueue = schedulerQueue
self.shelfQueue = shelfQueue
super().__init__(daemon=True)
self.accounts = accounts
def run(self):
while True:
sleepTime = 3600 # if nothing interesting happens, wakes up once per hour
try: # job and global actions
action, params = self.schedulerQueue.get(True, 0.05)
modulename = params['modulename']
login = params['login']
if action == Action.load: # new account loaded (or just created)
self.accounts[(modulename, login)] = params
                elif action == Action.update: # existing account updated, typically with a newer lastAccess
self.accounts[(modulename, login)].update(params)
continue # do all the modifications before sleeping again
except Empty:
pass
now = datetime.utcnow()
for modulename, login in self.accounts.keys():
moduleImport = pluginDict[modulename]['moduleImport']
diff = (moduleImport.nextCron(login) - now).total_seconds()
if diff <= 0: # time for a cron job
moduleImport.startCron(login)
                else: # if the next event is less than an hour away, sleep only until then
sleepTime = min(diff, sleepTime)
shelfQueue.put({'login': login, 'modulename': modulename, 'params': moduleImport.getParams(login)})
sleep(sleepTime)
def doQuit(*args, **kw):
    '''Quitting the app.'''
shelf.close()
app.quit()
signal.signal(signal.SIGINT, doQuit) # Ctrl+C for debugging reasons, may be removed for production
if __name__ == '__main__':
logging.basicConfig(filename=os.path.join(currentDir, 'main.log'), level=logging.INFO,
format='%(asctime)s,%(funcName)s,%(lineno)d: %(message)s', datefmt='%d %H:%M:%S')
pluginDict = {}
shelfQueue = Queue()
shelf = shelve.open(os.path.join(currentDir, 'serverApplet'), writeback=True)
if ACCOUNTS not in shelf:
shelf[ACCOUNTS] = OrderedDict()
locale.setlocale(locale.LC_ALL, '') # localization
loc = locale.getlocale()
filename = os.path.join(currentDir, 'res', '{}.mo'.format(locale.getlocale()[0]))
try:
logging.debug('Opening message file {} for locale {}'.format(filename, loc[0]))
trans = gettext.GNUTranslations(open(filename, 'rb'))
except IOError:
logging.debug('Locale not found. Using default messages')
trans = gettext.NullTranslations()
trans.install()
app = MainApp(sys.argv) # GUI initialization
w = QtGui.QWidget()
trayIcon = QtGui.QSystemTrayIcon(QtGui.QIcon(os.path.join(currentDir, 'res', 'tools.png')), w)
menu = QtGui.QMenu(w)
subMenu = menu.addMenu(_('Add account'))
subMenuAction = menu.addMenu(subMenu)
for file in os.listdir(os.path.join(currentDir, 'plugin')): # loading plugins
if file.endswith('.py') and not file.startswith('__init__'):
modulename = file.split('.')[0]
action = subMenu.addAction(modulename)
action.triggered.connect(partial(app.newAccount, modulename))
pluginFunctions = localPluginFunctions(modulename)()
moduleImport = __import__('plugin.{}'.format(modulename), fromlist=globalPluginFunctions + list(pluginFunctions.values()))
moduleImport.init(app)
pluginDict[modulename] = {'moduleImport': moduleImport, 'pluginFunctions': pluginFunctions}
for (modulename, login), modulParams in shelf[ACCOUNTS].items():
app.addAccountToMenu(modulename, login)
# menu.addAction(_('Manage Accounts')) #TODO
exitAction = menu.addAction(_('Exit'))
exitAction.triggered.connect(doQuit)
trayIcon.setContextMenu(menu)
trayIcon.activated.connect(lambda: trayIcon.contextMenu().popup(QtGui.QCursor.pos()))
trayIcon.show()
scheduler = Scheduler(app.schedulerQueue, shelfQueue, shelf[ACCOUNTS])
scheduler.start()
timer = QtCore.QTimer()
timer.start(5000)
timer.timeout.connect(app.mainPeriodic)
sys.exit(app.exec_())
|
gpl-2.0
| -7,590,797,168,447,875,000
| 44.472325
| 158
| 0.633368
| false
| 4.27585
| false
| false
| false
|
lduarte1991/edx-platform
|
common/lib/xmodule/xmodule/static_content.py
|
1
|
6630
|
# /usr/bin/env python
"""
This module has utility functions for gathering up the static content
that is defined by XModules and XModuleDescriptors (javascript and css)
"""
import errno
import hashlib
import logging
import os
import sys
from collections import defaultdict
from docopt import docopt
from path import Path as path
from xmodule.x_module import XModuleDescriptor
LOG = logging.getLogger(__name__)
def write_module_styles(output_root):
"""Write all registered XModule css, sass, and scss files to output root."""
return _write_styles('.xmodule_display', output_root, _list_modules())
def write_module_js(output_root):
"""Write all registered XModule js and coffee files to output root."""
return _write_js(output_root, _list_modules())
def write_descriptor_styles(output_root):
"""Write all registered XModuleDescriptor css, sass, and scss files to output root."""
return _write_styles('.xmodule_edit', output_root, _list_descriptors())
def write_descriptor_js(output_root):
"""Write all registered XModuleDescriptor js and coffee files to output root."""
return _write_js(output_root, _list_descriptors())
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
    return [desc for (_, desc) in XModuleDescriptor.load_classes()]
def _list_modules():
"""Return a list of all registered XModule classes."""
return [
desc.module_class
for desc
in _list_descriptors()
]
def _ensure_dir(directory):
"""Ensure that `directory` exists."""
try:
os.makedirs(directory)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def _write_styles(selector, output_root, classes):
"""
Write the css fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
css_fragments = defaultdict(set)
for class_ in classes:
class_css = class_.get_css()
for filetype in ('sass', 'scss', 'css'):
for idx, fragment in enumerate(class_css.get(filetype, [])):
css_fragments[idx, filetype, fragment].add(class_.__name__)
css_imports = defaultdict(set)
for (idx, filetype, fragment), classes in sorted(css_fragments.items()):
fragment_name = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
# Prepend _ so that sass just includes the files into a single file
filename = '_' + fragment_name
contents[filename] = fragment
for class_ in classes:
css_imports[class_].add(fragment_name)
module_styles_lines = [
"@import 'bourbon/bourbon';",
"@import 'lms/theme/variables';",
]
for class_, fragment_names in css_imports.items():
module_styles_lines.append("""{selector}.xmodule_{class_} {{""".format(
class_=class_, selector=selector
))
module_styles_lines.extend(' @import "{0}";'.format(name) for name in fragment_names)
module_styles_lines.append('}')
contents['_module-styles.scss'] = '\n'.join(module_styles_lines)
_write_files(output_root, contents)
def _write_js(output_root, classes):
"""
Write the javascript fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
js_fragments = set()
for class_ in classes:
module_js = class_.get_javascript()
# It will enforce 000 prefix for xmodule.js.
js_fragments.add((0, 'js', module_js.get('xmodule_js')))
for filetype in ('coffee', 'js'):
for idx, fragment in enumerate(module_js.get(filetype, [])):
js_fragments.add((idx + 1, filetype, fragment))
for idx, filetype, fragment in sorted(js_fragments):
filename = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
contents[filename] = fragment
_write_files(output_root, contents, {'.coffee': '.js'})
return [output_root / filename for filename in contents.keys()]
def _write_files(output_root, contents, generated_suffix_map=None):
"""
Write file contents to output root.
Any files not listed in contents that exists in output_root will be deleted,
unless it matches one of the patterns in `generated_suffix_map`.
output_root (path): The root directory to write the file contents in
contents (dict): A map from filenames to file contents to be written to the output_root
generated_suffix_map (dict): Optional. Maps file suffix to generated file suffix.
For any file in contents, if the suffix matches a key in `generated_suffix_map`,
then the same filename with the suffix replaced by the value from `generated_suffix_map`
will be ignored
"""
_ensure_dir(output_root)
to_delete = set(file.basename() for file in output_root.files()) - set(contents.keys())
if generated_suffix_map:
for output_file in contents.keys():
for suffix, generated_suffix in generated_suffix_map.items():
if output_file.endswith(suffix):
to_delete.discard(output_file.replace(suffix, generated_suffix))
for extra_file in to_delete:
(output_root / extra_file).remove_p()
for filename, file_content in contents.iteritems():
output_file = output_root / filename
not_file = not output_file.isfile()
# not_file is included to short-circuit this check, because
# read_md5 depends on the file already existing
write_file = not_file or output_file.read_md5() != hashlib.md5(file_content).digest()
if write_file:
LOG.debug("Writing %s", output_file)
output_file.write_bytes(file_content)
else:
LOG.debug("%s unchanged, skipping", output_file)
def main():
"""
Generate
Usage: static_content.py <output_root>
"""
from django.conf import settings
settings.configure()
args = docopt(main.__doc__)
root = path(args['<output_root>'])
write_descriptor_js(root / 'descriptors/js')
write_descriptor_styles(root / 'descriptors/css')
write_module_js(root / 'modules/js')
write_module_styles(root / 'modules/css')
if __name__ == '__main__':
sys.exit(main())
|
agpl-3.0
| 1,384,983,547,345,485,600
| 31.821782
| 96
| 0.641026
| false
| 4.010889
| false
| false
| false
|
thomasahle/numberlink
|
gen/grid.py
|
1
|
4350
|
def sign(x):
if x == 0:
return x
return -1 if x < 0 else 1
class UnionFind:
def __init__(self, initial=None):
self.uf = initial or {}
def union(self, a, b):
a_par, b_par = self.find(a), self.find(b)
self.uf[a_par] = b_par
def find(self, a):
if self.uf.get(a, a) == a:
return a
par = self.find(self.uf.get(a, a))
# Path compression
self.uf[a] = par
return par
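# Illustrative sketch (not part of the original module): UnionFind usage with
# hashable items; after union(a, b), both sides resolve to the same root.
def _example_union_find():
    uf = UnionFind()
    uf.union('a', 'b')
    uf.union('b', 'c')
    assert uf.find('a') == uf.find('c')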
class Grid:
def __init__(self, w, h):
self.w, self.h = w, h
self.grid = {}
def __setitem__(self, key, val):
self.grid[key] = val
def __getitem__(self, key):
return self.grid.get(key, ' ')
def __repr__(self):
res = []
for y in range(self.h):
res.append(''.join(self[x, y] for x in range(self.w)))
return '\n'.join(res)
def __iter__(self):
return iter(self.grid.items())
def __contains__(self, key):
return key in self.grid
def __delitem__(self, key):
del self.grid[key]
def clear(self):
self.grid.clear()
def values(self):
return self.grid.values()
def shrink(self):
""" Returns a new grid of half the height and width """
small_grid = Grid(self.w // 2, self.h // 2)
for y in range(self.h // 2):
for x in range(self.w // 2):
small_grid[x, y] = self[2 * x + 1, 2 * y + 1]
return small_grid
def test_path(self, path, x0, y0, dx0=0, dy0=1):
""" Test whether the path is safe to draw on the grid, starting at x0, y0 """
return all(0 <= x0 - x + y < self.w and 0 <= y0 + x + y < self.h
and (x0 - x + y, y0 + x + y) not in self for x, y in path.xys(dx0, dy0))
def draw_path(self, path, x0, y0, dx0=0, dy0=1, loop=False):
""" Draws path on the grid. Asserts this is safe (no overlaps).
For non-loops, the first and the last character is not drawn,
as we don't know what shape they should have. """
ps = list(path.xys(dx0, dy0))
        # For loops, add the second character, so we get all rotational triples:
# abcda -> abcdab -> abc, bcd, cda, dab
if loop:
assert ps[0] == ps[-1], (path, ps)
ps.append(ps[1])
for i in range(1, len(ps) - 1):
xp, yp = ps[i - 1]
x, y = ps[i]
xn, yn = ps[i + 1]
self[x0 - x + y, y0 + x + y] = {
(1, 1, 1): '<', (-1, -1, -1): '<',
(1, 1, -1): '>', (-1, -1, 1): '>',
(-1, 1, 1): 'v', (1, -1, -1): 'v',
(-1, 1, -1): '^', (1, -1, 1): '^',
(0, 2, 0): '\\', (0, -2, 0): '\\',
(2, 0, 0): '/', (-2, 0, 0): '/'
}[xn - xp, yn - yp, sign((x - xp) * (yn - y) - (xn - x) * (y - yp))]
def make_tubes(self):
uf = UnionFind()
tube_grid = Grid(self.w, self.h)
for x in range(self.w):
d = '-'
for y in range(self.h):
# We union things down and to the right.
# This means ┌ gets to union twice.
for dx, dy in {
'/-': [(0, 1)], '\\-': [(1, 0), (0, 1)],
'/|': [(1, 0)],
' -': [(1, 0)], ' |': [(0, 1)],
'v|': [(0, 1)], '>|': [(1, 0)],
'v-': [(0, 1)], '>-': [(1, 0)],
}.get(self[x, y] + d, []):
uf.union((x, y), (x + dx, y + dy))
                # We change all <>v^ to x.
tube_grid[x, y] = {
'/-': '┐', '\\-': '┌',
'/|': '└', '\\|': '┘',
' -': '-', ' |': '|',
}.get(self[x, y] + d, 'x')
# We change direction on v and ^, but not on < and >.
if self[x, y] in '\\/v^':
d = '|' if d == '-' else '-'
return tube_grid, uf
def clear_path(self, path, x, y):
""" Removes everything contained in the path (loop) placed at x, y. """
path_grid = Grid(self.w, self.h)
path_grid.draw_path(path, x, y, loop=True)
for key, val in path_grid.make_tubes()[0]:
if val == '|':
self.grid.pop(key, None)
|
agpl-3.0
| -1,240,742,716,527,753,000
| 33.173228
| 91
| 0.408295
| false
| 3.129056
| false
| false
| false
|
zoho/books-python-wrappers
|
books/parser/ChartOfAccountsParser.py
|
1
|
5747
|
#$Id$#
from books.model.ChartOfAccount import ChartOfAccount
from books.model.ChartOfAccountList import ChartOfAccountList
from books.model.TransactionList import TransactionList
from books.model.Transaction import Transaction
from books.model.PageContext import PageContext
class ChartOfAccountsParser:
"""This class parses the json response for chart of accounts."""
def get_list(self, resp):
"""This method parses the given response and returns chart of accounts
list.
Args:
resp(dict): Dictionary containing json object for chart of accounts
list.
Returns:
instance: Chart of accounts list object.
"""
chart_of_accounts_list = ChartOfAccountList()
for value in resp['chartofaccounts']:
chart_of_accounts = ChartOfAccount()
chart_of_accounts.set_account_id(value['account_id'])
chart_of_accounts.set_account_name(value['account_name'])
chart_of_accounts.set_account_type(value['account_type'])
chart_of_accounts.set_is_active(value['is_active'])
chart_of_accounts.set_is_user_created(value['is_user_created'])
chart_of_accounts.set_is_involved_in_transaction(value[\
'is_involved_in_transaction'])
chart_of_accounts.set_is_system_account(value['is_system_account'])
chart_of_accounts_list.set_chartofaccounts(chart_of_accounts)
page_context = resp['page_context']
page_context_obj = PageContext()
page_context_obj.set_page(page_context['page'])
page_context_obj.set_per_page(page_context['per_page'])
page_context_obj.set_has_more_page(page_context['has_more_page'])
page_context_obj.set_report_name(page_context['report_name'])
page_context_obj.set_applied_filter(page_context['applied_filter'])
page_context_obj.set_sort_column(page_context['sort_column'])
page_context_obj.set_sort_order(page_context['sort_order'])
chart_of_accounts_list.set_page_context(page_context_obj)
return chart_of_accounts_list
def get_account(self, resp):
"""This method parses the given response and returns chart of
accounts object.
Args:
resp(dict): Dictionary containing json object for chart of accounts.
Returns:
instance: Chart of accounts object.
"""
chart_of_account = resp['chart_of_account']
chart_of_account_obj = ChartOfAccount()
chart_of_account_obj.set_account_id(chart_of_account['account_id'])
chart_of_account_obj.set_account_name(chart_of_account['account_name'])
chart_of_account_obj.set_is_active(chart_of_account['is_active'])
chart_of_account_obj.set_account_type(chart_of_account['account_type'])
chart_of_account_obj.set_account_type_formatted(chart_of_account[\
'account_type_formatted'])
chart_of_account_obj.set_description(chart_of_account['description'])
return chart_of_account_obj
def get_message(self, resp):
"""This method parses the given response and returns string message.
Args:
            resp(dict): Dictionary containing json object for message.
Returns:
str: Success message.
"""
return resp['message']
def get_transactions_list(self, resp):
"""This method parses the given response and returns transactions list.
Args:
resp(dict): Dictionary containing json object for transactions list.
Returns:
instance: Transaction list object.
"""
transactions_list = TransactionList()
for value in resp['transactions']:
transactions = Transaction()
transactions.set_categorized_transaction_id(value[\
'categorized_transaction_id'])
transactions.set_transaction_type(value['transaction_type'])
transactions.set_transaction_id(value['transaction_id'])
transactions.set_transaction_date(value['transaction_date'])
transactions.set_transaction_type_formatted(value[\
'transaction_type_formatted'])
transactions.set_account_id(value['account_id'])
transactions.set_customer_id(value['customer_id'])
transactions.set_payee(value['payee'])
transactions.set_description(value['description'])
transactions.set_entry_number(value['entry_number'])
transactions.set_currency_id(value['currency_id'])
transactions.set_currency_code(value['currency_code'])
transactions.set_debit_or_credit(value['debit_or_credit'])
transactions.set_offset_account_name(value['offset_account_name'])
transactions.set_reference_number(value['reference_number'])
transactions.set_reconcile_status(value['reconcile_status'])
transactions.set_debit_amount(value['debit_amount'])
transactions.set_credit_amount(value['credit_amount'])
transactions_list.set_transactions(transactions)
page_context = resp['page_context']
page_context_obj = PageContext()
page_context_obj.set_page(page_context['page'])
page_context_obj.set_per_page(page_context['per_page'])
page_context_obj.set_has_more_page(page_context['has_more_page'])
page_context_obj.set_report_name(page_context['report_name'])
page_context_obj.set_sort_column(page_context['sort_column'])
page_context_obj.set_sort_order(page_context['sort_order'])
transactions_list.set_page_context(page_context_obj)
return transactions_list
|
mit
| 8,299,171,215,330,370,000
| 44.251969
| 80
| 0.652514
| false
| 4.116762
| false
| false
| false
|
brinkframework/brink
|
tests/test_fields.py
|
1
|
3215
|
from brink import fields, models
import pytest
class DummyModel(models.Model):
title = fields.CharField()
def test_field_treat():
field = fields.Field()
assert field.validate("val") == "val"
def test_field_validate_required():
field = fields.Field(required=True)
with pytest.raises(fields.FieldRequired):
field.validate(None)
assert field.validate("val") == "val"
def test_integer_field_validate_required():
field1 = fields.IntegerField(required=True)
with pytest.raises(fields.FieldRequired):
field1.validate(None)
field2 = fields.IntegerField()
field2.validate(None)
def test_integer_field_validate_type():
field = fields.IntegerField()
with pytest.raises(fields.FieldInvalidType):
field.validate("test")
assert field.validate(10) == 10
def test_char_field_validate_required():
field1 = fields.CharField(required=True)
with pytest.raises(fields.FieldRequired):
field1.validate(None)
field2 = fields.CharField()
field2.validate(None)
def test_char_field_validate_min_length():
field = fields.CharField(min_length=5)
with pytest.raises(fields.FieldInvalidLength):
field.validate("test")
assert field.validate("testing") == "testing"
def test_char_field_validate_max_length():
field = fields.CharField(max_length=5)
with pytest.raises(fields.FieldInvalidLength):
field.validate("testing")
assert field.validate("test") == "test"
def test_char_field_validate_type():
field = fields.CharField()
with pytest.raises(fields.FieldInvalidType):
field.validate(10)
assert field.validate("test") == "test"
def test_bool_field_validate_required():
field1 = fields.BooleanField(required=True)
with pytest.raises(fields.FieldRequired):
field1.validate(None)
field2 = fields.BooleanField()
field2.validate(None)
def test_bool_field_validate_type():
field = fields.BooleanField()
with pytest.raises(fields.FieldInvalidType):
field.validate("test")
assert field.validate(True)
assert not field.validate(False)
def test_list_field_validate_subtype():
field = fields.ListField(fields.CharField())
with pytest.raises(fields.FieldInvalidType):
field.validate([1, 2])
with pytest.raises(fields.FieldInvalidType):
field.validate([1, "test"])
field.validate(["test", "test2"])
assert field.validate(None) == []
def test_reference_field_treat():
field = fields.ReferenceField(DummyModel)
model = DummyModel(id="test", title="Test")
assert field.treat(model) == "test"
def test_reference_field_show():
field = fields.ReferenceField(DummyModel)
model = field.show(DummyModel(title="Test"))
assert model.title == "Test"
def test_reference_list_field_treat():
field = fields.ReferenceListField(DummyModel)
model = DummyModel(id="test", title="Test")
assert field.treat([model]) == ["test"]
def test_reference_list_field_show():
field = fields.ReferenceListField(DummyModel)
data = DummyModel(title="Test")
models = field.show([data])
for model in models:
assert model.title == "Test"
|
bsd-3-clause
| 2,837,641,944,071,696,400
| 22.467153
| 50
| 0.684292
| false
| 3.742724
| true
| false
| false
|
angr/cle
|
cle/utils.py
|
1
|
3681
|
import os
import contextlib
from .errors import CLEError, CLEFileNotFoundError
# https://code.woboq.org/userspace/glibc/include/libc-pointer-arith.h.html#43
def ALIGN_DOWN(base, size):
return base & -size
# https://code.woboq.org/userspace/glibc/include/libc-pointer-arith.h.html#50
def ALIGN_UP(base, size):
return ALIGN_DOWN(base + size - 1, size)
# To verify the mmap behavior you can compile and run the following program. The fact is that mmap file
# mappings always map the entire page into memory from the file if available. If not, it gets zero padded.
# pylint: disable=pointless-string-statement
"""#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
void make_test_file()
{
void* data = (void*)0xdead0000;
int fd = open("./test.data", O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
for (int i = 0; i < 0x1800; i += sizeof(void*)) // Only write 1 1/2 pages worth
{
write(fd, &data, sizeof(void*));
data += sizeof(void*);
}
close(fd);
}
int main(int argc, char* argv[])
{
make_test_file();
int fd = open("./test.data", O_RDONLY);
unsigned char* mapping = mmap(NULL, 0x123, PROT_READ, MAP_PRIVATE, fd, 4096);
for (int i=0; i < 0x1000; i++)
{
printf("%02x ", mapping[i]);
if (i % sizeof(void*) == (sizeof(void*) - 1))
printf("| ");
if (i % 16 == 15)
printf("\n");
}
}"""
def get_mmaped_data(stream, offset, length, page_size):
if offset % page_size != 0:
raise CLEError("libc helper for mmap: Invalid page offset, should be multiple of page size! Stream {}, offset {}, length: {}".format(stream, offset, length))
read_length = ALIGN_UP(length, page_size)
stream.seek(offset)
data = stream.read(read_length)
return data.ljust(read_length, b'\0')
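# Illustrative sketch (not part of the original module): the alignment helpers used
# by get_mmaped_data, shown with a 0x1000-byte page. The function name is
# hypothetical.
def _example_alignment():
    assert ALIGN_DOWN(0x1234, 0x1000) == 0x1000
    assert ALIGN_UP(0x123, 0x1000) == 0x1000
    assert ALIGN_UP(0x1000, 0x1000) == 0x1000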
@contextlib.contextmanager
def stream_or_path(obj, perms='rb'):
if hasattr(obj, 'read') and hasattr(obj, 'seek'):
obj.seek(0)
yield obj
else:
if not os.path.exists(obj):
raise CLEFileNotFoundError("%r is not a valid path" % obj)
with open(obj, perms) as f:
yield f
def key_bisect_floor_key(lst, key, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= key:
lo = mid + 1
else:
hi = mid
if lo <= len(lst) and lo > 0:
return lst[lo - 1]
return None
def key_bisect_find(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= keyfunc(item):
lo = mid + 1
else:
hi = mid
return lo
def key_bisect_insort_left(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) < keyfunc(item):
lo = mid + 1
else:
hi = mid
lst.insert(lo, item)
def key_bisect_insort_right(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= keyfunc(item):
lo = mid + 1
else:
hi = mid
lst.insert(lo, item)
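# Illustrative sketch (not part of the original module): keyed insertion keeps the
# list sorted by the extracted key. The function name is hypothetical.
def _example_key_bisect():
    lst = [(1, 'a'), (3, 'b')]
    key_bisect_insort_right(lst, (2, 'c'), keyfunc=lambda t: t[0])
    assert lst == [(1, 'a'), (2, 'c'), (3, 'b')]
    assert key_bisect_floor_key(lst, 2, keyfunc=lambda t: t[0]) == (2, 'c')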
|
bsd-2-clause
| 2,866,465,400,092,293,600
| 27.984252
| 165
| 0.573485
| false
| 3.156947
| false
| false
| false
|
dynamicy/FloodligtModule
|
apps/qos/qospath.py
|
1
|
7564
|
#! /usr/bin/python
"""
QoSPath.py ---------------------------------------------------------------------------------------------------
Developed By: Ryan Wallner (ryan.wallner1@marist.edu)
Adds QoS to a specific path in the network. Utilizes the circuit pusher developed by KC Wang.
[Note]
*circuitpusher.py is needed in the same directory for this application to run
successfully!
USAGE:
qospath.py <add> --qos-path <name> <source-ip> <dest-ip> <policy-object> <controller-ip> <port>
qospath.py <delete> --qos-path <name> <controller-ip> <port>
*note: This adds the Quality of Service to each switch along the path between hosts
*note Policy object can exclude the "sw" ,"enqueue-port" parameters and
"ip-src", "ip-dst" and "ingress-port" match parameters.
They will be modified based on the route anyway.
[author] - rjwallner
-----------------------------------------------------------------------------------------------------------------------
"""
import sys
import os
import time
import simplejson #used to process policies and encode/decode requests
import subprocess #spawning subprocesses
##Get switches in a circuit using circuitpusher (may need to modify to get all switches in path)
##Then use the add policy to a EACH switch in in circuit using QoSPusher to add a policy along a path.
def main():
#checks
if (len(sys.argv) == 2):
if sys.argv[1] == "--help" or sys.argv[1] == "help" or sys.argv[1] == "--h" :
usage_help()
exit()
if (len(sys.argv)) == 9:
p_name = sys.argv[3]
src = sys.argv[4]
dst = sys.argv[5]
pol = sys.argv[6]
c_ip = sys.argv[7]
prt = sys.argv[8]
add(p_name,src,dst,pol,c_ip,prt)
exit()
if (len(sys.argv)) == 6:
p_name = sys.argv[3]
c_ip = sys.argv[4]
prt = sys.argv[5]
delete(p_name,c_ip,prt)
exit()
else:
usage()
exit()
def add(name, ip_src, ip_dst, p_obj, c_ip, port):
print "Trying to create a circuit from host %s to host %s..." % (ip_src, ip_dst)
c_pusher = "circuitpusher.py"
qos_pusher = "qosmanager.py"
pwd = os.getcwd()
print pwd
try:
if (os.path.exists("%s/%s" % (pwd,c_pusher))) and (os.path.exists("%s/%s" % (pwd,qos_pusher))):
print "Necessary tools confirmed.. %s , %s" % (c_pusher,qos_pusher)
else:
print "%s/%s does not exist" %(pwd,c_pusher)
print "%s/%s does not exist" %(pwd,qos_pusher)
except ValueError as e:
print "Problem finding tools...%s , %s" % (c_pusher,qos_pusher)
print e
exit(1)
    # first create the circuit and wait for the json to populate
print "create circuit!!!"
try:
cmd = "--controller=%s:%s --type ip --src %s --dst %s --add --name %s" % (c_ip,port,ip_src,ip_dst,name)
print './circuitpusher.py %s' % cmd
c_proc = subprocess.Popen('./circuitpusher.py %s' % cmd, shell=True)
print "Process %s started to create circuit" % c_proc.pid
#wait for the circuit to be created
c_proc.wait()
except Exception as e:
print "could not create circuit, Error: %s" % str(e)
try:
subprocess.Popen("cat circuits.json",shell=True).wait()
except Exception as e:
print "Error opening file, Error: %s" % str(e)
#cannot continue without file
exit()
print "Opening circuits.json in %s" % pwd
try:
circs = "circuits.json"
c_data = open(circs)
except Exception as e:
print "Error opening file: %s" % str(e)
#load data into json format
print "Creating a QoSPath from host %s to host %s..." % (ip_src, ip_dst)
time.sleep(5)
for line in c_data:
data = simplejson.loads(line)
if data['name'] != name:
continue
else:
sw_id = data['Dpid']
in_prt = data['inPort']
out_prt = data['outPort']
print"QoS applied to this switch for circuit %s" % data['name']
print "%s: in:%s out:%s" % (sw_id,in_prt,out_prt)
p = simplejson.loads(p_obj)
#add necessary match values to policy for path
p['sw'] = sw_id
p['name'] = name+"."+sw_id
#screwed up connectivity on this match, remove
#p['ingress-port'] = str(in_prt)
p['ip-src'] = ip_src
p['ip-dst'] = ip_dst
keys = p.keys()
l = len(keys)
queue = False
service = False
for i in range(l):
if keys[i] == 'queue':
queue = True
elif keys[i] == 'service':
service = True
if queue and service:
polErr()
elif queue and not service:
p['enqueue-port'] = str(out_prt)
pol = str(p)
print "Adding Queueing Rule"
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(p)
print sjson
cmd = "./qosmanager.py add policy '%s' %s %s" % (sjson,c_ip,port)
p = subprocess.Popen(cmd, shell=True).wait()
elif service and not queue:
print "Adding Type of Service"
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(p)
print sjson
cmd = "./qosmanager.py add policy '%s' %s %s" % (sjson,c_ip,port)
p = subprocess.Popen(cmd, shell=True).wait()
else:
polErr()
def polErr():
print """Your policy is not defined right, check to
make sure you have a service OR a queue defined"""
def delete(name,c_ip,port):
print "Trying to delete QoSPath %s" % name
# circuitpusher --controller {IP:REST_PORT} --delete --name {CIRCUIT_NAME}
try:
print "Deleting circuit"
cmd = "./circuitpusher.py --controller %s:%s --delete --name %s" % (c_ip,port,name)
subprocess.Popen(cmd,shell=True).wait()
except Exception as e:
print "Error deleting circuit, Error: %s" % str(e)
exit()
qos_s = os.popen("./qosmanager.py list policies %s %s" %(c_ip,port)).read()
qos_s = qos_s[qos_s.find("[",qos_s.find("[")+1):qos_s.rfind("]")+1]
#print qos_s
data = simplejson.loads(qos_s)
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(data)
jsond = simplejson.JSONDecoder().decode(sjson)
#find policies that start with "<pathname>."
l = len(jsond)
for i in range(l):
n = jsond[i]['name']
if name in n:
pol_id = jsond[i]['policyid']
try:
cmd = "./qosmanager.py delete policy '{\"policy-id\":\"%s\"}' %s %s " % (pol_id,c_ip,port)
print cmd
subprocess.Popen(cmd,shell=True).wait()
except Exception as e:
print "Could not delete policy in path: %s" % str(e)
def usage():
print '''type "qospath.py --help" for more details
#qospath.py <add> --qos-path <name> <source-ip> <dest-ip> <policy-object> <controller-ip> <port>
#qospath.py <delete> --qos-path <name> <controller-ip> <port>
*Policy object can exclude the "sw" ,"enqueue-port" parameters and
"ip-src", "ip-dst" and "ingress-port" match parameters.
They will be modified based on the route anyway.'''
def usage_help():
print '''
###################################
QoSPath.py
Author: Ryan Wallner (Ryan.Wallner1@marist.edu)
QoSPath is a simple service that utilizes KC Wang's
CircuitPusher to push Quality of Service along a
specific path in the network.
To add a QoS Path with a Policy
*note other match fields can be added to the policy object
qospath.py add --qos-path Path-Name 10.0.0.1 10.0.0.2 '{"queue":"2"}' 127.0.0.1 8080
qospath.py add --qos-path Path-Name 10.0.0.1 10.0.0.2 '{"service":"Best Effort"}' 127.0.0.1 8080
To delete a QoS Path
qospath.py delete --qos-path "Path-Name" 127.0.0.1 8080
###################################
'''
#Call main
if __name__ == "__main__" :
main()
|
apache-2.0
| -4,349,914,921,500,390,000
| 35.02381
| 119
| 0.593469
| false
| 2.985004
| false
| false
| false
|
JonathanSeguin/Mariana
|
setup.py
|
1
|
2757
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Mariana',
version='1.0.3rc1',
description="The Cutest Deep Learning Framework",
long_description=long_description,
url='https://github.com/tariqdaouda/mariana',
author='Tariq Daouda',
author_email='tariq.daouda@umontreal.ca',
license='ApacheV2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Machine-learning',
'Topic :: Scientific/Engineering :: Deep-learning',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
],
keywords='Machine Learning deeplearning neural networks',
packages=find_packages(exclude=['trash']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=["theano", "pyGeno", "simplejson", "numpy"],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#~ package_data={
#~ 'sample': ['package_data.dat'],
#~ },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#~ data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=sample:main',
],
},
)
|
apache-2.0
| 4,313,847,788,811,178,000
| 34.805195
| 98
| 0.660138
| false
| 3.949857
| false
| false
| false
|
yardstick17/GoogleCharts
|
preprocessing/get_data.py
|
1
|
1216
|
from random import randint
import pandas as pd
def read_rating_history_dump():
rating_df = pd.read_csv('GoogleChartsFlask/data/rating_history.csv')
data = list()
for index, row in rating_df.iterrows():
data.append((row[0], row[1], row[2]))
return data
def read_daily_rating_dump():
rating_df = pd.read_csv('GoogleChartsFlask/data/daily_rating.csv')
data = []
for index, row in rating_df.iterrows():
data.append((row[0], row[1], row[2], row[3], row[4], randint(3, 5) + randint(1, 10) / 10,
randint(3, 5) + randint(1, 10) / 10, randint(3, 5) + randint(1, 10) / 10))
return data[:10]
def read_daily_rating_dump_all():
rating_df = pd.read_csv('GoogleChartsFlask/data/daily_rating.csv')
data = []
for index, row in rating_df.iterrows():
data.append((row[0], row[1], row[2], row[3], row[4], randint(3, 5) + randint(1, 10) / 10))
return data[:10]
def read_rating_hive_dump():
rating_df = pd.read_csv('hive_query_result', sep='\001', names='a b c d e'.split())
data = []
for index, row in rating_df.iterrows():
        data.append((row[0], row[1], row[2], row[3], row[4], randint(3, 5) + randint(1, 10) / 10))
    # Assumed return, following the pattern of the readers above.
    return data
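# Minimal usage sketch, assuming the CSV dumps under GoogleChartsFlask/data/
# (and the hive_query_result file) are present relative to the working
# directory; each reader returns a list of row tuples.
if __name__ == '__main__':
    history = read_rating_history_dump()
    daily = read_daily_rating_dump()
    print('history rows: %d, daily rows: %d' % (len(history), len(daily)))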
|
mit
| 2,226,139,648,123,469,800
| 33.742857
| 98
| 0.599507
| false
| 2.916067
| false
| false
| false
|
nbingham1/python-html
|
css.py
|
1
|
2211
|
from collections import OrderedDict
class Rgb:
def __init__(self, r = 0.0, g = 0.0, b = 0.0):
self.r = r
self.g = g
self.b = b
def rgba(self):
return "rgb({},{},{})".format(
max(0.0, min(1.0, self.r)),
max(0.0, min(1.0, self.g)),
max(0.0, min(1.0, self.b)))
def hex(self):
return "#{:02x}{:02x}{:02x}".format(
max(0, min(255, int(255.0*self.r))),
max(0, min(255, int(255.0*self.g))),
max(0, min(255, int(255.0*self.b))))
def __str__(self):
return self.hex()
class Rgba:
def __init__(self, r = 0.0, g = 0.0, b = 0.0, a = 1.0):
self.r = r
self.g = g
self.b = b
self.a = a
def rgba(self):
return "rgba({},{},{},{})".format(
max(0.0, min(1.0, self.r)),
max(0.0, min(1.0, self.g)),
max(0.0, min(1.0, self.b)),
max(0.0, min(1.0, self.a)))
def __str__(self):
return self.rgba()
class Style:
def __init__(self, prop = OrderedDict()):
self.prop = OrderedDict()
for key,value in prop.iteritems():
self.set(key, value)
def __str__(self):
return " ".join(self.emit())
def emit(self):
result = []
for key,value in self.prop.iteritems():
result.append(str(key) + ": " + str(value) + ";")
return result
def get(self, key):
return self.prop[key]
def set(self, key, value):
#if key == "background":
#elif key == "border":
#elif key == "border-bottom":
#elif key == "border-image":
#elif key == "border-left":
#elif key == "border-radius":
#elif key == "border-right":
#elif key == "border-top":
#elif key == "margin":
#elif key == "padding":
#elif key == "font":
#elif key == "list-style":
#elif key == "animation":
#elif key == "outline":
#elif key == "column-rule":
#else:
self.prop[key] = value
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.set(key, value)
class Css:
def __init__(self, elems = OrderedDict()):
self.elems = elems
def __str__(self):
return "\n".join(self.emit())
def emit(self):
result = []
for selector,style in self.elems.iteritems():
result.append(selector + " {")
lines = style.emit()
for line in lines:
result.append("\t" + line)
result.append("}")
result.append("")
return result
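# Minimal usage sketch based on the classes above (note that Style and Css
# rely on dict.iteritems(), so this module targets Python 2): build a tiny
# stylesheet and print it.
if __name__ == "__main__":
    body_style = Style(OrderedDict([("background", Rgb(1.0, 1.0, 1.0)),
                                    ("color", Rgba(0.0, 0.0, 0.0, 0.87))]))
    print(Css(OrderedDict([("body", body_style)])))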
|
mit
| -8,850,375,547,768,495,000
| 20.676471
| 56
| 0.565807
| false
| 2.509648
| false
| false
| false
|
nuagenetworks/tempest
|
tempest/tests/lib/test_decorators.py
|
1
|
4381
|
# Copyright 2013 IBM Corp
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import testtools
from tempest.lib import base as test
from tempest.lib import decorators
from tempest.tests.lib import base
class TestSkipBecauseDecorator(base.TestCase):
def _test_skip_because_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@decorators.skip_because(**decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException, t.test_bar)
else:
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
def test_skip_because_bug(self):
self._test_skip_because_helper(bug='12345')
def test_skip_because_bug_and_condition_true(self):
self._test_skip_because_helper(bug='12348', condition=True)
def test_skip_because_bug_and_condition_false(self):
self._test_skip_because_helper(expected_to_skip=False,
bug='12349', condition=False)
def test_skip_because_bug_without_bug_never_skips(self):
"""Never skip without a bug parameter."""
self._test_skip_because_helper(expected_to_skip=False,
condition=True)
self._test_skip_because_helper(expected_to_skip=False)
def test_skip_because_invalid_bug_number(self):
"""Raise ValueError if with an invalid bug number"""
self.assertRaises(ValueError, self._test_skip_because_helper,
bug='critical_bug')
class TestIdempotentIdDecorator(base.TestCase):
def _test_helper(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
"""Docstring"""
pass
return foo
def _test_helper_without_doc(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
pass
return foo
def test_positive(self):
_id = str(uuid.uuid4())
foo = self._test_helper(_id)
self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_positive_without_doc(self):
_id = str(uuid.uuid4())
foo = self._test_helper_without_doc(_id)
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_idempotent_id_not_str(self):
_id = 42
self.assertRaises(TypeError, self._test_helper, _id)
def test_idempotent_id_not_valid_uuid(self):
_id = '42'
self.assertRaises(ValueError, self._test_helper, _id)
class TestSkipUnlessAttrDecorator(base.TestCase):
def _test_skip_unless_attr(self, attr, expected_to_skip=True):
class TestFoo(test.BaseTestCase):
expected_attr = not expected_to_skip
@decorators.skip_unless_attr(attr)
def test_foo(self):
pass
t = TestFoo('test_foo')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException,
t.test_foo())
else:
try:
t.test_foo()
except Exception:
raise testtools.TestCase.failureException()
def test_skip_attr_does_not_exist(self):
self._test_skip_unless_attr('unexpected_attr')
def test_skip_attr_false(self):
self._test_skip_unless_attr('expected_attr')
def test_no_skip_for_attr_exist_and_true(self):
self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
|
apache-2.0
| -7,026,969,918,760,330,000
| 33.769841
| 79
| 0.617439
| false
| 3.904635
| true
| false
| false
|
suutari/shoop
|
shuup/default_tax/admin_module/views.py
|
1
|
2508
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import string_concat
from shuup.admin.utils.picotable import Column
from shuup.admin.utils.views import CreateOrUpdateView, PicotableListView
from shuup.default_tax.models import TaxRule
from shuup.utils.patterns import PATTERN_SYNTAX_HELP_TEXT
class TaxRuleForm(forms.ModelForm):
class Meta:
model = TaxRule
fields = [
"tax_classes",
"customer_tax_groups",
"country_codes_pattern",
"region_codes_pattern",
"postal_codes_pattern",
"priority",
"override_group",
"tax",
"enabled",
]
help_texts = {
"country_codes_pattern": string_concat(
PATTERN_SYNTAX_HELP_TEXT,
" ",
_("Use ISO 3166-1 country codes (US, FI etc.)")
),
"region_codes_pattern": string_concat(
PATTERN_SYNTAX_HELP_TEXT,
" ",
_("Use two letter state codes for the US")
),
"postal_codes_pattern": PATTERN_SYNTAX_HELP_TEXT,
}
def clean(self):
data = super(TaxRuleForm, self).clean()
data["country_codes_pattern"] = data["country_codes_pattern"].upper()
return data
class TaxRuleEditView(CreateOrUpdateView):
model = TaxRule
template_name = "shuup/default_tax/admin/edit.jinja"
form_class = TaxRuleForm
context_object_name = "tax_rule"
add_form_errors_as_messages = True
class TaxRuleListView(PicotableListView):
url_identifier = "default_tax.tax_rule"
model = TaxRule
default_columns = [
Column("id", _("Tax Rule")),
Column("tax", _("Tax")),
Column("tax_classes", _("Tax Classes")),
Column("customer_tax_groups", _("Customer Tax Groups")),
Column("country_codes_pattern", _("Countries")),
Column("region_codes_pattern", _("Regions")),
Column("postal_codes_pattern", _("Postal Codes")),
Column("priority", _(u"Priority")),
Column("override_group", _(u"Override Group")),
Column("enabled", _(u"Enabled")),
]
|
agpl-3.0
| -7,073,731,062,335,883,000
| 32
| 77
| 0.603668
| false
| 3.987281
| false
| false
| false
|
cournape/numscons
|
numscons/scons-local/scons-local-1.2.0/SCons/Platform/hpux.py
|
1
|
1759
|
"""engine.SCons.Platform.hpux
Platform-specific initialization for HP-UX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/hpux.py 2009/09/04 16:33:07 david"
import posix
def generate(env):
posix.generate(env)
#Based on HP-UX11i: ARG_MAX=2048000 - 3000 for environment expansion
env['MAXLINELENGTH'] = 2045000
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bsd-3-clause
| 6,774,897,390,272,396,000
| 37.23913
| 89
| 0.761228
| false
| 3.926339
| false
| false
| false
|
wwrechard/pydlm
|
pydlm/access/_dlmGet.py
|
1
|
5519
|
"""
===============================================================================
The code for all get methods
===============================================================================
"""
from numpy import dot
from pydlm.core._dlm import _dlm
class _dlmGet(_dlm):
""" The class containing all get methods for dlm class.
Methods:
_getComponent: get the component if it is in dlm
_getLatentState: get the latent state for a given component
_getLatentCov: get the latent covariance for a given component
_getComponentMean: get the mean of a given component
_getComponentVar: get the variance of a given component
"""
# function to get the corresponding latent state
def _getLatentState(self, name, filterType, start, end):
""" Get the latent states of a given component.
Args:
name: the name of the component.
filterType: the type of the latent states to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the latent states to be returned.
end: the end date to be returned.
Returns:
A list of latent states.
"""
end += 1
indx = self.builder.componentIndex[name]
patten = lambda x: x if x is None else x[indx[0]:(indx[1] + 1), 0]
if filterType == 'forwardFilter':
return list(map(patten, self.result.filteredState[start:end]))
elif filterType == 'backwardSmoother':
return list(map(patten, self.result.smoothedState[start:end]))
elif filterType == 'predict':
return list(map(patten, self.result.predictedState[start:end]))
else:
raise NameError('Incorrect filter type')
# function to get the corresponding latent covariance
def _getLatentCov(self, name, filterType, start, end):
""" Get the latent covariance of a given component.
Args:
name: the name of the component.
filterType: the type of the latent covariance to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the latent covariance to be returned.
end: the end date to be returned.
Returns:
A list of latent covariance.
"""
end += 1
indx = self.builder.componentIndex[name]
patten = lambda x: x if x is None \
else x[indx[0]:(indx[1] + 1), indx[0]:(indx[1] + 1)]
if filterType == 'forwardFilter':
return list(map(patten, self.result.filteredCov[start:end]))
elif filterType == 'backwardSmoother':
return list(map(patten, self.result.smoothedCov[start:end]))
elif filterType == 'predict':
return list(map(patten, self.result.predictedCov[start:end]))
else:
raise NameError('Incorrect filter type')
# function to get the component mean
def _getComponentMean(self, name, filterType, start, end):
""" Get the mean of a given component.
Args:
name: the name of the component.
filterType: the type of the mean to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the mean to be returned.
end: the end date to be returned.
Returns:
A list of mean.
"""
end += 1
comp = self._fetchComponent(name)
componentState = self._getLatentState(name=name,
filterType=filterType,
start=start, end=end)
result = []
for k, i in enumerate(range(start, end)):
if name in self.builder.dynamicComponents:
comp.updateEvaluation(i)
elif name in self.builder.automaticComponents:
comp.updateEvaluation(i, self.padded_data)
result.append(dot(comp.evaluation,
componentState[k]).tolist()[0][0])
return result
# function to get the component variance
def _getComponentVar(self, name, filterType, start, end):
""" Get the variance of a given component.
Args:
name: the name of the component.
filterType: the type of the variance to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the variance to be returned.
end: the end date to be returned.
Returns:
A list of variance.
"""
end += 1
comp = self._fetchComponent(name)
componentCov = self._getLatentCov(name=name,
filterType=filterType,
start=start, end=end)
result = []
for k, i in enumerate(range(start, end)):
if name in self.builder.dynamicComponents:
comp.updateEvaluation(i)
elif name in self.builder.automaticComponents:
comp.updateEvaluation(i, self.padded_data)
result.append(dot(
dot(comp.evaluation,
componentCov[k]), comp.evaluation.T).tolist()[0][0])
return result
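# Illustration of the slicing convention used above (indices are
# hypothetical): if builder.componentIndex['trend'] == (0, 1) and each
# filtered state is a 4x1 matrix, the `patten` lambda in _getLatentState
# keeps rows 0..1 of column 0, i.e. the latent states belonging to the
# 'trend' component, while _getLatentCov keeps the matching 2x2 block of
# each covariance matrix.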
|
bsd-3-clause
| -5,919,468,780,384,474,000
| 37.326389
| 79
| 0.548831
| false
| 4.591514
| false
| false
| false
|
vineodd/PIMSim
|
GEM5Simulation/gem5/util/systemc/systemc_within_gem5/systemc_simple_object/SystemC_Example.py
|
5
|
2697
|
# Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.SimObject import SimObject
from SystemC import SystemC_ScModule
# This class is a subclass of sc_module, and all the special magic which makes
# that work is handled in the base classes.
class SystemC_Printer(SystemC_ScModule):
type = 'SystemC_Printer'
cxx_class = 'Printer'
cxx_header = 'systemc_simple_object/printer.hh'
# This parameter will be available in the SystemC_PrinterParams::create
# function and can be passed to the c++ object's constructor, used to set
# one of its member variables, as a parameter to one of its methods, etc.
prefix = Param.String('', 'Prefix for each word')
# This is a standard gem5 SimObject class with no special accommodation for the
# fact that one of its parameters is a systemc object.
class Gem5_Feeder(SimObject):
type = 'Gem5_Feeder'
cxx_class = 'Feeder'
cxx_header = 'systemc_simple_object/feeder.hh'
# This parameter will be a pointer to an instance of the class above.
printer = Param.SystemC_Printer('Printer for our words.')
delay = Param.Latency('1ns', 'Time to wait between each word.')
strings = VectorParam.String([], 'Words to print.')
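# Hypothetical configuration sketch showing how the two objects above are
# meant to be wired together (instance names and parameter values are
# illustrative only):
#
#     printer = SystemC_Printer(prefix='word: ')
#     feeder = Gem5_Feeder(printer=printer, delay='5ns',
#                          strings=['hello', 'world'])
#
# The Gem5_Feeder's `printer` parameter hands a pointer to the Printer
# sc_module to the C++ Feeder, as the comments above describe.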
|
gpl-3.0
| -5,901,187,575,971,177,000
| 49.886792
| 78
| 0.763441
| false
| 4.247244
| false
| false
| false
|
oschumac/python-uart-pi-xbridge
|
BGReadings.py
|
1
|
3925
|
#!/usr/bin/python
import json
import socket
import sys
import time
import os
import array
import math
import sqlite3
import mongo
import db
import sensor
import xdriplib
WixelData = {"_id":0,"TransmitterId":"00000","CaptureDateTime":0,"RelativeTime":0,"ReceivedSignalStrength":0,"RawValue":0,"TransmissionId":0,"BatteryLife":0,"UploadAttempts":0,"Uploaded":0,"UploaderBatteryLife":0,"FilteredValue":0 }
def oldinsertIntoWixeldata_(data) :
if sensor.SensorisActive():
CurSensor=sensor.currentSensor()
print "CurSensor->" + str(CurSensor['started_at'])
TimeDelta=((long(data['CaptureDateTime'])-long(CurSensor['started_at']))*1.0)/1000/60/60
Adjusted_raw=xdriplib.calculateAgeAdjustedRawValue(TimeDelta,int(data['RawValue']))
print "BGReadings AgeAdjustedRaw -> " + str(Adjusted_raw)
Adjusted_raw=xdriplib.calculateAgeAdjustedRawValue(TimeDelta,int(data['RawValue']))
else:
print "No Sensor Active"
Adjusted_raw=0
conn = sqlite3.connect(db.openapsDBName)
sql='insert into ' + db.tableNameWixeldata
sql+='(TransmitterId, CaptureDateTime, RelativeTime, ReceivedSignalStrength, RawValue, TransmissionId, BatteryLife, UploadAttempts, Uploaded, UploaderBatteryLife, FilteredValue, age_adjusted_raw_value ) VALUES ('
sql+=" '" + str(data['TransmitterId']) + "'"
sql+=', ' + str(data['CaptureDateTime'])
sql+=', ' + str(data['RelativeTime'])
sql+=', ' + str(data['ReceivedSignalStrength'])
sql+=', ' + str(data['RawValue'])
sql+=', ' + str(data['TransmissionId'])
sql+=', ' + str(data['BatteryLife'])
sql+=', ' + str(data['UploadAttempts'])
sql+=', ' + str(data['Uploaded'])
sql+=', ' + str(data['UploaderBatteryLife'])
sql+=', ' + str(Adjusted_raw)
sql+=', ' + str(data['FilteredValue']) + ' )'
#print "(BGReadings)(insertIntoWixel) SQL->" + sql
conn.execute(sql)
conn.commit()
print "Records created successfully";
conn.close()
def oldgetrawData_():
wdata=WixelData
sql = 'select _id, TransmitterId, CaptureDateTime, RelativeTime, ReceivedSignalStrength, RawValue, TransmissionId, BatteryLife, UploadAttempts, Uploaded, UploaderBatteryLife, FilteredValue '
sql+= 'from ' + db.tableNameWixeldata + ' order by CaptureDateTime desc limit 1'
#print "(BGReadings)(getrawData) SQL->" + sql
conn = sqlite3.connect(db.openapsDBName)
cur = conn.cursor()
cur.execute(sql)
data = cur.fetchone()
conn.close()
wdata=WixelData
if data!=None:
wdata['_id']=data[0]
wdata['TransmitterId']=data[1]
wdata['CaptureDateTime']=data[2]
wdata['RelativeTime']=data[3]
wdata['ReceivedSignalStrength']=data[4]
wdata['RawValue']=data[5]
wdata['TransmissionId']=data[6]
wdata['BatteryLife']=data[7]
wdata['UploadAttempts']=data[8]
wdata['Uploaded']=data[9]
wdata['UploaderBatteryLife']=data[10]
wdata['FilteredValue']=data[11]
else:
print "(BGReadings)(getrawData) No data available"
return wdata;
def oldinitBGReadings_():
initDB()
def oldlatestRaw_(anzahl):
sql = 'select RawValue, CaptureDateTime, age_adjusted_raw_value as Timestamp '
sql+= 'from ' + db.tableNameWixeldata + ' order by CaptureDateTime desc limit ' + str(anzahl)
conn = sqlite3.connect(db.openapsDBName)
cur = conn.cursor()
cur.execute(sql)
data = cur.fetchall()
conn.close()
return data;
def oldtest_():
mydata = {"_id":1,"TransmitterId":"66PNX","CaptureDateTime":0,"RelativeTime":0,"ReceivedSignalStrength":0,"RawValue":0,"TransmissionId":0,"BatteryLife":0,"UploadAttempts":0,"Uploaded":0,"UploaderBatteryLife":0,"FilteredValue":0 }
mydata['CaptureDateTime']=long(time.time())
mydata['RelativeTime']=2121313
mydata['RawValue']="155000"
mydata['FilteredValue']="155000"
mydata['BatteryLife']="240"
mydata['TransmitterId']="00000"
mydata['ReceivedSignalStrength']=0
mydata['TransmissionId']=0
print "Time adjusted raw" + str(xdriplib.calculateAgeAdjustedRawValue(5,155000))
insertIntoWixeldata(mydata)
if __name__ == "__main__":
test()
|
mit
| -7,661,033,420,010,583,000
| 31.983193
| 232
| 0.713885
| false
| 2.909563
| false
| false
| false
|
diegojromerolopez/djanban
|
src/djanban/apps/work_hours_packages/migrations/0001_initial.py
|
1
|
2517
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 14:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('multiboards', '0004_auto_20170526_1615'),
('members', '0023_auto_20170519_1715'),
('boards', '0068_auto_20170515_1844'),
]
operations = [
migrations.CreateModel(
name='WorkHoursPackage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='Name of this package')),
                ('description', models.TextField(help_text='Long description of this package describing the type of work the workers must do', verbose_name='Description of this package')),
('number_of_hours', models.PositiveIntegerField(help_text='Number of hours of this package.', verbose_name='Number of hours')),
('is_paid', models.BooleanField(default=False, help_text='Has the client paid for this package', verbose_name='Is this package paid?')),
('payment_datetime', models.DateField(blank=True, default=None, null=True, verbose_name='When this package was paid')),
('start_work_date', models.DateField(verbose_name='Start date')),
('end_work_date', models.DateField(verbose_name='End date')),
('board', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='work_hours_packages', to='boards.Board', verbose_name='Board')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_work_hours_packages', to='members.Member', verbose_name='Member')),
('label', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='work_hours_packages', to='boards.Label', verbose_name='Label')),
('members', models.ManyToManyField(blank=True, related_name='work_hours_packages', to='members.Member', verbose_name='Member')),
('multiboard', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='work_hours_packages', to='multiboards.Multiboard', verbose_name='Multiboard')),
],
),
]
|
mit
| -4,146,718,177,459,947,500
| 65.236842
| 224
| 0.657926
| false
| 4.020767
| false
| false
| false
|
coffeemakr/torweb
|
doc/conf.py
|
1
|
8854
|
# -*- coding: utf-8 -*-
#
# Torweb documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 3 13:37:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'alabaster'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Torweb'
copyright = u'2016, coffeemakr'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
autodoc_default_flags = ['members', 'show-inheritance']
autoclass_content = 'both'
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
html_theme_path = [alabaster.get_path()]
html_theme_options = {
'logo': 'logo.png',
'github_user': 'bitprophet',
'github_repo': 'alabaster',
}
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Torwebdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Torweb.tex', u'Torweb Documentation',
u'coffeemakr', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'torweb', u'Torweb Documentation',
[u'coffeemakr'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Torweb', u'Torweb Documentation',
u'coffeemakr', 'Torweb', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
gpl-2.0
| 9,221,430,299,245,530,000
| 29.426117
| 79
| 0.69991
| false
| 3.684561
| true
| false
| false
|
cnobile2012/inventory
|
inventory/regions/admin.py
|
1
|
3582
|
# -*- coding: utf-8 -*-
#
# inventory/regions/admin.py
#
"""
Country, Language, and Timezone region admin.
"""
__docformat__ = "restructuredtext en"
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from .models import Country, Subdivision, Language, TimeZone, Currency
from .forms import (
CountryForm, SubdivisionForm, LanguageForm, TimeZoneForm, CurrencyForm)
#
# CountryAdmin
#
@admin.register(Country)
class CountryAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('country', 'code',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('active',)}),
)
list_display = ('code', 'country', 'active',)
readonly_fields = ('country', 'code',)
list_editable = ('active',)
search_fields = ('code', 'country',)
list_filter = ('active', 'code',)
ordering = ('country',)
form = CountryForm
#
# SubdivisionAdmin
#
@admin.register(Subdivision)
class SubdivisionAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('subdivision_name', 'country', 'code',)}),
(_("Status"), {'classes': ('collapse',),
'fields': ('active',)}),
)
ordering = ('country__country', 'subdivision_name',)
readonly_fields = ('subdivision_name', 'country', 'code',)
list_display = ('subdivision_name', 'country', 'code', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('subdivision_name', 'code', 'country__code',
'country__country',)
form = SubdivisionForm
#
# Language
#
@admin.register(Language)
class LanguageAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('locale', 'country', 'code',)}),
(_("Status"), {'classes': ('collapse',),
'fields': ('active',)}),
)
ordering = ('locale',)
readonly_fields = ('locale', 'country', 'code',)
list_display = ('locale', 'country', 'code', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('locale', 'country__code', 'country__country',)
form = LanguageForm
#
# TimeZone
#
@admin.register(TimeZone)
class TimeZoneAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('zone', 'coordinates', 'country', 'desc',)}),
(_("Status"), {'classes': ('collapse',),
'fields': ('active',)}),
)
ordering = ('zone',)
readonly_fields = ('zone', 'coordinates', 'country', 'desc',)
list_display = ('zone', 'country', 'coordinates', 'desc', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('country__country', 'country__code', 'zone', 'desc',)
form = TimeZoneForm
#
# Currency
#
@admin.register(Currency)
class CurrencyAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('currency', 'country', 'alphabetic_code',
'numeric_code', 'minor_unit', 'symbol',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('active',)}),
)
readonly_fields = ('currency', 'country', 'alphabetic_code',
'numeric_code', 'minor_unit', 'symbol',)
list_display = ('currency', 'country', 'symbol', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('currency', 'country__country', 'alphabetic_code',
'numeric_code',)
form = CurrencyForm
|
mit
| -2,082,895,579,007,227,400
| 30.699115
| 75
| 0.56756
| false
| 3.859914
| false
| false
| false
|
brahle/fitmarket-python-api
|
setup.py
|
1
|
3056
|
# coding: utf-8
"""
Fitmarket
    A small number of people - donors - share daily measurements of their weight. From the daily weight of one donor we derive the values of two stocks: - stock X has a value equal to the donor's weight on that day. - the inverse stock ~X has the value (150 kg - X). Note that: - as X rises, ~X falls. - X + ~X = 150 kg Every player starts the game with 10,000 kg of available money. The player uses that money to trade stocks. A player's total value is the sum of the available money and the current value of all stocks they hold. The goal of the game is to maximize total value by predicting the movement of stock prices well. For example, on day one a player buys 125 shares of \"X\" at 80 kg. On day two, the stock rises to 82 kg. If the player sells all \"X\" shares, they have earned 2 kg * 125 = 250 kg! The game does not allow a donor to trade their own stock.
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from setuptools import setup, find_packages
NAME = "fitmarket_api"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Fitmarket",
author_email="",
url="",
keywords=["Swagger", "Fitmarket"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
    A small number of people - donors - share daily measurements of their weight. From the daily weight of one donor we derive the values of two stocks: - stock X has a value equal to the donor's weight on that day. - the inverse stock ~X has the value (150 kg - X). Note that: - as X rises, ~X falls. - X + ~X = 150 kg Every player starts the game with 10,000 kg of available money. The player uses that money to trade stocks. A player's total value is the sum of the available money and the current value of all stocks they hold. The goal of the game is to maximize total value by predicting the movement of stock prices well. For example, on day one a player buys 125 shares of \"X\" at 80 kg. On day two, the stock rises to 82 kg. If the player sells all \"X\" shares, they have earned 2 kg * 125 = 250 kg! The game does not allow a donor to trade their own stock.
"""
)
|
apache-2.0
| -2,895,560,746,953,038,300
| 55.148148
| 880
| 0.735158
| false
| 2.602575
| false
| false
| false
|
Schevo/schevo
|
schevo/database2.py
|
1
|
88861
|
"""Schevo database, format 2."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
import sys
from schevo.lib import optimize
import operator
import os
import random
try:
import louie
except ImportError:
# Dummy module.
class louie(object):
@staticmethod
def send(*args, **kw):
pass
from schevo import base
from schevo import change
from schevo.change import CREATE, UPDATE, DELETE
from schevo.constant import UNASSIGNED
from schevo.counter import schema_counter
from schevo import error
from schevo.entity import Entity
from schevo.expression import Expression
from schevo.extent import Extent
from schevo.field import Entity as EntityField
from schevo.field import not_fget
from schevo.lib import module
from schevo.mt.dummy import dummy_lock
from schevo.namespace import NamespaceExtension
from schevo.placeholder import Placeholder
import schevo.schema
from schevo.signal import TransactionExecuted
from schevo.trace import log
from schevo.transaction import (
CallableWrapper, Combination, Initialize, Populate, Transaction)
class Database(base.Database):
"""Schevo database, format 2.
See doc/SchevoInternalDatabaseStructures.txt for detailed information on
data structures.
"""
# By default, don't dispatch signals. Set to True to dispatch
# TransactionExecuted signals.
dispatch = False
# See dummy_lock documentation.
read_lock = dummy_lock
write_lock = dummy_lock
def __init__(self, backend):
"""Create a database.
- `backend`: The storage backend instance to use.
"""
self._sync_count = 0
self.backend = backend
# Aliases to classes in the backend.
self._BTree = backend.BTree
self._PDict = backend.PDict
self._PList = backend.PList
self._conflict_exceptions = getattr(backend, 'conflict_exceptions', ())
self._root = backend.get_root()
# Shortcuts to coarse-grained commit and rollback.
self._commit = backend.commit
self._rollback = backend.rollback
# Keep track of schema modules remembered.
self._remembered = []
# Initialization.
self._create_schevo_structures()
self._commit()
# Index to extent instances assigned by _sync.
self._extents = {}
# Index to entity classes assigned by _sync.
self._entity_classes = {}
# Vars used in transaction processing.
self._bulk_mode = False
self._executing = []
# Shortcuts.
schevo = self._root['SCHEVO']
self._extent_name_id = schevo['extent_name_id']
self._extent_maps_by_id = schevo['extents']
self._update_extent_maps_by_name()
# Plugin support.
self._plugins = []
def __repr__(self):
return '<Database %r :: V %r>' % (self.label, self.version)
@property
def _extent_id_name(self):
return dict((v, k) for k, v in self._extent_name_id.items())
def close(self):
"""Close the database."""
assert log(1, 'Stopping plugins.')
p = self._plugins
while p:
assert log(2, 'Stopping', p)
p.pop().close()
assert log(1, 'Closing storage.')
self.backend.close()
remembered = self._remembered
while remembered:
module.forget(remembered.pop())
def execute(self, *transactions, **kw):
"""Execute transaction(s)."""
if self._executing:
# Pass-through outer transactions.
return self._execute(*transactions, **kw)
else:
# Try outer transactions up to 10 times if conflicts occur.
remaining_attempts = 10
while remaining_attempts > 0:
try:
return self._execute(*transactions, **kw)
except self._conflict_exceptions:
remaining_attempts -= 1
for tx in transactions:
tx._executing = False
raise error.BackendConflictError()
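    # Usage sketch (with a hypothetical transaction class): a single
    # top-level call such as ``db.execute(CreatePerson(name=u'Alice'))``
    # is retried up to 10 times if the backend raises a conflict exception;
    # calls made while another transaction is already executing are passed
    # straight through to _execute() below as subtransactions.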
def _execute(self, *transactions, **kw):
strict = kw.get('strict', True)
executing = self._executing
if len(transactions) == 0:
raise RuntimeError('Must supply at least one transaction.')
if len(transactions) > 1:
if not executing:
raise RuntimeError(
'Must supply only one top-level transaction.')
else:
# Multiple transactions are treated as a single
# transaction containing subtransactions.
tx = Combination(transactions)
else:
tx = transactions[0]
if tx._executed:
            raise error.TransactionAlreadyExecuted(tx)
if not executing:
# Bulk mode can only be set on an outermost transaction
# and effects all inner transactions.
self._bulk_mode = kw.get('bulk_mode', False)
# Outermost transaction must be executed strict.
strict = True
# Bulk mode minimizes transaction metadata.
bulk_mode = self._bulk_mode
executing.append(tx)
assert log(1, 'Begin executing [%i]' % len(executing), tx)
try:
retval = tx._execute(self)
assert log(2, 'Result was', repr(retval))
# Enforce any indices relaxed by the transaction.
for extent_name, index_spec in frozenset(tx._relaxed):
assert log(2, 'Enforcing index', extent_name, index_spec)
self._enforce_index_field_ids(extent_name, *index_spec)
# If the transaction must be executed with strict
# validation, perform that validation now.
if strict:
c = tx._changes_requiring_validation
assert log(
2, 'Validating', len(c), 'changes requiring validation')
self._validate_changes(c)
except Exception, e:
assert log(1, e, 'was raised; undoing side-effects.')
if bulk_mode:
assert log(2, 'Bulk Mode transaction; storage rollback.')
self._rollback()
elif len(executing) == 1:
assert log(2, 'Outer transaction; storage rollback.')
self._rollback()
else:
assert log(2, 'Inner transaction; inverting.')
inversions = tx._inversions
while len(inversions):
method, args, kw = inversions.pop()
# Make sure the inverse operation doesn't append
# an inversion itself.
self._executing = None
# Perform the inversion.
method(*args, **kw)
# Restore state.
self._executing = executing
# Get rid of the current transaction on the stack since
# we're done undoing it.
executing.pop()
# Allow exception to bubble up.
raise
assert log(1, ' Done executing [%i]' % len(executing), tx)
tx._executed = True
# Post-transaction
if bulk_mode and len(executing) > 1:
assert log(2, 'Bulk Mode inner transaction.')
e2 = executing[-2]
e1 = executing[-1]
if not strict:
e2._changes_requiring_validation.extend(
e1._changes_requiring_validation)
elif bulk_mode:
assert log(2, 'Bulk Mode outer transaction; storage commit.')
# Done executing the outermost transaction. Use
# Durus-based commit.
self._commit()
elif len(executing) > 1:
assert log(2, 'Inner transaction; record inversions and changes.')
# Append the inversions from this transaction to the next
# outer transaction.
e2 = executing[-2]
e1 = executing[-1]
e2._inversions.extend(e1._inversions)
# Also append the changes made from this transaction.
e2._changes_requiring_notification.extend(
e1._changes_requiring_notification)
if not strict:
e2._changes_requiring_validation.extend(
e1._changes_requiring_validation)
else:
assert log(2, 'Outer transaction; storage commit.')
# Done executing the outermost transaction. Use
# Durus-based commit.
self._commit()
# Send a signal if told to do so.
if self.dispatch:
assert log(2, 'Dispatching TransactionExecuted signal.')
louie.send(TransactionExecuted, sender=self, transaction=tx)
executing.pop()
return retval
def extent(self, extent_name):
"""Return the named extent instance."""
return self._extents[extent_name]
def extent_names(self):
"""Return a sorted list of extent names."""
return sorted(self._extent_maps_by_name.keys())
def extents(self):
"""Return a list of extent instances sorted by name."""
extent = self.extent
return [extent(name) for name in self.extent_names()]
def pack(self):
"""Pack the database."""
if os.environ.get('SCHEVO_NOPACK', '').strip() != '1':
self.backend.pack()
def populate(self, sample_name=''):
"""Populate the database with sample data."""
tx = Populate(sample_name)
self.execute(tx)
@property
def format(self):
return self._root['SCHEVO']['format']
@property
def schema_source(self):
return self._root['SCHEVO']['schema_source']
@property
def version(self):
return self._root['SCHEVO']['version']
def _get_label(self):
SCHEVO = self._root['SCHEVO']
if 'label' not in SCHEVO:
# Older database, no label stored in it.
return u'Schevo Database'
else:
return SCHEVO['label']
def _set_label(self, new_label):
if self._executing:
raise error.DatabaseExecutingTransaction(
'Cannot change database label while executing a transaction.')
self._root['SCHEVO']['label'] = unicode(new_label)
self._commit()
label = property(_get_label, _set_label)
_label = property(_get_label, _set_label)
def _append_change(self, typ, extent_name, oid):
executing = self._executing
if executing:
info = (typ, extent_name, oid)
tx = executing[-1]
tx._changes_requiring_validation.append(info)
if not self._bulk_mode:
tx._changes_requiring_notification.append(info)
def _append_inversion(self, method, *args, **kw):
"""Append an inversion to a transaction if one is being
executed."""
if self._bulk_mode:
return
executing = self._executing
if executing:
executing[-1]._inversions.append((method, args, kw))
def _by_entity_oids(self, extent_name, *index_spec):
"""Return a list of OIDs from an extent sorted by index_spec."""
extent_map = self._extent_map(extent_name)
indices = extent_map['indices']
index_map = extent_map['index_map']
# Separate index_spec into two tuples, one containing field
# names and one containing 'ascending' bools.
field_names = []
ascending = []
for field_name in index_spec:
if field_name.startswith('-'):
field_names.append(field_name[1:])
ascending.append(False)
else:
field_names.append(field_name)
ascending.append(True)
index_spec = _field_ids(extent_map, field_names)
if index_spec not in indices:
# Specific index not found; look for an index where
# index_spec matches the beginning of that index's spec.
if index_spec not in index_map:
# None found.
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Use the first index found.
index_spec = index_map[index_spec][0]
oids = []
unique, branch = indices[index_spec]
_walk_index(branch, ascending, oids)
return oids
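    # Example of the index_spec convention accepted above (field names are
    # hypothetical): ('surname', '-age') sorts by surname ascending, then by
    # age descending; the leading '-' only selects the sort direction and is
    # stripped before the names are mapped to field ids.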
def _create_entity(self, extent_name, fields, related_entities,
oid=None, rev=None):
"""Create a new entity in an extent; return the oid.
- `extent_name`: Name of the extent to create a new entity in.
- `fields`: Dictionary of field_name:field_value mappings, where
each field_value is the value to be stored in the database, as
returned by a field instance's `_dump` method.
- `related_entities`: Dictionary of field_name:related_entity_set
mappings, where each related_entity_set is the set of entities
stored in the field's structure, as returned by a field
instance's `_entities_in_value` method.
- `oid`: (optional) Specific OID to create the entity as; used
for importing data, e.g. from an XML document.
- `rev`: (optional) Specific revision to create the entity as; see
`oid`.
"""
extent_map = self._extent_map(extent_name)
entities = extent_map['entities']
old_next_oid = extent_map['next_oid']
field_name_id = extent_map['field_name_id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
indices_added = []
ia_append = indices_added.append
links_created = []
lc_append = links_created.append
BTree = self._BTree
PDict = self._PDict
try:
if oid is None:
oid = extent_map['next_oid']
extent_map['next_oid'] += 1
if rev is None:
rev = 0
if oid in entities:
raise error.EntityExists(extent_name, oid)
# Create fields_by_id dict with field-id:field-value items.
fields_by_id = PDict()
for name, value in fields.iteritems():
field_id = field_name_id[name]
fields_by_id[field_id] = value
# Create related_entities_by_id dict with
# field-id:related-entities items.
new_links = []
nl_append = new_links.append
related_entities_by_id = PDict()
for name, related_entity_set in related_entities.iteritems():
field_id = field_name_id[name]
related_entities_by_id[field_id] = related_entity_set
for placeholder in related_entity_set:
other_extent_id = placeholder.extent_id
other_oid = placeholder.oid
nl_append((field_id, other_extent_id, other_oid))
# Make sure fields that weren't specified are set to
# UNASSIGNED.
setdefault = fields_by_id.setdefault
for field_id in field_name_id.itervalues():
setdefault(field_id, UNASSIGNED)
# Update index mappings.
indices = extent_map['indices']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_add(extent_map, index_spec, relaxed, oid, field_values,
BTree)
ia_append((extent_map, index_spec, oid, field_values))
# Update links from this entity to another entity.
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, other_extent_id, other_oid in new_links:
other_extent_map = extent_maps_by_id[other_extent_id]
try:
other_entity_map = other_extent_map['entities'][other_oid]
except KeyError:
field_id_name = extent_map['field_id_name']
field_name = field_id_name[referrer_field_id]
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
raise error.EntityDoesNotExist(
other_extent_name, field_name=field_name)
# Add a link to the other entity.
links = other_entity_map['links']
link_key = (referrer_extent_id, referrer_field_id)
if link_key not in links: # XXX Should already be there.
links[link_key] = BTree()
links[link_key][oid] = None
other_entity_map['link_count'] += 1
lc_append((other_entity_map, links, link_key, oid))
# Create the actual entity.
entity_map = entities[oid] = PDict()
entity_map['fields'] = fields_by_id
# XXX flesh out links based on who is capable of linking
# to this one.
entity_map['link_count'] = 0
entity_map['links'] = PDict()
entity_map['related_entities'] = related_entities_by_id
entity_map['rev'] = rev
# Update the extent.
extent_map['len'] += 1
# Allow inversion of this operation.
self._append_inversion(self._delete_entity, extent_name, oid)
# Keep track of changes.
append_change = self._append_change
append_change(CREATE, extent_name, oid)
return oid
except:
# Revert changes made during create attempt.
for _e, _i, _o, _f in indices_added:
_index_remove(_e, _i, _o, _f)
for other_entity_map, links, link_key, oid in links_created:
del links[link_key][oid]
other_entity_map['link_count'] -= 1
extent_map['next_oid'] = old_next_oid
raise
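    # Illustrative call (extent and field names are hypothetical):
    #
    #     oid = db._create_entity(
    #         'Person',
    #         fields={'name': u'Alice', 'age': 30},
    #         related_entities={})
    #
    # Field values are the dumped representations returned by each field's
    # `_dump` method, and `related_entities` maps entity-valued field names
    # to sets of Placeholder objects, as described in the docstring above;
    # it is empty here because neither example field references an entity.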
def _delete_entity(self, extent_name, oid):
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
all_field_ids = set(extent_map['field_id_name'].iterkeys())
extent_id = extent_map['id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
field_name_id = extent_map['field_name_id']
link_count = entity_map['link_count']
links = entity_map['links']
# Disallow deletion if other entities refer to this one,
# unless all references are merely from ourself or an entity
# that will be deleted.
deletes = set()
executing = self._executing
if executing:
tx = executing[-1]
deletes.update([(extent_name_id[del_entity_cls.__name__], del_oid)
for del_entity_cls, del_oid in tx._deletes])
deletes.update([(extent_name_id[del_entity_cls.__name__], del_oid)
for del_entity_cls, del_oid in tx._known_deletes])
for (other_extent_id, other_field_id), others in links.iteritems():
for other_oid in others:
if (other_extent_id, other_oid) in deletes:
continue
# Give up as soon as we find one outside reference.
if (other_extent_id, other_oid) != (extent_id, oid):
entity = self._entity(extent_name, oid)
referring_entity = self._entity(other_extent_id, other_oid)
other_field_name = extent_maps_by_id[other_extent_id][
'field_id_name'][other_field_id]
raise error.DeleteRestricted(
entity=entity,
referring_entity=referring_entity,
referring_field_name=other_field_name
)
# Get old values for use in a potential inversion.
old_fields = self._entity_fields(extent_name, oid)
old_related_entities = self._entity_related_entities(extent_name, oid)
old_rev = entity_map['rev']
# Remove index mappings.
indices = extent_map['indices']
fields_by_id = entity_map['fields']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id.get(f_id, UNASSIGNED)
for f_id in index_spec)
_index_remove(extent_map, index_spec, oid, field_values)
# Delete links from this entity to other entities.
related_entities = entity_map['related_entities']
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, related_set in related_entities.iteritems():
# If a field once existed, but no longer does, there will
# still be a related entity set for it in related_entities.
# Only process the fields that still exist.
if referrer_field_id in all_field_ids:
for other_value in related_set:
# Remove the link to the other entity.
other_extent_id = other_value.extent_id
other_oid = other_value.oid
link_key = (referrer_extent_id, referrer_field_id)
other_extent_map = extent_maps_by_id[other_extent_id]
if other_oid in other_extent_map['entities']:
other_entity_map = other_extent_map[
'entities'][other_oid]
links = other_entity_map['links']
other_links = links[link_key]
# The following check is due to scenarios like this:
# Entity A and entity B are both being deleted in a
# cascade delete scenario. Entity B refers to entity A.
# Entity A has already been deleted. Entity B is now
# being deleted. We must now ignore any information
# about entity A that is attached to entity B.
if oid in other_links:
del other_links[oid]
other_entity_map['link_count'] -= 1
del extent_map['entities'][oid]
extent_map['len'] -= 1
# Allow inversion of this operation.
self._append_inversion(
self._create_entity, extent_name, old_fields,
old_related_entities, oid, old_rev)
# Keep track of changes.
append_change = self._append_change
append_change(DELETE, extent_name, oid)
def _enforce_index(self, extent_name, *index_spec):
"""Call _enforce_index after converting index_spec from field
names to field IDs."""
extent_map = self._extent_map(extent_name)
index_spec = _field_ids(extent_map, index_spec)
return self._enforce_index_field_ids(extent_name, *index_spec)
def _enforce_index_field_ids(self, extent_name, *index_spec):
"""Validate and begin enforcing constraints on the specified
index if it was relaxed within the currently-executing
transaction."""
executing = self._executing
if not executing:
# No-op if called outside a transaction.
return
# Find the index to re-enforce.
extent_map = self._extent_map(extent_name)
indices = extent_map['indices']
if index_spec not in indices:
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Find out if it has been relaxed.
current_txn = executing[-1]
relaxed = self._relaxed[extent_name]
txns, added = relaxed.get(index_spec, ([], []))
if not txns:
# Was never relaxed; no-op.
return
if current_txn in txns:
current_txn._relaxed.remove((extent_name, index_spec))
txns.remove(current_txn)
# If no more transactions have relaxed this index, enforce it.
if not txns:
BTree = self._BTree
for _extent_map, _index_spec, _oid, _field_values in added:
_index_validate(_extent_map, _index_spec, _oid, _field_values,
BTree)
def _entity(self, extent_name, oid):
"""Return the entity instance."""
EntityClass = self._entity_classes[extent_name]
return EntityClass(oid)
def _entity_field(self, extent_name, oid, name):
"""Return the value of a field in an entity in named extent
with given OID."""
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_name_id = extent_map['field_name_id']
field_id = field_name_id[name]
value = entity_map['fields'][field_id]
return value
def _entity_field_rev(self, extent_name, oid, name):
"""Return a tuple of (value, rev) of a field in an entity in
named extent with given OID."""
value = self._entity_field(extent_name, oid, name)
rev = self._entity_rev(extent_name, oid)
return value, rev
def _entity_fields(self, extent_name, oid):
"""Return a dictionary of field values for an entity in
`extent` with given OID."""
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_id_name = extent_map['field_id_name']
fields = {}
for field_id, value in entity_map['fields'].iteritems():
# During database evolution, it may turn out that fields
# get removed. For time efficiency reasons, Schevo does
# not iterate through all entities to remove existing
# data. Therefore, when getting entity fields from the
# database here, ignore fields that exist in the entity
# but no longer exist in the extent.
field_name = field_id_name.get(field_id, None)
if field_name:
fields[field_name] = value
return fields
def _entity_links(self, extent_name, oid, other_extent_name=None,
other_field_name=None, return_count=False):
"""Return dictionary of (extent_name, field_name): entity_list
pairs, or list of linking entities if `other_extent_name` and
`other_field_name` are supplied; return link count instead if
`return_count` is True."""
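        # Incoming links are stored on the *linked-to* entity, keyed by
        # (referring extent id, referring field id), so answering "who
        # points at me?" never requires scanning the referring extents.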
assert log(1, '_entity_links', extent_name, oid, other_extent_name,
other_field_name, return_count)
entity_classes = self._entity_classes
entity_map = self._entity_map(extent_name, oid)
entity_links = entity_map['links']
extent_maps_by_id = self._extent_maps_by_id
if other_extent_name is not None and other_field_name is not None:
# Both extent name and field name were provided.
other_extent_map = self._extent_map(other_extent_name)
other_extent_id = other_extent_map['id']
try:
other_field_id = other_extent_map['field_name_id'][
other_field_name]
except KeyError:
raise error.FieldDoesNotExist(
other_extent_name, other_field_name)
key = (other_extent_id, other_field_id)
# Default to a dict since it has the same API as a BTree
# for our use but is faster and will stay empty anyway.
btree = entity_links.get(key, {})
if return_count:
count = len(btree)
assert log(2, 'returning len(btree)', count)
return count
else:
EntityClass = entity_classes[other_extent_name]
others = [EntityClass(oid) for oid in btree]
return others
# Shortcut if we only care about the count, with no specificity.
link_count = entity_map['link_count']
if return_count and other_extent_name is None:
assert log(2, 'returning link_count', link_count)
return link_count
# Build links tree.
specific_extent_name = other_extent_name
if return_count:
links = 0
else:
links = {}
if link_count == 0:
# No links; no need to traverse.
assert log(2, 'no links - returning', links)
return links
for key, btree in entity_links.iteritems():
other_extent_id, other_field_id = key
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
if (specific_extent_name
and specific_extent_name != other_extent_name
):
assert log(2, 'Skipping', other_extent_name)
continue
if return_count:
links += len(btree)
else:
other_field_name = other_extent_map['field_id_name'][
other_field_id]
if specific_extent_name:
link_key = other_field_name
else:
link_key = (other_extent_name, other_field_name)
EntityClass = entity_classes[other_extent_name]
others = [EntityClass(oid) for oid in btree]
if others:
links[link_key] = others
if return_count:
assert log(2, 'returning links', links)
return links
def _entity_related_entities(self, extent_name, oid):
"""Return a dictionary of related entity sets for an entity in
`extent` with given OID."""
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_id_name = extent_map['field_id_name']
related_entities = {}
for field_id, related in entity_map['related_entities'].iteritems():
# During database evolution, it may turn out that fields
# get removed. For time efficiency reasons, Schevo does
# not iterate through all entities to remove existing
# data. Therefore, when getting entity fields from the
# database here, ignore fields that exist in the entity
# but no longer exist in the extent.
field_name = field_id_name.get(field_id, None)
if field_name:
related_entities[field_name] = related
return related_entities
def _entity_rev(self, extent_name, oid):
"""Return the revision of an entity in `extent` with given
OID."""
entity_map = self._entity_map(extent_name, oid)
return entity_map['rev']
def _extent_contains_oid(self, extent_name, oid):
extent_map = self._extent_map(extent_name)
return oid in extent_map['entities']
def _extent_len(self, extent_name):
"""Return the number of entities in the named extent."""
extent_map = self._extent_map(extent_name)
return extent_map['len']
def _extent_next_oid(self, extent_name):
"""Return the next OID to be assigned in the named extent."""
extent_map = self._extent_map(extent_name)
return extent_map['next_oid']
def _find_entity_oids(self, extent_name, criterion):
"""Return sequence of entity OIDs matching given field value(s)."""
assert log(1, extent_name, criterion)
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
# No criterion: return all entities.
if criterion is None:
assert log(2, 'Return all oids.')
return list(entity_maps.keys())
# Equality intersection: use optimized lookup.
try:
criteria = criterion.single_extent_field_equality_criteria()
except ValueError:
pass
else:
extent_names = frozenset(key._extent for key in criteria)
if len(extent_names) > 1:
raise ValueError('Must use fields from same extent.')
return self._find_entity_oids_field_equality(
extent_name, criteria)
# More complex lookup.
return self._find_entity_oids_general_criterion(extent_name, criterion)
def _find_entity_oids_general_criterion(self, extent_name, criterion):
if (isinstance(criterion.left, Expression)
and isinstance(criterion.right, Expression)
):
left_oids = self._find_entity_oids_general_criterion(
extent_name, criterion.left)
right_oids = self._find_entity_oids_general_criterion(
extent_name, criterion.right)
return criterion.op(left_oids, right_oids)
elif (isinstance(criterion.left, type)
and issubclass(criterion.left, base.Field)
):
return self._find_entity_oids_field_criterion(
extent_name, criterion)
else:
raise ValueError('Cannot evaluate criterion', criterion)
def _find_entity_oids_field_criterion(self, extent_name, criterion):
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
FieldClass, value, op = criterion.left, criterion.right, criterion.op
# Make sure extent name matches.
if FieldClass._extent.name != extent_name:
raise ValueError(
'Criterion extent does not match query extent.', criterion)
# Optimize for equality and inequality.
if op == operator.eq:
return set(self._find_entity_oids_field_equality(
extent_name, {FieldClass: value}))
if op == operator.ne:
all = entity_maps.keys()
matching = self._find_entity_oids_field_equality(
extent_name, {FieldClass: value})
return set(all) - set(matching)
# Create a writable field to convert the value and get its
# _dump'd representation.
field_id = extent_map['field_name_id'][FieldClass.name]
EntityClass = self._entity_classes[extent_name]
FieldClass = EntityClass._field_spec[FieldClass.name]
class TemporaryField(FieldClass):
readonly = False
field = TemporaryField(None)
field.set(value)
value = field._dump()
# Additional operators.
# XXX: Brute force for now.
if op in (operator.lt, operator.le, operator.gt, operator.ge):
results = []
append = results.append
for oid, entity_map in entity_maps.iteritems():
if op(entity_map['fields'].get(field_id, UNASSIGNED), value):
append(oid)
return set(results)
def _find_entity_oids_field_equality(self, extent_name, criteria):
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
EntityClass = self._entity_classes[extent_name]
extent_name_id = self._extent_name_id
indices = extent_map['indices']
normalized_index_map = extent_map['normalized_index_map']
field_id_name = extent_map['field_id_name']
field_name_id = extent_map['field_name_id']
# Convert from field_name:value to field_id:value.
field_id_value = {}
field_name_value = {}
for field_class, value in criteria.iteritems():
field_name = field_class.name
try:
field_id = field_name_id[field_name]
except KeyError:
raise error.FieldDoesNotExist(extent_name, field_name)
# Create a writable field to convert the value and get its
# _dump'd representation.
class TemporaryField(field_class):
readonly = False
field = TemporaryField(None)
field.set(value)
value = field._dump()
field_id_value[field_id] = value
field_name_value[field_name] = value
# Get results, using indexes and shortcuts where possible.
results = []
field_ids = tuple(sorted(field_id_value))
assert log(3, 'field_ids', field_ids)
len_field_ids = len(field_ids)
# First, see if we can take advantage of entity links.
if len_field_ids == 1:
field_id = field_ids[0]
field_name = field_id_name[field_id]
value = field_name_value[field_name]
if isinstance(value, Entity):
# We can take advantage of entity links.
entity_map = self._entity_map(value._extent.name, value._oid)
entity_links = entity_map['links']
extent_id = extent_map['id']
key = (extent_id, field_id)
linkmap = entity_links.get(key, {})
results = linkmap.keys()
return results
# Next, see if the fields given can be found in an index. If
# so, use the index to return matches.
index_spec = None
if field_ids in normalized_index_map:
for spec in normalized_index_map[field_ids]:
if len(spec) == len_field_ids:
index_spec = spec
break
if index_spec is not None:
# We found an index to use.
assert log(2, 'Use index spec:', index_spec)
unique, branch = indices[index_spec]
match = True
for field_id in index_spec:
field_value = field_id_value[field_id]
if field_value not in branch:
# No matches found.
match = False
break
branch = branch[field_value]
if match:
# Now we're at a leaf that matches all of the
# criteria, so return the OIDs in that leaf.
results = list(branch.keys())
else:
# Fields aren't indexed, so use brute force.
assert log(2, 'Use brute force.')
append = results.append
for oid, entity_map in entity_maps.iteritems():
fields = entity_map['fields']
match = True
for field_id, value in field_id_value.iteritems():
if fields.get(field_id, UNASSIGNED) != value:
match = False
break
if match:
append(oid)
assert log(2, 'Result count', len(results))
return results
def _relax_index(self, extent_name, *index_spec):
"""Relax constraints on the specified index until a matching
enforce_index is called, or the currently-executing
transaction finishes, whichever occurs first."""
executing = self._executing
if not executing:
raise RuntimeError('Indexes can only be relaxed inside '
'transaction execution.')
# ID-ify the index_spec.
extent_map = self._extent_map(extent_name)
index_spec = _field_ids(extent_map, index_spec)
# Find the index to relax.
indices = extent_map['indices']
if index_spec not in indices:
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Keep track of the relaxation.
current_txn = executing[-1]
relaxed = self._relaxed[extent_name]
txns, added = relaxed.setdefault(index_spec, ([], []))
txns.append(current_txn)
current_txn._relaxed.add((extent_name, index_spec))
def _set_extent_next_oid(self, extent_name, next_oid):
extent_map = self._extent_map(extent_name)
extent_map['next_oid'] = next_oid
def _update_entity(self, extent_name, oid, fields, related_entities,
rev=None):
"""Update an existing entity in an extent.
- `extent_name`: Name of the extent to create a new entity in.
- `oid`: OID of the entity to update.
- `fields`: Dictionary of field_name:field_value mappings to change,
where each field_value is the value to be stored in the database, as
returned by a field instance's `_dump` method.
- `related_entities`: Dictionary of field_name:related_entity_set
mappings, where each related_entity_set is the set of entities
stored in the field's structure, as returned by a field instance's
`_entities_in_value` method.
- `rev`: (optional) Specific revision to update the entity to.
"""
# XXX: Could be optimized to update mappings only when
# necessary.
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_name_id = extent_map['field_name_id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
indices_added = []
indices_removed = []
new_links = []
links_created = []
links_deleted = []
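            # These lists record every index and link mutation made below
            # so that the except clause can roll them back if the update
            # fails partway through.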
ia_append = indices_added.append
ir_append = indices_removed.append
nl_append = new_links.append
lc_append = links_created.append
ld_append = links_deleted.append
BTree = self._BTree
try:
# Get old values for use in a potential inversion.
old_fields = self._entity_fields(extent_name, oid)
updating_related = len(related_entities) > 0
if updating_related:
old_related_entities = self._entity_related_entities(
extent_name, oid)
else:
old_related_entities = {}
old_rev = entity_map['rev']
# Manage entity references.
if updating_related:
for name, related_entity_set in related_entities.iteritems():
field_id = field_name_id[name]
for placeholder in related_entity_set:
other_extent_id = placeholder.extent_id
other_oid = placeholder.oid
nl_append((field_id, other_extent_id, other_oid))
# Get fields, and set UNASSIGNED for any fields that are
# new since the last time the entity was stored.
fields_by_id = entity_map['fields']
all_field_ids = set(extent_map['field_id_name'])
new_field_ids = all_field_ids - set(fields_by_id)
fields_by_id.update(dict(
(field_id, UNASSIGNED) for field_id in new_field_ids))
# Create ephemeral fields for creating new mappings.
new_fields_by_id = dict(fields_by_id)
for name, value in fields.iteritems():
new_fields_by_id[field_name_id[name]] = value
if updating_related:
new_related_entities_by_id = dict(
(field_name_id[name], related_entities[name])
for name in related_entities
)
# Remove existing index mappings.
indices = extent_map['indices']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_remove(extent_map, index_spec, oid, field_values)
ir_append((extent_map, index_spec, relaxed, oid, field_values))
if updating_related:
# Delete links from this entity to other entities.
related_entities_by_id = entity_map['related_entities']
referrer_extent_id = extent_name_id[extent_name]
new_field_ids = frozenset(new_fields_by_id)
for (referrer_field_id,
related_set) in related_entities_by_id.iteritems():
# If a field once existed, but no longer does, there will
# still be a related entity set for it in related_entities.
# Only process the fields that still exist.
if referrer_field_id in all_field_ids:
# Remove only the links that no longer exist.
new_related_entities = new_related_entities_by_id.get(
referrer_field_id, set())
for other_value in related_set - new_related_entities:
# Remove the link to the other entity.
other_extent_id = other_value.extent_id
other_oid = other_value.oid
link_key = (referrer_extent_id, referrer_field_id)
other_extent_map = extent_maps_by_id[
other_extent_id]
other_entity_map = other_extent_map['entities'][
other_oid]
links = other_entity_map['links']
other_links = links[link_key]
del other_links[oid]
other_entity_map['link_count'] -= 1
ld_append((other_entity_map, links, link_key, oid))
# Create new index mappings.
for index_spec in indices.iterkeys():
field_values = tuple(new_fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_add(extent_map, index_spec, relaxed, oid, field_values,
BTree)
ia_append((extent_map, index_spec, oid, field_values))
if updating_related:
# Update links from this entity to another entity.
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, other_extent_id, other_oid in new_links:
other_extent_map = extent_maps_by_id[other_extent_id]
try:
other_entity_map = other_extent_map['entities'][
other_oid]
except KeyError:
field_id_name = extent_map['field_id_name']
field_name = field_id_name[referrer_field_id]
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
raise error.EntityDoesNotExist(
other_extent_name, field_name=field_name)
# Add a link to the other entity.
links = other_entity_map['links']
link_key = (referrer_extent_id, referrer_field_id)
if link_key not in links: # XXX Should already be there.
mapping = links[link_key] = BTree()
else:
mapping = links[link_key]
if oid not in mapping:
# Only add the link if it's not already there.
links[link_key][oid] = None
other_entity_map['link_count'] += 1
lc_append((other_entity_map, links, link_key, oid))
# Update actual fields and related entities.
for name, value in fields.iteritems():
fields_by_id[field_name_id[name]] = value
if updating_related:
for name, value in related_entities.iteritems():
related_entities_by_id[field_name_id[name]] = value
# Update revision.
if rev is None:
entity_map['rev'] += 1
else:
entity_map['rev'] = rev
# Allow inversion of this operation.
self._append_inversion(
self._update_entity, extent_name, oid, old_fields,
old_related_entities, old_rev)
# Keep track of changes.
append_change = self._append_change
append_change(UPDATE, extent_name, oid)
except:
# Revert changes made during update attempt.
for _e, _i, _o, _f in indices_added:
_index_remove(_e, _i, _o, _f)
for _e, _i, _r, _o, _f in indices_removed:
_index_add(_e, _i, _r, _o, _f, BTree)
for other_entity_map, links, link_key, oid in links_created:
del links[link_key][oid]
other_entity_map['link_count'] -= 1
for other_entity_map, links, link_key, oid in links_deleted:
links[link_key][oid] = None
other_entity_map['link_count'] += 1
raise
def _create_extent(self, extent_name, field_names, entity_field_names,
key_spec=None, index_spec=None):
"""Create a new extent with a given name."""
BTree = self._BTree
PList = self._PList
PDict = self._PDict
if extent_name in self._extent_maps_by_name:
raise error.ExtentExists(extent_name)
if key_spec is None:
key_spec = []
if index_spec is None:
index_spec = []
extent_map = PDict()
extent_id = self._unique_extent_id()
indices = extent_map['indices'] = PDict()
extent_map['index_map'] = PDict()
normalized_index_map = extent_map[
'normalized_index_map'] = PDict()
extent_map['entities'] = BTree()
field_id_name = extent_map['field_id_name'] = PDict()
field_name_id = extent_map['field_name_id'] = PDict()
extent_map['id'] = extent_id
extent_map['len'] = 0
extent_map['name'] = extent_name
extent_map['next_oid'] = 1
self._extent_name_id[extent_name] = extent_id
self._extent_maps_by_id[extent_id] = extent_map
self._extent_maps_by_name[extent_name] = extent_map
# Give each field name a unique ID.
for name in field_names:
field_id = self._unique_field_id(extent_name)
field_id_name[field_id] = name
field_name_id[name] = field_id
# Convert field names to field IDs in key spec and create
# index structures.
for field_names in key_spec:
i_spec = _field_ids(extent_map, field_names)
_create_index(extent_map, i_spec, True, BTree, PList)
# Convert field names to field IDs in index spec and create
# index structures.
for field_names in index_spec:
i_spec = _field_ids(extent_map, field_names)
# Although we tell it unique=False, it may find a subset
# key, which will cause this superset to be unique=True.
_create_index(extent_map, i_spec, False, BTree, PList)
# Convert field names to field IDs for entity field names.
extent_map['entity_field_ids'] = _field_ids(
extent_map, entity_field_names)
def _delete_extent(self, extent_name):
"""Remove a named extent."""
# XXX: Need to check for links to any entity in this extent,
# and fail to remove it if so.
#
# Iterate through all entities in the extent to delete, and
# remove bidirectional link information from any entities they
# point to.
extent_map = self._extent_map(extent_name)
extent_id = extent_map['id']
for oid, entity_map in extent_map['entities'].iteritems():
related_entities = entity_map['related_entities'].iteritems()
for field_id, related_entity_set in related_entities:
for related_entity in related_entity_set:
rel_extent_id = related_entity.extent_id
rel_oid = related_entity.oid
rel_extent_map = self._extent_maps_by_id.get(
rel_extent_id, None)
if rel_extent_map is not None:
rel_entity_map = rel_extent_map['entities'][rel_oid]
rel_links = rel_entity_map['links']
key = (extent_id, field_id)
if key in rel_links:
link_count = len(rel_links[key])
del rel_links[key]
rel_entity_map['link_count'] -= link_count
# Delete the extent.
del self._extent_name_id[extent_name]
del self._extent_maps_by_id[extent_id]
del self._extent_maps_by_name[extent_name]
def _create_schevo_structures(self):
"""Create or update Schevo structures in the database."""
root = self._root
PDict = self._PDict
if 'SCHEVO' not in root:
schevo = root['SCHEVO'] = PDict()
schevo['format'] = 2
schevo['version'] = 0
schevo['extent_name_id'] = PDict()
schevo['extents'] = PDict()
schevo['schema_source'] = None
def _entity_map(self, extent_name, oid):
"""Return an entity PDict corresponding to named
`extent` and OID."""
extent_map = self._extent_map(extent_name)
try:
entity_map = extent_map['entities'][oid]
except KeyError:
raise error.EntityDoesNotExist(extent_name, oid=oid)
return entity_map
def _entity_extent_map(self, extent_name, oid):
"""Return an (entity PDict, extent PDict)
tuple corresponding to named `extent` and OID."""
extent_map = self._extent_map(extent_name)
try:
entity_map = extent_map['entities'][oid]
except KeyError:
raise error.EntityDoesNotExist(extent_name, oid=oid)
return entity_map, extent_map
def _evolve(self, schema_source, version):
"""Evolve the database to a new schema definition.
- `schema_source`: String containing the source code for the
schema to be evolved to.
- `version`: Integer with the version number of the new schema
source. Must be the current database version, plus 1.
"""
current_version = self.version
expected_version = current_version + 1
        if version != expected_version:
raise error.DatabaseVersionMismatch(
current_version, expected_version, version)
def call(module, name):
fn = getattr(module, name, None)
if callable(fn):
tx = CallableWrapper(fn)
# Trick the database into not performing a
# storage-level commit.
self._executing = [Transaction()]
try:
self.execute(tx)
finally:
self._executing = []
# Load the new schema.
schema_name = schema_counter.next_schema_name()
schema_module = self._import_from_source(schema_source, schema_name)
try:
# Execute `before_evolve` function if defined.
call(schema_module, 'before_evolve')
# Perform first pass of evolution.
self._sync(schema_source, initialize=False, commit=False,
evolving=True)
# Execute `during_evolve` function if defined.
call(self._schema_module, 'during_evolve')
# Perform standard schema synchronization.
self._sync(schema_source, initialize=False, commit=False,
evolving=False)
# Execute `after_evolve` function if defined.
call(self._schema_module, 'after_evolve')
except:
self._rollback()
# Re-raise exception.
raise
else:
self._root['SCHEVO']['version'] = version
self._commit()
def _extent_map(self, extent_name):
"""Return an extent PDict corresponding to `extent_name`."""
try:
return self._extent_maps_by_name[extent_name]
except KeyError:
raise error.ExtentDoesNotExist(extent_name)
def _import_from_source(self, source, module_name):
"""Import a schema module from a string containing source code."""
# Now that prerequisites are loaded, load this schema.
schema_module = module.from_string(source, module_name)
# Remember the schema module.
module.remember(schema_module)
self._remembered.append(schema_module)
# Expose this database to the schema module.
schema_module.db = self
# Return the schema module.
return schema_module
def _initialize(self):
"""Populate the database with initial data."""
tx = Initialize()
self.execute(tx)
def _on_open(self):
"""Allow schema to run code after the database is opened."""
if hasattr(self, '_schema_module'):
# An empty database created without a schema source will
# not have a schema module.
fn = getattr(self._schema_module, 'on_open', None)
if callable(fn):
fn(self)
def _remove_stale_links(self, extent_id, field_id, FieldClass):
# Remove links from this field to other entities that are held
# in the structures for those other entities.
allow = FieldClass.allow
for other_name in allow:
other_extent_map = self._extent_map(other_name)
other_entities = other_extent_map['entities']
for other_entity in other_entities.itervalues():
other_link_count = other_entity['link_count']
other_links = other_entity['links']
referrer_key = (extent_id, field_id)
if referrer_key in other_links:
referrers = other_links[referrer_key]
other_link_count -= len(referrers)
del other_links[referrer_key]
other_entity['link_count'] = other_link_count
def _schema_format_compatibility_check(self, schema):
"""Return None if the given schema is compatible with this
database engine's format, or raise an error when the first
incompatibility is found.
- `schema`: The schema to check.
"""
pass
def _sync(self, schema_source=None, schema_version=None,
initialize=True, commit=True, evolving=False):
"""Synchronize the database with a schema definition.
- `schema_source`: String containing the source code for a
schema. If `None`, the schema source contained in the
database itself will be used.
- `schema_version`: If set, the schema version to use for a
newly-created database. If set to something other than None
for an existing database, raises a ValueError.
- `initialize`: True if a new database should be populated
with initial values defined in the schema.
- `commit`: True if a successful synchronization should commit
to the storage backend. False if the caller of `_sync` will
handle this task.
        - `evolving`: True if the synchronization is occurring during a
database evolution.
"""
self._sync_count += 1
sync_schema_changes = True
locked = False
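        # `locked` tracks whether schevo.schema.start() currently holds
        # the schema import lock, so the except clause below can release
        # it if an error occurs mid-import.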
try:
SCHEVO = self._root['SCHEVO']
# Import old schema.
old_schema_source = SCHEVO['schema_source']
if old_schema_source is not None:
old_schema_module = None
schevo.schema.start(self, evolving)
locked = True
schema_name = schema_counter.next_schema_name()
try:
old_schema_module = self._import_from_source(
old_schema_source, schema_name)
finally:
old_schema = schevo.schema.finish(self, old_schema_module)
locked = False
self._old_schema = old_schema
self._old_schema_module = old_schema_module
else:
old_schema = self._old_schema = None
old_schema_module = self._old_schema_module = None
# Import current schema.
if schema_source is None:
schema_source = old_schema_source
if schema_source is None:
# No schema source was specified and this is a new
# database, so _sync becomes a no-op.
return
else:
# No schema source was specified and this is an
# existing database with a defined schema.
sync_schema_changes = False
if schema_source == old_schema_source:
# If the same source, it'll be the same schema.
schema = old_schema
schema_module = old_schema_module
else:
schema_module = None
schevo.schema.start(self, evolving)
locked = True
schema_name = schema_counter.next_schema_name()
try:
schema_module = self._import_from_source(
schema_source, schema_name)
finally:
schema = schevo.schema.finish(self, schema_module)
locked = False
self._schema_format_compatibility_check(schema)
self.schema = schema
self._schema_module = schema_module
# Expose database-level namespaces and make the database
# the object that the namespace is associated with, for
# more effective use with repr().
self.q = schema.q
self.q._i = self
self.t = schema.t
self.t._i = self
self.Q = schema.Q
self.Q._i = self
# Create an extenders namespace.
self.x = DatabaseExtenders('x', self, self._schema_module)
# If the schema has changed then sync with it.
if sync_schema_changes:
# Update schema source stored in database.
SCHEVO['schema_source'] = schema_source
self._sync_extents(schema, evolving)
# Create extent instances.
E = schema.E
extents = self._extents = {}
relaxed = self._relaxed = {}
entity_classes = self._entity_classes = {}
extent_name_id = self._extent_name_id
for e_name in self.extent_names():
e_id = extent_name_id[e_name]
EntityClass = E[e_name]
extent = Extent(self, e_name, e_id, EntityClass)
extents[e_id] = extents[e_name] = extent
relaxed[e_name] = {}
entity_classes[e_id] = entity_classes[e_name] = EntityClass
# Decorate this Database instance to support the
# following syntax within schema code, for example:
# tx = db.Foo.t.create()
setattr(self, e_name, extent)
# Initialize a new database.
if SCHEVO['version'] == 0:
if schema_version is None:
schema_version = 1
SCHEVO['version'] = schema_version
# Populate with initial data, unless overridden such as
# when importing from an XML file.
if initialize:
self._initialize()
elif schema_version is not None:
# Do not allow schema_version to differ from existing
# version if opening an existing database.
if SCHEVO['version'] != schema_version:
raise ValueError(
'Existing database; schema_version must be set to '
'None or to the current version of the database.')
except:
if locked:
schevo.schema.import_lock.release()
if commit:
self._rollback()
raise
else:
if commit:
self._commit()
self._on_open()
def _sync_extents(self, schema, evolving):
"""Synchronize the extents based on the schema."""
E = schema.E
old_schema = self._old_schema
# Rename extents in the database whose entity class definition
# has a `_was` attribute.
in_schema = set(iter(E))
if evolving:
for extent_name in in_schema:
EntityClass = E[extent_name]
was_named = EntityClass._was
if was_named is not None:
# Change the name of the existing extent in the
# database.
extent_name_id = self._extent_name_id
extent_map = self._extent_map(was_named)
extent_id = extent_map['id']
extent_map['name'] = extent_name
del extent_name_id[was_named]
extent_name_id[extent_name] = extent_id
self._update_extent_maps_by_name()
# Create extents that are in schema but not in db.
in_db = set(self.extent_names())
to_create = in_schema - in_db
for extent_name in to_create:
if extent_name.startswith('_'):
# Do not bother with hidden classes.
continue
EntityClass = E[extent_name]
field_spec = EntityClass._field_spec
field_names = field_spec.keys()
entity_field_names = []
for name in field_names:
FieldClass = field_spec[name]
if FieldClass.may_store_entities and not FieldClass.fget:
entity_field_names.append(name)
key_spec = EntityClass._key_spec
index_spec = EntityClass._index_spec
self._create_extent(
extent_name, field_names, entity_field_names,
key_spec, index_spec)
# Remove extents that are in the db but not in the schema.
in_db = set(self.extent_names())
to_remove = in_db - in_schema
for extent_name in to_remove:
if extent_name.startswith('_'):
# Do not bother with hidden classes.
continue
# Check for links made from entities in this extent to
# other entities, where the other entities maintain those
# link structures.
if old_schema:
extent_map = self._extent_map(extent_name)
field_name_id = extent_map['field_name_id']
extent_id = extent_map['id']
# The old extent name will not exist in the old schema
# if it was an evolve_only class definition, and we
# are not in the process of evolving.
if extent_name in old_schema.E:
for old_field_name, FieldClass in (
old_schema.E[extent_name]._field_spec.iteritems()
):
old_field_id = field_name_id[old_field_name]
if issubclass(FieldClass, EntityField):
self._remove_stale_links(
extent_id, old_field_id, FieldClass)
# Delete the extent. XXX: Need to skip system extents?
self._delete_extent(extent_name)
# Update entity_field_ids, field_id_name, and field_name_id
# for all extents.
for extent_name in self.extent_names():
EntityClass = E[extent_name]
field_spec = EntityClass._field_spec
extent_map = self._extent_map(extent_name)
extent_id = extent_map['id']
entity_field_ids = set(extent_map['entity_field_ids'])
field_id_name = extent_map['field_id_name']
field_name_id = extent_map['field_name_id']
# Rename fields with 'was' attribute.
existing_field_names = set(field_name_id.keys())
new_field_names = set(field_spec.keys())
if evolving:
for field_name in new_field_names:
FieldClass = field_spec[field_name]
was_named = FieldClass.was
if was_named is not None:
if was_named not in existing_field_names:
raise error.FieldDoesNotExist(
extent_name, was_named, field_name)
# Rename the field.
field_id = field_name_id[was_named]
del field_name_id[was_named]
field_name_id[field_name] = field_id
field_id_name[field_id] = field_name
# Remove from the set of existing field names.
existing_field_names.remove(was_named)
# Remove fields that no longer exist.
old_field_names = existing_field_names - new_field_names
for old_field_name in old_field_names:
old_field_id = field_name_id[old_field_name]
if old_schema:
# Get the field spec for the field being deleted.
# It may not exist in the old schema, if it was only
# there in an _evolve_only class definition.
if extent_name in old_schema.E:
FieldClass = old_schema.E[extent_name]._field_spec.get(
old_field_name, None)
if (FieldClass is not None and
issubclass(FieldClass, EntityField)):
self._remove_stale_links(
extent_id, old_field_id, FieldClass)
if old_field_id in entity_field_ids:
entity_field_ids.remove(old_field_id)
del field_name_id[old_field_name]
del field_id_name[old_field_id]
# Create fields IDs for new fields.
existing_field_names = set(field_name_id.keys())
fields_to_create = new_field_names - existing_field_names
for field_name in fields_to_create:
field_id = self._unique_field_id(extent_name)
field_name_id[field_name] = field_id
field_id_name[field_id] = field_name
# Check for entity field.
FieldClass = field_spec[field_name]
if (FieldClass.may_store_entities and not FieldClass.fget):
entity_field_ids.add(field_id)
extent_map['entity_field_ids'] = tuple(entity_field_ids)
# Update index specs for all extents.
for extent_name in self.extent_names():
# Skip system extents.
EntityClass = E[extent_name]
key_spec = EntityClass._key_spec
index_spec = EntityClass._index_spec
self._update_extent_key_spec(extent_name, key_spec, index_spec)
def _unique_extent_id(self):
"""Return an unused random extent ID."""
extent_name_id = self._extent_name_id
while True:
extent_id = random.randint(0, 2**31)
if extent_id not in extent_name_id:
return extent_id
def _unique_field_id(self, extent_name):
"""Return an unused random field ID."""
field_id_name = self._extent_map(extent_name)['field_id_name']
while True:
field_id = random.randint(0, 2**31)
if field_id not in field_id_name:
return field_id
def _update_extent_maps_by_name(self):
extent_maps_by_name = self._extent_maps_by_name = {}
for extent in self._extent_maps_by_id.itervalues():
extent_maps_by_name[extent['name']] = extent
def _update_extent_key_spec(self, extent_name, key_spec, index_spec):
"""Update an existing extent to match given key spec."""
extent_map = self._extent_map(extent_name)
entities = extent_map['entities']
indices = extent_map['indices']
key_spec_ids = [_field_ids(extent_map, field_names)
for field_names in key_spec]
index_spec_ids = [_field_ids(extent_map, field_names)
for field_names in index_spec]
BTree = self._BTree
PList = self._PList
# Convert key indices that have been changed to non-unique
        # indices.
for i_spec in index_spec_ids:
if i_spec not in key_spec and i_spec in indices:
unique, branch = indices[i_spec]
indices[i_spec] = (False, branch)
# Create new key indices for those that don't exist.
for i_spec in key_spec_ids:
if i_spec not in indices:
# Create a new unique index and populate it.
_create_index(
extent_map, i_spec, True, BTree, PList)
for oid in entities:
fields_by_id = entities[oid]['fields']
field_values = tuple(fields_by_id.get(field_id, UNASSIGNED)
for field_id in i_spec)
_index_add(extent_map, i_spec, None, oid, field_values,
BTree)
# Create new non-unique indices for those that don't exist.
for i_spec in index_spec_ids:
if i_spec not in indices:
# Create a new non-unique index and populate it.
_create_index(extent_map, i_spec, False, BTree, PList)
for oid in entities:
fields_by_id = entities[oid]['fields']
field_values = tuple(fields_by_id.get(field_id, UNASSIGNED)
for field_id in i_spec)
_index_add(extent_map, i_spec, None, oid, field_values,
BTree)
# Remove key indices that no longer exist.
to_remove = set(indices) - set(key_spec_ids + index_spec_ids)
for i_spec in to_remove:
_delete_index(extent_map, i_spec)
# Check non-unique indices to see if any are supersets of
# unique indices. If any found, change them to 'unique' and
# validate them.
#
# XXX: Needs testing.
        for i_spec, (unique, branch) in list(indices.items()):
            # A non-unique index that is a superset of a unique index
            # must itself be unique; if so, alter it and validate.
            if not unique:
                spec_set = set(i_spec)
                for other_spec, (other_unique, _) in indices.iteritems():
                    if other_unique and set(other_spec).issubset(spec_set):
                        unique = True
                        break
                if unique:
                    # Should be unique but isn't; alter and validate.
                    indices[i_spec] = (unique, branch)
                    for oid in entities:
                        fields_by_id = entities[oid]['fields']
                        field_values = tuple(fields_by_id.get(field_id,
                                                              UNASSIGNED)
                                             for field_id in i_spec)
                        _index_validate(extent_map, i_spec, oid, field_values,
                                        BTree)
def _validate_changes(self, changes):
# Here we are applying rules defined by the entity itself, not
# the transaction, since transactions may relax certain rules.
entity_classes = self._entity_classes
changes = change.normalize(changes)
for typ, extent_name, oid in changes:
if typ in (CREATE, UPDATE):
EntityClass = entity_classes[extent_name]
entity = EntityClass(oid)
field_map = entity.s.field_map(not_fget)
for field in field_map.itervalues():
field.validate(field._value)
def _reset_all(self):
"""Clear all entities, indices, etc. in the database.
FOR USE WITH SINGLE-SCHEMA UNIT TESTS.
        NOT INTENDED FOR GENERAL USE.
"""
BTree = self._BTree
for extent_name in self.extent_names():
extent_map = self._extent_map(extent_name)
extent_map['entities'] = BTree()
extent_map['len'] = 0
extent_map['next_oid'] = 1
indices = extent_map['indices']
for index_spec, (unique, index_tree) in list(indices.items()):
indices[index_spec] = (unique, BTree())
self._commit()
self.dispatch = Database.dispatch
self.label = Database.label
self._initialize()
self._on_open()
def _create_index(extent_map, index_spec, unique, BTree, PList):
"""Create a new index in the extent with the given spec and
uniqueness flag."""
assert log(1, extent_map['name'])
assert log(1, 'index_spec', index_spec)
indices = extent_map['indices']
index_map = extent_map['index_map']
normalized_index_map = extent_map['normalized_index_map']
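    # index_map maps each partial (prefix) spec to the full index specs
    # built on it; normalized_index_map maps sorted field-id tuples to
    # those specs so equality lookups can ignore field order.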
# Look for unique index subsets of this index, and make it unique
# if any exist.
if not unique:
spec_set = set(index_spec)
for i_spec in indices:
compare_set = set(i_spec)
if compare_set.issubset(spec_set):
unique = True
break
# Continue with index creation.
assert log(2, 'unique', unique)
assert log(
2, 'normalized_index_map.keys()', list(normalized_index_map.keys()))
partial_specs = _partial_index_specs(index_spec)
assert log(3, 'partial_specs', partial_specs)
normalized_specs = _normalized_index_specs(partial_specs)
assert log(3, 'normalized_specs', normalized_specs)
index_root = BTree()
indices[index_spec] = (unique, index_root)
for partial_spec in partial_specs:
L = index_map.setdefault(partial_spec, PList())
L.append(index_spec)
for normalized_spec in normalized_specs:
L = normalized_index_map.setdefault(normalized_spec, PList())
L.append(index_spec)
assert log(
2, 'normalized_index_map.keys()', list(normalized_index_map.keys()))
def _delete_index(extent_map, index_spec):
indices = extent_map['indices']
index_map = extent_map['index_map']
normalized_index_map = extent_map['normalized_index_map']
partial_specs = _partial_index_specs(index_spec)
normalized_specs = _normalized_index_specs(partial_specs)
del indices[index_spec]
for partial_spec in partial_specs:
L = index_map[partial_spec]
L.remove(index_spec)
if not L:
del index_map[partial_spec]
for normalized_spec in normalized_specs:
if normalized_spec in normalized_index_map:
L = normalized_index_map[normalized_spec]
L.remove(index_spec)
if not L:
del normalized_index_map[normalized_spec]
def _field_ids(extent_map, field_names):
"""Convert a (field-name, ...) tuple to a (field-id, ...)
tuple for the given extent map."""
field_name_id = extent_map['field_name_id']
return tuple(field_name_id[name] for name in field_names)
def _field_names(extent_map, field_ids):
"""Convert a (field-id, ...) tuple to a (field-name, ...) tuple
for the given extent map."""
field_id_name = extent_map['field_id_name']
return tuple(field_id_name[id] for id in field_ids)
def _index_add(extent_map, index_spec, relaxed, oid, field_values, BTree):
"""Add an entry to the specified index, of entity oid having the
given values in order of the index spec."""
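    # An index is a nested BTree: one level per field in index_spec,
    # keyed by field value, ending in a leaf BTree that maps OID -> True.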
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value in branch:
branch = branch[field_value]
else:
new_branch = BTree()
branch[field_value] = new_branch
branch = new_branch
# Raise error if unique index and not an empty leaf.
if unique and len(branch) and relaxed is None:
_index_clean(extent_map, index_spec, field_values)
raise error.KeyCollision(
extent_map['name'],
_field_names(extent_map, index_spec),
field_values,
)
# Inject the OID into the leaf.
branch[oid] = True
# Keep track of the addition if relaxed.
if relaxed is not None:
relaxed.append((extent_map, index_spec, oid, field_values))
def _index_clean(extent_map, index_spec, field_values):
"""Remove stale branches from the specified index."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
_index_clean_branch(branch, field_values)
def _index_clean_branch(branch, field_values):
"""Recursively clean a branch of stale child branches."""
branch_value = field_values[0]
child_values = field_values[1:]
if branch_value in branch:
if child_values:
# Clean children first.
_index_clean_branch(branch[branch_value], child_values)
# Clean ourself if empty.
if not len(branch[branch_value]):
del branch[branch_value]
def _index_remove(extent_map, index_spec, oid, field_values):
"""Remove an entry from the specified index, of entity oid having
the given values in order of the index spec."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value not in branch:
# Was never indexed for some reason, so stop traversing.
break
branch = branch[field_value]
if oid in branch:
del branch[oid]
_index_clean(extent_map, index_spec, field_values)
def _index_validate(extent_map, index_spec, oid, field_values, BTree):
"""Validate the index entry for uniqueness."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value in branch:
branch = branch[field_value]
else:
new_branch = BTree()
branch[field_value] = new_branch
branch = new_branch
# Raise error if unique index and not an empty leaf.
if unique and len(branch) > 1:
_index_clean(extent_map, index_spec, field_values)
raise error.KeyCollision(
extent_map['name'],
_field_names(extent_map, index_spec),
field_values,
)
def _normalized_index_specs(index_specs):
"""Return normalized index specs based on index_specs."""
return [tuple(sorted(spec)) for spec in index_specs]
def _partial_index_specs(index_spec):
"""Return a list of partial index specs based on index_spec."""
return [tuple(index_spec[:x+1]) for x in xrange(len(index_spec))]
def _walk_index(branch, ascending_seq, result_list):
"""Recursively walk a branch of an index, appending OIDs found to
result_list.
- `branch`: The branch to start at.
- `ascending_seq`: The sequence of ascending flags corresponding
to the current branch.
- `result_list`: List to append OIDs to.
"""
if len(ascending_seq):
# We are at a branch.
ascending, inner_ascending = ascending_seq[0], ascending_seq[1:]
if ascending:
for key, inner_branch in branch.iteritems():
_walk_index(inner_branch, inner_ascending, result_list)
else:
# XXX: SchevoZodb backend requires us to use
# `reversed(branch.keys())` rather than
# `reversed(branch)`.
keys = reversed(branch.keys())
for key in keys:
inner_branch = branch[key]
_walk_index(inner_branch, inner_ascending, result_list)
else:
# We are at a leaf.
result_list.extend(branch.iterkeys())
class DatabaseExtenders(NamespaceExtension):
"""Methods that extend the functionality of a database."""
__slots__ = NamespaceExtension.__slots__
_readonly = False
def __init__(self, name, instance, schema_module):
NamespaceExtension.__init__(self, name, instance)
# Expose functions through this namespace.
for name in dir(schema_module):
# Extender functions always have x_ prefix.
if name.startswith('x_'):
function = getattr(schema_module, name)
# Drop the 'x_' prefix.
name = name[2:]
self._set(name, function)
def convert_from_format1(backend):
"""Convert a database from format 1 to format 2.
- `backend`: Open backend connection to the database to convert.
Assumes that the database has already been verified to be a format 1
database.
"""
root = backend.get_root()
schevo = root['SCHEVO']
extent_name_id = schevo['extent_name_id']
extents = schevo['extents']
# For each extent in the database...
for extent_name, extent_id in extent_name_id.iteritems():
extent = extents[extent_id]
entity_field_ids = frozenset(extent['entity_field_ids'])
# For each entity in the extent...
for entity_oid, entity in extent['entities'].iteritems():
fields = entity['fields']
related_entities = entity['related_entities'] = backend.PDict()
# For each entity field in the entity...
for field_id in entity_field_ids:
related_entity_set = set()
# If the value is an entity reference, turn it into a
# Placeholder. Store the value, and also add it to the
# set of related entities.
value = fields.get(field_id, UNASSIGNED)
if isinstance(value, tuple):
p = Placeholder.new(*value)
fields[field_id] = p
related_entity_set.add(p)
related_entities[field_id] = frozenset(related_entity_set)
# For each index...
indices = extent['indices']
for index_spec, (unique, index_tree) in indices.iteritems():
# Convert all (extent_id, oid) tuples to Placeholder instances in
# extent indices.
_convert_index_from_format1(
entity_field_ids, index_spec, index_tree)
# Bump format from 1 to 2.
schevo['format'] = 2
def _convert_index_from_format1(entity_field_ids, index_spec, index_tree):
current_field_id, next_index_spec = index_spec[0], index_spec[1:]
is_entity_field = current_field_id in entity_field_ids
for key, child_tree in index_tree.items():
if is_entity_field and isinstance(key, tuple):
# Convert entity tuple to Placeholder.
p = Placeholder.new(*key)
# Replace old key with new key.
del index_tree[key]
index_tree[p] = child_tree
# Recurse into child structures if not at a leaf.
if len(next_index_spec) > 0:
_convert_index_from_format1(
entity_field_ids, next_index_spec, child_tree)
optimize.bind_all(sys.modules[__name__]) # Last line of module.
|
mit
| -3,918,668,678,691,859,500
| 43.209453
| 80
| 0.558569
| false
| 4.261714
| false
| false
| false
|
dalejung/nbx
|
nbx/nbmanager/tagged_gist/notebook_gisthub.py
|
1
|
6300
|
import github
import nbformat
from .gisthub import gisthub, _hashtags
import nbx.compat as compat
def parse_tags(desc):
# real tags and not system-like tags
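    # e.g. "My Notebook #notebook #research" -> ['#research'],
    # assuming _hashtags returns the '#'-prefixed tags found in desc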
tags = _hashtags(desc)
if '#notebook' in tags:
tags.remove('#notebook')
if '#inactive' in tags:
tags.remove('#inactive')
return tags
class NotebookGist(object):
"""
A single notebook abstraction over Gist. Normally a gist can have
mutliple files. A notebook gist pretends to be a single file.
"""
# instead of having a bunch of @property getters, define
# attrs to grab from .gist here.
_gist_attrs = ['id', 'files', 'active', 'edit', 'updated_at',
'created_at', 'public']
def __init__(self, gist, gisthub):
self.gist = gist
self.gisthub = gisthub
# unique identifier name
self.suffix = "[{0}].ipynb".format(self.id)
super(NotebookGist, self).__init__()
_name = None
@property
def name(self):
if self._name is None:
self._name = self.gist.name
return self._name
@name.setter
def name(self, value):
self._name = value
# recompute keyname
@property
def key_name(self):
return self.name + ' ' + self.suffix
_tags = None
@property
def tags(self):
if self._tags is None:
self._tags = self.gist.tags[:]
if '#notebook' in self._tags:
self._tags.remove('#notebook')
return self._tags
@tags.setter
def tags(self, tags):
self._tags = tags
def __getattr__(self, name):
if name in self._gist_attrs:
return getattr(self.gist, name)
raise AttributeError("{name} not found on .gist".format(name=name))
_notebook_content = None
@property
def notebook_content(self):
if self._notebook_content is None:
# refresh and grab file contents
file = self._get_notebook_file()
if file:
self._notebook_content = file.content
return self._notebook_content
@notebook_content.setter
def notebook_content(self, content):
if isinstance(content, compat.string_types):
self._notebook_content = content
return
        # not a string; treat it as a notebook node and serialize it
        content = nbformat.writes(content, version=nbformat.NO_CONVERT)
        self._notebook_content = content
@property
def revisions(self):
# only return revisions for the .ipynb file
fn = self._get_notebook_file()
revisions = self.gist.revisions_for_file(fn.filename)
        # convert to basic commit log. Don't want NotebookManager
# needing to know github.GistHistoryState internals
commits = []
for state in revisions:
commit = {
'id': state.version,
'commit_date': state.committed_at
}
commits.append(commit)
return commits
def get_revision_content(self, commit_id):
fobj = self._get_notebook_file()
rev_fobj = self.gist.get_revision_file(commit_id, fobj.filename)
return rev_fobj['content']
def _refresh(self):
self.gist = self.gisthub.refresh_gist(self)
def _get_notebook_file(self):
"""
        Will return the first notebook file in a gist.
        Iterate over filenames in sorted order so the result is stable;
        the file order is not known to be defined by the GitHub API.
"""
self._refresh()
for key in sorted(self.gist.files):
file = self.gist.files[key]
if file.filename.endswith(".ipynb"):
return file
def _edit(self, desc=None, files=None):
if desc is None:
desc = self.description
self.gist.edit(desc, files)
def _generate_payload(self):
" Gather payload to sent to Github. "
gfile = self._get_notebook_file()
file = github.InputFileContent(self.notebook_content)
files = {gfile.filename: file}
description = self._generate_description()
return {'files':files, 'description': description}
def _generate_description(self):
""" genrate the Gist description. """
name = self.name
# system type of tags
tags = ['#notebook']
if not self.active:
tags.append('#inactive')
# add the normal tags
tags += self.tags
tagstring = " ".join(tags)
description = "{name} {tags}".format(name=name, tags=tagstring)
return description
def __repr__(self):
out = "NotebookGist(name={name}, active={active}, " + \
"public={public}, tags={tags})"
return out.format(public=self.public, name=self.name,
tags=self.tags, active=self.active)
def strip_gist_id(self, key_name):
" small util to remove gist_id suffix "
# really we're assuming this will only match once, seems fine
return key_name.replace(' '+self.suffix, '')
class NotebookGistHub(object):
def __init__(self, gisthub):
self.gisthub = gisthub
def _wrap_results(self, results):
wrapped = {}
for key, gists in results.items():
# convert to NotebookGist
items = [NotebookGist(gist, self) for gist in gists]
# index by key_name
items = dict([(gist.key_name, gist) for gist in items])
wrapped[key] = items
return wrapped
def query(self, *args, **kwargs):
kwargs['filter_tag'] = '#notebook'
results = self.gisthub.query(*args, **kwargs)
return self._wrap_results(results)
def refresh_gist(self, gist):
return self.gisthub.refresh_gist(gist)
def save(self, gist):
payload = gist._generate_payload()
gist._edit(payload['description'], payload['files'])
self.gisthub.update_gist(gist.gist)
def create_gist(self, name, tags, content='', public=True):
gist = self.gisthub.create_gist(name, tags, content, public)
nb = NotebookGist(gist, self)
return nb
def notebook_gisthub(user, password):
g = gisthub(user, password)
return NotebookGistHub(g)
|
mit
| 5,413,795,379,214,303,000
| 30.5
| 75
| 0.584127
| false
| 3.915475
| false
| false
| false
|
partofthething/home-assistant
|
homeassistant/components/plaato/config_flow.py
|
2
|
7438
|
"""Config flow for Plaato."""
import logging
from pyplaato.plaato import PlaatoDeviceType
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SCAN_INTERVAL, CONF_TOKEN, CONF_WEBHOOK_ID
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_CLOUDHOOK,
CONF_DEVICE_NAME,
CONF_DEVICE_TYPE,
CONF_USE_WEBHOOK,
DEFAULT_SCAN_INTERVAL,
DOCS_URL,
PLACEHOLDER_DEVICE_NAME,
PLACEHOLDER_DEVICE_TYPE,
PLACEHOLDER_DOCS_URL,
PLACEHOLDER_WEBHOOK_URL,
)
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__package__)
class PlaatoConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handles a Plaato config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize."""
self._init_info = {}
async def async_step_user(self, user_input=None):
"""Handle user step."""
if user_input is not None:
self._init_info[CONF_DEVICE_TYPE] = PlaatoDeviceType(
user_input[CONF_DEVICE_TYPE]
)
self._init_info[CONF_DEVICE_NAME] = user_input[CONF_DEVICE_NAME]
return await self.async_step_api_method()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_DEVICE_NAME,
default=self._init_info.get(CONF_DEVICE_NAME, None),
): str,
vol.Required(
CONF_DEVICE_TYPE,
default=self._init_info.get(CONF_DEVICE_TYPE, None),
): vol.In(list(PlaatoDeviceType)),
}
),
)
async def async_step_api_method(self, user_input=None):
"""Handle device type step."""
device_type = self._init_info[CONF_DEVICE_TYPE]
if user_input is not None:
token = user_input.get(CONF_TOKEN, None)
use_webhook = user_input.get(CONF_USE_WEBHOOK, False)
if not token and not use_webhook:
errors = {"base": PlaatoConfigFlow._get_error(device_type)}
return await self._show_api_method_form(device_type, errors)
self._init_info[CONF_USE_WEBHOOK] = use_webhook
self._init_info[CONF_TOKEN] = token
return await self.async_step_webhook()
return await self._show_api_method_form(device_type)
async def async_step_webhook(self, user_input=None):
"""Validate config step."""
use_webhook = self._init_info[CONF_USE_WEBHOOK]
if use_webhook and user_input is None:
webhook_id, webhook_url, cloudhook = await self._get_webhook_id()
self._init_info[CONF_WEBHOOK_ID] = webhook_id
self._init_info[CONF_CLOUDHOOK] = cloudhook
return self.async_show_form(
step_id="webhook",
description_placeholders={
PLACEHOLDER_WEBHOOK_URL: webhook_url,
PLACEHOLDER_DOCS_URL: DOCS_URL,
},
)
return await self._async_create_entry()
async def _async_create_entry(self):
"""Create the entry step."""
webhook_id = self._init_info.get(CONF_WEBHOOK_ID, None)
auth_token = self._init_info[CONF_TOKEN]
device_name = self._init_info[CONF_DEVICE_NAME]
device_type = self._init_info[CONF_DEVICE_TYPE]
unique_id = auth_token if auth_token else webhook_id
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=device_type.name,
data=self._init_info,
description_placeholders={
PLACEHOLDER_DEVICE_TYPE: device_type.name,
PLACEHOLDER_DEVICE_NAME: device_name,
},
)
async def _show_api_method_form(
self, device_type: PlaatoDeviceType, errors: dict = None
):
data_schema = vol.Schema({vol.Optional(CONF_TOKEN, default=""): str})
if device_type == PlaatoDeviceType.Airlock:
data_schema = data_schema.extend(
{vol.Optional(CONF_USE_WEBHOOK, default=False): bool}
)
return self.async_show_form(
step_id="api_method",
data_schema=data_schema,
errors=errors,
description_placeholders={PLACEHOLDER_DEVICE_TYPE: device_type.name},
)
async def _get_webhook_id(self):
"""Generate webhook ID."""
webhook_id = self.hass.components.webhook.async_generate_id()
if self.hass.components.cloud.async_active_subscription():
webhook_url = await self.hass.components.cloud.async_create_cloudhook(
webhook_id
)
cloudhook = True
else:
webhook_url = self.hass.components.webhook.async_generate_url(webhook_id)
cloudhook = False
return webhook_id, webhook_url, cloudhook
@staticmethod
def _get_error(device_type: PlaatoDeviceType):
if device_type == PlaatoDeviceType.Airlock:
return "no_api_method"
return "no_auth_token"
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlaatoOptionsFlowHandler(config_entry)
class PlaatoOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Plaato options."""
def __init__(self, config_entry: ConfigEntry):
"""Initialize domain options flow."""
super().__init__()
self._config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
use_webhook = self._config_entry.data.get(CONF_USE_WEBHOOK, False)
if use_webhook:
return await self.async_step_webhook()
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self._config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): cv.positive_int
}
),
)
async def async_step_webhook(self, user_input=None):
"""Manage the options for webhook device."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
webhook_id = self._config_entry.data.get(CONF_WEBHOOK_ID, None)
webhook_url = (
""
if webhook_id is None
else self.hass.components.webhook.async_generate_url(webhook_id)
)
return self.async_show_form(
step_id="webhook",
description_placeholders={PLACEHOLDER_WEBHOOK_URL: webhook_url},
)
|
mit
| 1,609,999,554,006,146,000
| 32.35426
| 85
| 0.583625
| false
| 3.998925
| true
| false
| false
|
v-legoff/pa-poc1
|
dc/yaml/connector.py
|
1
|
8550
|
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module defining the YAMLConnector class."""
import os
driver = True
try:
import yaml
except ImportError:
driver = False
from dc.connector import DataConnector
from dc import exceptions
from model import exceptions as mod_exceptions
from model.functions import *
class YAMLConnector(DataConnector):
"""Data connector for YAML.
    This data connector reads and writes data in YAML format, using
    the yaml library.
A very short example:
# Table: users
- id: 1
username: admin
email_address: admin@python-aboard.org
"""
name = "yaml"
def __init__(self):
"""Check the driver presence.
If not found, raise a DriverNotFound exception.
"""
if not driver:
raise exceptions.DriverNotFound(
"the yaml library can not be found")
self.location = None
self.auto_increments = {}
self.to_update = set()
def setup(self, location=None):
"""Setup the data connector."""
if location is None:
raise exceptions.InsufficientConfiguration(
"the location for storing datas was not specified for " \
"the YAML data connector")
location = location.replace("\\", "/")
if location.startswith("~"):
location = os.path.expanduser("~") + location[1:]
if location.endswith("/"):
location = location[:-1]
if not os.path.exists(location):
# Try to create it
os.makedirs(location)
if not os.access(location, os.R_OK):
raise exceptions.DriverInitializationError(
"cannot read in {}".format(location))
if not os.access(location, os.W_OK):
raise exceptions.DriverInitializationError(
"cannot write in {}".format(location))
DataConnector.__init__(self)
self.location = location
self.files = {}
def close(self):
"""Close the data connector (nothing to be done)."""
pass
def destroy(self):
"""Erase EVERY stored data."""
for file in os.listdir(self.location):
os.remove(self.location + "/" + file)
self.clear_cache()
def record_model(self, model):
"""Record the given model."""
name = DataConnector.record_model(self, model)
filename = self.location + "/" + name + ".yml"
if os.path.exists(filename):
with open(filename, "r") as file:
self.read_table(name, file)
self.files[name] = filename
def read_table(self, table_name, file):
"""Read a whoe table contained in a file.
This file is supposed to be formatted as a YAML file. Furthermore,
the 'yaml.load' function should return a list of dictionaries.
The first dictionary describes some table informations, as
the status of the autoincrement fields. Each following dictionary
is a line of data which sould describe a model object.
"""
name = table_name
content = file.read()
datas = yaml.load(content)
if not isinstance(datas, list):
raise exceptions.DataFormattingError(
"the file {} must contain a YAML formatted list".format(
self.files[name]))
class_table = self.models[name]
class_datas = datas[0]
if not isinstance(class_datas, dict):
raise exceptions.DataFormattingError(
"the table informations are not stored in a YAML " \
"dictionary in the file {}".format(self.files[name]))
self.read_table_header(name, class_datas)
objects = {}
for line in datas[1:]:
object = class_table.build(**line)
pkey = get_pkey_values(object)
if len(pkey) == 1:
pkey = pkey[0]
objects[pkey] = object
self.objects_tree[name] = objects
def read_table_header(self, name, datas):
"""Read the table header.
        This header should describe information about the
        table (such as the auto-increment fields).
"""
auto_increments = datas.get("auto_increments", [])
self.auto_increments[name] = auto_increments
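    # Illustrative on-disk layout (assumed, not from the original project): a
    # table file such as users.yml, as written by write_table() and parsed
    # back by read_table(), is a YAML list whose first entry is the header and
    # whose remaining entries are one mapping per stored object, e.g.:
    #
    #     - auto_increments:
    #         id: 3
    #     - id: 1
    #       username: admin
    #     - id: 2
    #       username: guest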
def loop(self):
"""Write the YAML tables."""
for table in self.to_update:
self.write_table(table)
self.to_update.clear()
def write_table(self, name):
"""Write the table in a file."""
# First, we get the header
header = {}
if name in self.auto_increments:
header["auto_increments"] = self.auto_increments[name]
# Next we browse the object
objects = []
for object in self.objects_tree[name].values():
objects.append(object.__dict__)
objects.insert(0, header)
content = yaml.dump(objects, default_flow_style=False)
with open(self.location + "/" + name + ".yml", "w") as file:
file.write(content)
def get_all_objects(self, model):
"""Return all the model's object in a list."""
name = get_name(model)
return list(self.objects_tree.get(name, {}).values())
def find_object(self, model, pkey_values):
"""Return, if found, the selected object.
Raise a model.exceptions.ObjectNotFound if not found.
"""
# Look for the object in the cached tree
object = self.get_from_cache(model, pkey_values)
if object:
return object
raise mod_exceptions.ObjectNotFound(model, pkey_values)
def add_object(self, object):
"""Save the object, issued from a model."""
name = get_name(type(object))
fields = get_fields(type(object))
auto_increments = self.auto_increments.get(name, {})
for field in fields:
if not field.auto_increment:
continue
value = auto_increments.get(field.field_name, 1)
update_attr(object, field.field_name, value)
auto_increments[field.field_name] = value + 1
self.cache_object(object)
self.auto_increments[name] = auto_increments
self.to_update.add(name)
def update_object(self, object, attribute):
"""Update an object."""
self.check_update(object)
name = get_name(type(object))
self.to_update.add(name)
def remove_object(self, object):
"""Delete the object."""
# Delete from cache only
self.uncache_object(object)
name = get_name(type(object))
self.to_update.add(name)
|
bsd-3-clause
| -1,075,166,643,516,734,300
| 34.185185
| 79
| 0.598713
| false
| 4.519027
| false
| false
| false
|
tuzonghua/CloudBot
|
plugins/youtube.py
|
1
|
5598
|
import re
import time
import isodate
import requests
from cloudbot import hook
from cloudbot.util import timeformat
from cloudbot.util.formatting import pluralize
youtube_re = re.compile(r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)([-_a-zA-Z0-9]+)', re.I)
base_url = 'https://www.googleapis.com/youtube/v3/'
api_url = base_url + 'videos?part=contentDetails%2C+snippet%2C+statistics&id={}&key={}'
search_api_url = base_url + 'search?part=id&maxResults=1'
playlist_api_url = base_url + 'playlists?part=snippet%2CcontentDetails%2Cstatus'
video_url = "http://youtu.be/%s"
err_no_api = "The YouTube API is off in the Google Developers Console."
def get_video_description(video_id):
json = requests.get(api_url.format(video_id, dev_key)).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return
data = json['items']
snippet = data[0]['snippet']
statistics = data[0]['statistics']
content_details = data[0]['contentDetails']
out = '\x02{}\x02'.format(snippet['title'])
if not content_details.get('duration'):
return out
length = isodate.parse_duration(content_details['duration'])
out += ' - length \x02{}\x02'.format(timeformat.format_time(int(length.total_seconds()), simple=True))
total_votes = float(statistics.get('likeCount', 0)) + float(statistics.get('dislikeCount', 0))
if total_votes != 0:
# format
likes = pluralize(int(statistics.get('likeCount', 0)), "like")
dislikes = pluralize(int(statistics.get('dislikeCount', 0)), "dislike")
percent = 100 * float(statistics.get('likeCount', 0)) / total_votes
out += ' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
dislikes, percent)
if 'viewCount' in statistics:
views = int(statistics['viewCount'])
out += ' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
uploader = snippet['channelTitle']
upload_time = time.strptime(snippet['publishedAt'], "%Y-%m-%dT%H:%M:%S.000Z")
out += ' - \x02{}\x02 on \x02{}\x02'.format(uploader,
time.strftime("%Y.%m.%d", upload_time))
if 'contentRating' in content_details:
out += ' - \x034NSFW\x02'
return out
@hook.on_start()
def load_key(bot):
global dev_key
dev_key = bot.config.get("api_keys", {}).get("google_dev_key", None)
@hook.regex(youtube_re)
def youtube_url(match):
return get_video_description(match.group(1))
@hook.command("youtube", "you", "yt", "y")
def youtube(text):
"""youtube <query> -- Returns the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
return get_video_description(video_id) + " - " + video_url % video_id
@hook.command("youtime", "ytime")
def youtime(text):
"""youtime <query> -- Gets the total run time of the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
json = requests.get(api_url.format(video_id, dev_key)).json()
if json.get('error'):
return
data = json['items']
snippet = data[0]['snippet']
content_details = data[0]['contentDetails']
statistics = data[0]['statistics']
if not content_details.get('duration'):
return
length = isodate.parse_duration(content_details['duration'])
l_sec = int(length.total_seconds())
views = int(statistics['viewCount'])
total = int(l_sec * views)
length_text = timeformat.format_time(l_sec, simple=True)
total_text = timeformat.format_time(total, accuracy=8)
return 'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
'a total run time of {}!'.format(snippet['title'], length_text, views,
total_text)
ytpl_re = re.compile(r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)
@hook.regex(ytpl_re)
def ytplaylist_url(match):
location = match.group(4).split("=")[-1]
json = requests.get(playlist_api_url, params={"id": location, "key": dev_key}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error looking up playlist.'
data = json['items']
snippet = data[0]['snippet']
content_details = data[0]['contentDetails']
title = snippet['title']
author = snippet['channelTitle']
num_videos = int(content_details['itemCount'])
count_videos = ' - \x02{:,}\x02 video{}'.format(num_videos, "s"[num_videos == 1:])
return "\x02{}\x02 {} - \x02{}\x02".format(title, count_videos, author)
|
gpl-3.0
| -2,821,733,049,324,311,000
| 32.321429
| 106
| 0.602358
| false
| 3.413415
| false
| false
| false
|
yhpeng-git/mxnet
|
docs/mxdoc.py
|
1
|
9901
|
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
from recommonmark import transform
import pypandoc
# start or end of a code block
_CODE_MARK = re.compile('^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
'r' : ('R','#'),
'scala' : ('scala', '#'),
'julia' : ('jl', '#'),
'perl' : ('pl', '#'),
'cpp' : ('cc', '//'),
'bash' : ('sh', '#')}
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
"""Build mxnet .so lib"""
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
    # the repository root sits one level above the Sphinx source dir
    pdf_path = app.builder.srcdir + '/../docs/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
    # scaladoc fails on some APIs, so exit 0 to pass the check
_run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
                if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
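# Illustrative example (assumed input, not from the original docs): for the
# markdown table
#
#     | a | b |
#     |---|---|
#     | 1 | 2 |
#
# _convert_md_table_to_rst() returns roughly
#
#     ```eval_rst
#     .. list-table::
#        :header-rows: 1
#
#        * - a
#          - b
#        * - 1
#          - 2
#     ```
#
# with each cell run through pypandoc before being placed in the list-table.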
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
                    if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
      - lang: the code block language
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
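# Illustrative walk-through (snippet assumed, not from the original docs): for
# the three lines
#
#     ```python
#     x = 1
#     ```
#
# _parse_code_lines() yields ('```python', True, 'python', 0),
# ('x = 1', True, 'python', 0) and ('```', True, 'python', 0); the state only
# drops back to "not in code" after the closing fence has been yielded.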
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lang, lines):
cur_block = []
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code and cur_lang != lang:
in_code = False
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, cur_block)
cur_block = []
cur_block.append(l)
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, cur_block)
def _get_jupyter_notebook(lang, lines):
cells = []
for in_code, lines in _get_blocks(lang, lines):
cell = {
"cell_type": "code" if in_code else "markdown",
"metadata": {},
"source": '\n'.join(lines)
}
if in_code:
cell.update({
"outputs": [],
"execution_count": None,
})
cells.append(cell)
ipynb = {"nbformat" : 4,
"nbformat_minor" : 2,
"metadata" : {"language":lang, "display_name":'', "name":''},
"cells" : cells}
return ipynb
def _get_source(lang, lines):
cmt = _LANGS[lang][1] + ' '
out = []
for in_code, lines in _get_blocks(lang, lines):
if in_code:
out.append('')
for l in lines:
if in_code:
if '%matplotlib' not in l:
out.append(l)
else:
if ('<div>' in l or '</div>' in l or
'<script>' in l or '</script>' in l or
'<!--' in l or '-->' in l or
'%matplotlib' in l ):
continue
out.append(cmt+l)
if in_code:
out.append('')
return out
def _get_src_download_btn(out_prefix, langs, lines):
btn = '<div class="btn-group" role="group">\n'
for lang in langs:
ipynb = out_prefix + '_' + lang + '.ipynb'
with open(ipynb, 'w') as f:
json.dump(_get_jupyter_notebook(lang, lines), f)
src = out_prefix + '.' + _LANGS[lang][0]
with open(src, 'w') as f:
f.write('\n'.join(_get_source(lang, lines)))
for f in [ipynb, src]:
f = f.split('/')[-1]
btn += '<button type="button" class="btn btn-default download" '
btn += 'onclick="window.location=\'%s\'"><span class="glyphicon glyphicon-download-alt"></span> %s </button>\n' % (f, f)
btn += '</div>\n'
return btn
def add_buttons(app, docname, source):
out_prefix = app.builder.outdir + '/' + docname
dirname = os.path.dirname(out_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
for i,j in enumerate(source):
lines = j.split('\n')
langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
if l is not None and l in _LANGS])
# first convert
for k,l in enumerate(lines):
if _SRC_DOWNLOAD_MARK in l:
lines[k] = _get_src_download_btn(
out_prefix, langs, lines)
# then add lang buttons
for k,l in enumerate(lines):
if _LANG_SELECTION_MARK in l:
lines[k] = _get_lang_selection_btn(langs)
source[i] = '\n'.join(lines)
def setup(app):
app.connect("builder-inited", build_mxnet)
app.connect("builder-inited", generate_doxygen)
app.connect("builder-inited", build_scala_docs)
    # Building the R docs is skipped; it requires installing LaTeX, which is rather heavy
# app.connect("builder-inited", build_r_docs)
app.connect('source-read', convert_table)
app.connect('source-read', add_buttons)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: 'http://mxnet.io/' + url,
'enable_eval_rst': True,
}, True)
app.add_transform(transform.AutoStructify)
|
apache-2.0
| 6,479,518,435,546,730,000
| 34.360714
| 132
| 0.503888
| false
| 3.520982
| false
| false
| false
|
adampresley/trackathon
|
model/DateHelper.py
|
1
|
2129
|
from Service import Service
from datetime import tzinfo, timedelta, datetime
from dateutil import tz
class DateHelper(Service):
utc = tz.gettz("UTC")
pyToJsFormatMapping = {
"%m/%d/%Y": "MM/dd/yyyy",
"%d/%m/%Y": "dd/MM/yyyy",
"%Y-%m-%d": "yyyy-MM-dd"
}
def __init__(self, db, timezone = "UTC", dateFormat = "%m/%d/%Y", timeFormat = "%I:%M %p"):
self.db = db
self._timezone = timezone
self._dateFormat = dateFormat
self._timeFormat = timeFormat
def addDays(self, d, numDays = 1, format = "%Y-%m-%d"):
if not self.isDateType(d):
d = datetime.strptime(d, format)
newDate = d + timedelta(days = numDays)
return newDate
def dateFormat(self, d):
return self.utcToTimezone(d, self._timezone).strftime(self._dateFormat)
def dateTimeFormat(self, d):
return self.utcToTimezone(d, self._timezone).strftime("%s %s" % (self._dateFormat, self._timeFormat))
def isDateType(self, d):
result = True
try:
d.today()
except AttributeError as e:
result = False
return result
def localNow(self):
return self.utcToTimezone(datetime.now(self.utc), self._timezone)
def now(self):
return datetime.now(self.utc)
def pyToJsDateFormat(self, pyDateFormat):
return self.pyToJsFormatMapping[pyDateFormat]
def restDateFormat(self, d):
return d.strftime("%Y-%m-%d")
def restDateTime(self, d):
return d.strftime("%Y-%m-%d %H:%M")
def timeFormat(self, d):
return self.utcToTimezone(d, self._timezone).strftime(self._timeFormat)
def utcToTimezone(self, d, timezone):
targetTZ = tz.gettz(timezone)
d = d.replace(tzinfo = self.utc)
return d.astimezone(targetTZ)
def validateDateRange(self, start, end, format = "%Y-%m-%d"):
#
# Basically if the range between start and end is greater than 91
# days kick it back with today's date as default.
#
parsedStart = datetime.strptime(start, format)
parsedEnd = datetime.strptime(end, format)
delta = parsedEnd - parsedStart
newStart = start
newEnd = end
if delta.days > 91:
newStart = self.restDateFormat(self.localNow())
newEnd = self.restDateFormat(self.localNow())
return (newStart, newEnd)
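# Illustrative sketch (dates assumed, not from the original project): with the
# 91-day cap above,
#
#     helper = DateHelper(db=None)
#     helper.validateDateRange("2014-01-01", "2014-02-01")  # returned unchanged
#     helper.validateDateRange("2014-01-01", "2014-06-01")  # > 91 days, so both
#                                                           # dates reset to today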
|
mit
| 6,435,613,590,564,856,000
| 24.058824
| 103
| 0.685298
| false
| 2.973464
| false
| false
| false
|
JmeHsieh/issue_aggregator
|
generate_url_list.py
|
1
|
1184
|
#!/usr/bin/env python3
import json
from subprocess import PIPE, Popen
from requests import get
repo_master_raw = 'https://raw.githubusercontent.com/g0v/awesome-g0v/master/'
readme = 'readme.md'
parser = 'parse.ls'
awesome_g0v = 'awesome-g0v.json'
outfile = 'url_list.json'
def get_source():
readme_url = repo_master_raw + readme
parser_url = repo_master_raw + parser
with open('./data/{}'.format(readme), 'wb+') as f:
response = get(readme_url)
f.write(response.content)
with open('./data/{}'.format(parser), 'wb+') as f:
response = get(parser_url)
f.write(response.content)
def run_parser():
try:
with Popen(['lsc', parser], cwd='./data/', stdout=PIPE) as p:
print(p.stdout.read().decode('utf-8'))
except Exception as e:
print(e)
def output_url_list():
with open('./data/{}'.format(awesome_g0v), 'r') as f:
js = json.load(f)
rs = [j['repository'] for j in js if 'github.com' in j['repository']]
with open('./data/{}'.format(outfile), 'w+') as f:
f.write(json.dumps(rs))
if __name__ == "__main__":
get_source()
run_parser()
output_url_list()
|
mit
| -4,816,565,584,934,220,000
| 23.666667
| 77
| 0.597128
| false
| 3.148936
| false
| false
| false
|
Pistachitos/Sick-Beard
|
sickbeard/history.py
|
1
|
2727
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import db
import datetime
from sickbeard.common import SNATCHED, SUBTITLED, Quality
dateFormat = "%Y%m%d%H%M%S"
def _logHistoryItem(action, showid, season, episode, quality, resource, provider):
logDate = datetime.datetime.today().strftime(dateFormat)
myDB = db.DBConnection()
myDB.action("INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)",
[action, logDate, showid, season, episode, quality, resource, provider])
def logSnatch(searchResult):
for curEpObj in searchResult.episodes:
showid = int(curEpObj.show.tvdbid)
season = int(curEpObj.season)
episode = int(curEpObj.episode)
quality = searchResult.quality
providerClass = searchResult.provider
if providerClass != None:
provider = providerClass.name
else:
provider = "unknown"
action = Quality.compositeStatus(SNATCHED, searchResult.quality)
resource = searchResult.name
_logHistoryItem(action, showid, season, episode, quality, resource, provider)
def logDownload(episode, filename, new_ep_quality, release_group=None):
showid = int(episode.show.tvdbid)
season = int(episode.season)
epNum = int(episode.episode)
quality = new_ep_quality
# store the release group as the provider if possible
if release_group:
provider = release_group
else:
provider = -1
action = episode.status
_logHistoryItem(action, showid, season, epNum, quality, filename, provider)
def logSubtitle(showid, season, episode, status, subtitleResult):
resource = subtitleResult.release if subtitleResult.release else ''
provider = subtitleResult.service
status, quality = Quality.splitCompositeStatus(status)
action = Quality.compositeStatus(SUBTITLED, quality)
_logHistoryItem(action, showid, season, episode, quality, resource, provider)
|
gpl-3.0
| 3,868,721,296,955,677,700
| 32.268293
| 132
| 0.703337
| false
| 3.87358
| false
| false
| false
|
meine-stadt-transparent/meine-stadt-transparent
|
mainapp/models/person.py
|
1
|
1634
|
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext as _
from .helper import DefaultFields, DummyInterface
from .location import Location
class Person(DefaultFields, DummyInterface):
name = models.CharField(max_length=100)
given_name = models.CharField(max_length=100)
family_name = models.CharField(max_length=100)
location = models.ForeignKey(
Location, null=True, blank=True, on_delete=models.CASCADE
)
def __str__(self):
return self.name
def name_autocomplete(self):
"""A workaround to prevent empty values in the autocomplete-field in elasticsearch, which throws an error"""
return self.name if len(self.name) > 0 else " "
def get_default_link(self):
return reverse("person", args=[self.id])
def organization_ids(self):
return [organization.id for organization in self.membership_set.all()]
def sort_date(self):
if hasattr(self, "sort_date_prefetch"):
if self.sort_date_prefetch:
return self.sort_date_prefetch[0].start
else:
return self.created
# The most recent time this person joined a new organization
latest = (
self.membership_set.filter(start__isnull=False).order_by("-start").first()
)
if latest:
return latest.start
else:
return self.created
@classmethod
def dummy(cls, oparl_id: str) -> "Person":
return Person(
name=_("Missing Person"), given_name=_("Missing"), family_name=_("Missing")
)
|
mit
| 1,775,969,022,176,492,000
| 31.68
| 116
| 0.638311
| false
| 4.244156
| false
| false
| false
|
quanticio/backupstore
|
backupstore/src/core/common/metaoperation.py
|
1
|
14620
|
# coding=utf8
'''
@author : quanticio44
@contact : quanticio44@gmail.com
@license : See with Quanticio44
@summary : metaoperation for rsync complete
@since : 22/08/2014
'''
#Standard package
import os
import hashlib
import zipfile
import tarfile
#Internal package
import backupstoredbfile
import tools
class metaOperation(object):
    ''' Meta operations (checksum, compression, database bookkeeping) for the backup store '''
verbose = False
def __init__(self, checksum, compression, compressionLevel, compressionAlgo, BSDbFile=None, Verbose=False):
''' Constructor
@param checksum: checksum to sign the file ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
@param compression: compression boolean to compress the archive
@param compressionLevel: level compression for zlib using
@param compressionAlgo: Algorithm to use [zlib, zip, tar, gzip, bz2]
@param Verbose: Verbosity mode '''
self.verbose = Verbose
self.obj_BSDbFile = None
self.lstFSObjToRemove = ([],[])
if not checksum or checksum.lower() not in hashlib.algorithms:
self.checksum = ''
else:
self.checksum = checksum.lower()
if self.checksum == 'md5':
self.hasher = hashlib.md5
elif self.checksum == 'sha1':
self.hasher = hashlib.sha1
elif self.checksum == 'sha224':
self.hasher = hashlib.sha224
elif self.checksum == 'sha256':
self.hasher = hashlib.sha256
elif self.checksum == 'sha384':
self.hasher = hashlib.sha384
elif self.checksum == 'sha512':
self.hasher = hashlib.sha512
if not compression:
self.compression = 0
else:
self.compression = compression
if not compressionLevel:
self.compressionLevel = 0
else:
self.compressionLevel = compressionLevel
if not compressionAlgo or compressionAlgo not in ('zip', 'tar', 'gzip', 'bz2'):
self.compressionAlgo = 'zip'
else:
self.compressionAlgo = compressionAlgo
if BSDbFile:
self.obj_BSDbFile = backupstoredbfile.BackupStoreDbFile(BSDbFile)
self.obj_BSDbFile.open()
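    # Illustrative construction (values assumed, not from the original
    # project): SHA-256 checksums plus zip compression, without a BackupStore
    # database file:
    #
    #     meta = metaOperation('sha256', True, 6, 'zip')
    #     digest = meta.getChecksumOfFile('/etc/hostname')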
def getChecksumOfFile(self, path, hexaCheckSum=True, BlockSize=65536):
        ''' Compute the checksum of a file
@param path: path of file for the checksum '''
if self.checksum == '':
return
hasher = self.hasher()
with open(path, 'rb') as currentfile:
mybuffer = currentfile.read(BlockSize)
while len(mybuffer) > 0:
hasher.update(mybuffer)
mybuffer = currentfile.read(BlockSize)
if hexaCheckSum:
return hasher.hexdigest()
else:
return hasher.digest()
def updateFile(self, path):
        ''' Update the metadata of a file in the database
@param path: path of file '''
try:
if not self.obj_BSDbFile:
return
self.obj_BSDbFile.updateFile(name = os.path.basename(path), path=os.path.dirname(path), stat=os.stat(path), checksum=self.getChecksumOfFile(path))
except:
print os.path.basename(path)
print os.path.dirname(path)
print os.stat(path)
print self.getChecksumOfFile(path)
raise
def removeFile(self, path):
        ''' Remove a file from the database (only if it no longer exists on the filesystem)
@param path: path of file '''
if not self.obj_BSDbFile:
return
if not os.path.isfile(path):
self.obj_BSDbFile.removeFileOrFolder(name = os.path.basename(path), path=os.path.dirname(path))
def makeDir(self, pathfolder):
        ''' Make a folder in the database (the folder must exist on the filesystem)
@param pathfolder: path of folder '''
if not self.obj_BSDbFile:
return
if os.path.isdir(pathfolder):
self.obj_BSDbFile.addFolder(path=pathfolder)
def preRemoveTree(self, path):
''' Pre-remove tree in the database
        @param path: root folder of the tree to remove '''
if not self.obj_BSDbFile:
return
self.lstFSObjToRemove = self.__getFSObjList(path, [path], [])
def postRemoveTree(self):
        ''' Post-remove tree in the database, using the list built by preRemoveTree '''
if not self.obj_BSDbFile:
return
if len(self.lstFSObjToRemove[0]) == 0 and len(self.lstFSObjToRemove[1]) == 0:
return
# Remove files
for thisfile in self.lstFSObjToRemove[1]:
if not os.path.isfile(thisfile):
self.obj_BSDbFile.removeFileOrFolder(name = os.path.basename(thisfile), path = os.path.dirname(thisfile))
# Remove folders
for thisfolder in self.lstFSObjToRemove[0]:
if not os.path.isdir(thisfolder):
self.obj_BSDbFile.removeFileOrFolder(name = '', path = thisfolder)
def listdir(self, folder):
        ''' Get all filesystem objects in a folder
@param folder: folder path '''
return self.obj_BSDbFile.getObjInFolderList(folder)
def getFSObject(self, path):
''' Get FileSystem object (file or directory)
@path: Path to search
@return: Return a BackupStoreFSObjProperty object '''
return self.obj_BSDbFile.getFSObj(path)
def shouldUpdate(self, cookie, sink, target):
        ''' Decide whether the file has changed and should be updated
        @param cookie: rsync cookie (parameters of the whole operation)
        @param sink: original path
        @param target: BackupStoreFSObjProperty object
        @return whether the file has changed '''
try:
sink_st = os.stat(sink)
sink_sz = sink_st.st_size
sink_mt = sink_st.st_mtime
except:
self.log("Fail to retrieve information about sink %s (skip update)" % sink, True)
return 1
try:
target_sz = target.getPropertyInStat(propertyLabel='st_size')
target_mt = target.getPropertyInStat(propertyLabel='st_mtime')
except:
self.log("Fail to retrieve information about sink %s (skip update)" % sink, True)
return 1
try:
if self.getChecksumOfFile(sink) != target.checksum:
return 1
except:
self.log("Fail to retrieve information about sink %s (skip update)" % sink, True)
return 1
if cookie.update:
return target_mt < sink_mt - cookie.modify_window
if cookie.ignore_time:
return 1
if target_sz != sink_sz:
return 1
if cookie.size_only:
return 0
return abs(target_mt - sink_mt) > cookie.modify_window
def isdir(self, folder):
''' Test if folder exist in the database
@param folder: folder path '''
return self.obj_BSDbFile.isFolder(folder)
def isfile(self, filepath):
''' Test if folder exist in the database
@param filepath: file path '''
return self.obj_BSDbFile.isFile(name = os.path.basename(filepath), path = os.path.dirname(filepath))
def log(self, message, error=False):
''' Log all operation
@param message: Message to log
@param error: Set an error (False by default) '''
if not self.obj_BSDbFile:
return
if error:
self.obj_BSDbFile.addTrace(message, self.obj_BSDbFile.ERROR)
else:
self.obj_BSDbFile.addTrace(message, self.obj_BSDbFile.INFO)
    def __getFSObjList(self, path, lst_dir=None, lst_file=None):
        ''' Getting the list of folders and files under a root folder
        @param path: root folder
        @param lst_dir: list of folders
        @param lst_file: list of files '''
        if lst_dir is None:
            lst_dir = []
        if lst_file is None:
            lst_file = []
        for obj in os.listdir(path):
            abs_path = path + os.sep + obj
            if os.path.isfile(abs_path):
                lst_file.append(abs_path)
            elif os.path.isdir(abs_path):
                lst_dir.append(abs_path)
                # the recursive call fills the same lists in place
                self.__getFSObjList(abs_path, lst_dir, lst_file)
        return (lst_dir, lst_file)
def compressData(self, target_dir, filenames):
''' Compress data in the target_dir folder (all files) and clean files
@param target_dir: path to the folder
@param filenames: FileSystem object list '''
if not self.compression:
return
# Getting all files
allAbsFilesLst = []
for curfile in filenames:
if not os.path.isdir(target_dir + os.sep + curfile):
allAbsFilesLst.append(target_dir + os.sep + curfile)
if self.compressionAlgo.lower() == 'zip':
self.__compressDataToZipFile(self.__getArchiveName(target_dir, '.zip'), allAbsFilesLst)
elif self.compressionAlgo.lower() in ('tar', 'gzip', 'bz2'):
self.__compressDataToTarFile(self.__getArchiveName(target_dir, '.' + self.compressionAlgo.lower()), allAbsFilesLst)
def __getArchiveName(self, target_dir, extension):
''' Getting archive name with extension
@param target_dir: path to the folder
@param extension: Extension of archive
@return: Archive name '''
templatename = 'BS_files_' + extension
ArchiveName = target_dir + os.sep + templatename
nameexist = True
while nameexist:
if os.path.isfile(ArchiveName):
ArchiveName += '.' + templatename
else:
nameexist = False
return ArchiveName
def __compressDataToZipFile(self, zipfilename, allAbsFilesLst):
''' Compress data to a data file in the folder
@param zipfilename: Name of archive
@param allAbsFilesLst: All files to add '''
# Get compression type
if self.compressionLevel <= 1:
compress = zipfile.ZIP_STORED
else:
compress = zipfile.ZIP_DEFLATED
# Size verify : if the files in the folder (not in the subfolder) is more 2Go we use allowZip64
if tools.folderInformation(os.path.dirname(zipfilename)).getLocalSize() >= 2147483648:
allowZip64 = True
else:
allowZip64 = False
# Create zipfile
with zipfile.ZipFile(zipfilename, 'w', compress, allowZip64=allowZip64) as currentzip:
for curfile in allAbsFilesLst:
currentzip.write(curfile, os.path.basename(curfile))
# Verify and clean
error = ''
if zipfile.is_zipfile(zipfilename):
obj_zip = zipfile.ZipFile(zipfilename, 'r')
if len(obj_zip.namelist()) != len(allAbsFilesLst):
error = 'Archive is not correct (number files is not correct) !'
if obj_zip.testzip() != None:
error = 'Archive is not correct !'
obj_zip.close()
else:
error = 'Archive is not a zipfile !'
# Clean files in the folder
if error == '':
for curfile in allAbsFilesLst:
os.remove(curfile)
else:
if self.verbose:
print error
self.log(error, error=True)
def __compressDataToTarFile(self, tarfilename, allAbsFilesLst, algo='tar'):
''' Compress data to a data file in the folder
@param zipfilename: Name of archive
@param allAbsFilesLst: All files to add '''
# Get compression type
mode = 'w'
if algo == 'gzip':
mode += ':gz'
elif algo == 'bz2':
mode += ':bz2'
# Create zipfile
with tarfile.open(tarfilename, mode) as currenttar:
for curfile in allAbsFilesLst:
currenttar.add(curfile, arcname=os.path.basename(curfile), recursive=False)
# Verify and clean
error = ''
currenttar = tarfile.open(tarfilename, 'r')
if len(currenttar.getmembers()) != len(allAbsFilesLst):
error = 'Archive is not correct (number files is not correct) !'
currenttar.close()
# Clean files in the folder
if error == '':
for curfile in allAbsFilesLst:
os.remove(curfile)
else:
if self.verbose:
print error
self.log(error, error=True)
def unCompressData(self, target_dir):
''' Uncompress data in the target_dir folder (all files) and clean archive
@param target_dir: path to the folder '''
if not self.compression:
return
algo = self.compressionAlgo.lower()
ArchiveName = ''
templatename = 'BS_files_' + '.' + self.compressionAlgo
if algo in ('zip', 'tar', 'gzip', 'bz2'):
for name in os.listdir(target_dir):
if os.path.isfile(target_dir + os.sep + name) and name[len(name) - len(templatename):] == templatename:
ArchiveName = target_dir + os.sep + name
break
if ArchiveName == '':
return
raise EnvironmentError('Not found the archive for uncompress operation in %s' % target_dir)
if algo == 'zip':
with zipfile.ZipFile(ArchiveName, 'r') as currentzip:
currentzip.extractall(target_dir)
elif algo in ('tar', 'gzip', 'bz2'):
mode = 'r'
if algo == 'gzip':
mode += ':gz'
elif algo == 'bz2':
mode += ':bz2'
with tarfile.open(ArchiveName, mode) as currenttar:
currenttar.extractall(target_dir)
os.remove(ArchiveName)
|
gpl-2.0
| -7,056,456,634,023,718,000
| 34.831234
| 158
| 0.5487
| false
| 4.321608
| false
| false
| false
|
zathras777/atavism
|
atavism/http11/cookies.py
|
1
|
3542
|
from datetime import datetime
def stripped_split(ss, c, n=-1):
return [p.strip() for p in ss.split(c, n)]
class Cookie(object):
def __init__(self, path=None, key=None, value=None, domain=None, expires=None, max_age=None, secure=False):
self.path = path
self.key = key
self.value = value
self.expires = expires
self.domain = domain
self.max_age = max_age
self.secure = secure
self.http_only = False
def __eq__(self, other):
if other.path != self.path or other.key != self.key or other.domain != self.domain:
return False
return True
def __str__(self):
base = ['{}={}'.format(self.key or '', self.value or '')]
if self.path is not None:
base.append("Path={}".format(self.path))
if self.domain is not None:
base.append("Domain={}".format(self.domain))
if self.http_only:
base.append('HttpOnly')
return "; ".join(base)
def set_expires(self, dtstr):
self.expires = datetime.strptime(dtstr, "%a, %d-%b-%Y %H:%M:%S %Z")
def as_header(self):
return "{}={}".format(self.key, self.value)
def is_relevant(self, _path=None):
if self.expires is not None:
if self.expires < datetime.utcnow():
return False
if _path is None:
return False
if self.path is None or _path == '/':
return True
if _path[:len(self.path)].lower() == self.path.lower():
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if len(self.path) == len(other.path):
return self.key < other.key
return len(self.path) < len(other.path)
def __gt__(self, other):
return len(self.path) > len(other.path)
class CookieJar(object):
def __init__(self):
self.cookies = []
def __len__(self):
return len(self.cookies)
def add_cookie(self, _cookie):
for cc in self.cookies:
if cc == _cookie:
cc.value = _cookie.value
return
self.cookies.append(_cookie)
def __getitem__(self, item):
for c in self.cookies:
if c.key == item:
return c.value
return None
def get_cookie(self, item):
for c in self.cookies:
if c.key == item:
return c
return None
def parse_set_cookie(self, hdr_string):
if '=' not in hdr_string:
return
parts = stripped_split(hdr_string, ';')
c = Cookie()
c.key, c.value = stripped_split(parts[0], '=', 1)
for p in parts[1:]:
if p == 'HttpOnly':
c.http_only = True
continue
k, v = stripped_split(p, '=', 1)
if k.lower() == 'expires':
c.set_expires(v)
else:
setattr(c, k.lower(), v)
self.add_cookie(c)
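    # Illustrative sketch (header value assumed): parsing a Set-Cookie header
    #
    #     jar = CookieJar()
    #     jar.parse_set_cookie('sid=abc123; Path=/; HttpOnly')
    #     jar['sid']                   # 'abc123'
    #     jar.get_cookie('sid').path   # '/'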
def check_cookies(self, http_obj):
cookies = http_obj.get('set-cookie')
if cookies is None:
return
for c_str in cookies:
self.parse_set_cookie(c_str)
def get_cookies(self, _path):
matched = []
for c in self.cookies:
if c.is_relevant(_path):
matched.append(c)
if len(matched) == 0:
return None
return '; '.join([c.as_header() for c in sorted(matched)])
|
unlicense
| -4,666,415,880,981,012,000
| 27.111111
| 111
| 0.513552
| false
| 3.812702
| false
| false
| false
|
rafaelosoto/stream
|
{{cookiecutter.script_name}}/setup.py
|
1
|
2382
|
# -*- coding: utf-8 -*-
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
REQUIRES = [
'docopt',
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
def find_version(fname):
    '''Attempts to find the version number in the file named fname.
Raises RuntimeError if not found.
'''
version = ''
with open(fname, 'r') as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError('Cannot find version information')
return version
__version__ = find_version("{{ cookiecutter.script_name }}.py")
def read(fname):
with open(fname) as fp:
content = fp.read()
return content
setup(
name='{{ cookiecutter.script_name }}',
version="{{ cookiecutter.version }}",
description='{{ cookiecutter.short_description }}',
long_description=read("README.rst"),
author='{{ cookiecutter.full_name }}',
author_email='{{ cookiecutter.email }}',
url='https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.script_name }}',
install_requires=REQUIRES,
license=read("LICENSE"),
zip_safe=False,
keywords='{{ cookiecutter.script_name }}',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
py_modules=["{{ cookiecutter.script_name }}"],
entry_points={
'console_scripts': [
"{{cookiecutter.script_name}} = {{cookiecutter.script_name}}:main"
]
},
tests_require=['pytest'],
cmdclass={'test': PyTest}
)
|
mit
| -7,396,378,709,102,490,000
| 28.775
| 95
| 0.594039
| false
| 3.8112
| true
| false
| false
|
m-ober/byceps
|
tests/api/tourney/match/comments/test_get_comments_for_match.py
|
1
|
3509
|
"""
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
from byceps.services.tourney import (
match_comment_service as comment_service,
match_service,
)
def test_get_comments_for_match(
api_client, api_client_authz_header, match, comment
):
url = f'/api/tourney/matches/{match.id}/comments'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.created_by.id),
'screen_name': comment.created_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body_text': 'Denn man tau.',
'body_html': 'Denn man tau.',
'last_edited_at': None,
'last_editor': None,
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
}
]
}
def test_get_comments_for_match_with_edited_comment(
api_client, api_client_authz_header, match, edited_comment
):
url = f'/api/tourney/matches/{match.id}/comments'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(edited_comment.id),
'match_id': str(edited_comment.match_id),
'created_at': edited_comment.created_at.isoformat(),
'creator': {
'user_id': str(edited_comment.created_by.id),
'screen_name': edited_comment.created_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body_text': '[b]So nicht[/b], Freundchen!',
'body_html': '<strong>So nicht</strong>, Freundchen!',
'last_edited_at': edited_comment.last_edited_at.isoformat(),
'last_editor': {
'user_id': str(edited_comment.last_edited_by.id),
'screen_name': edited_comment.last_edited_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
}
]
}
# helpers
@pytest.fixture
def match(app):
return match_service.create_match()
@pytest.fixture
def comment(app, match, user):
return comment_service.create_comment(match.id, user.id, 'Denn man tau.')
@pytest.fixture
def edited_comment(app, comment, admin):
comment_service.update_comment(
comment.id, admin.id, '[b]So nicht[/b], Freundchen!'
)
return comment_service.get_comment(comment.id)
|
bsd-3-clause
| -1,295,547,686,589,896,400
| 31.192661
| 77
| 0.520946
| false
| 3.839168
| true
| false
| false
|
yaolei313/python-study
|
algorithm_study/region_merge.py
|
1
|
3025
|
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __str__(self):
return "[{},{}]".format(self.start, self.end)
class Solution:
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
if intervals is None:
return []
elif len(intervals) == 1:
return intervals
# self.quickSort(intervals, 0, len(intervals) - 1, lambda x: x.start)
intervals.sort(key=lambda x: x.start)
for interval in intervals:
print('%s' % interval, end='')
print()
rst = []
region_left = None
region_right = None
for t1 in intervals:
if region_left is None:
region_left = t1.start
region_right = t1.end
continue
if region_right >= t1.start:
region_right = max(region_right, t1.end)
else:
rst.append(Interval(region_left, region_right))
region_left = t1.start
region_right = t1.end
if region_left is not None:
rst.append(Interval(region_left, region_right))
return rst
def quickSort(self, lst, l, r, func):
if l >= r:
return
key_idx = l
key = lst[l]
compare_key = func(lst[l])
i, j = l, r
while i < j:
while func(lst[j]) >= compare_key and i < j:
j -= 1
if i < j:
lst[key_idx] = lst[j]
while func(lst[i]) <= compare_key and i < j:
i += 1
if i < j:
lst[j] = lst[i]
key_idx = i
lst[key_idx] = key
self.quickSort(lst, l, key_idx - 1, func)
self.quickSort(lst, key_idx + 1, r, func)
def quickSort2(self, lst, l, r):
"""
:type lst: List[int]
:rtype List[int]
"""
if l < r:
key = lst[l]
i = l
j = r
while i < j:
while lst[j] >= key and i < j:
j -= 1
if i < j:
lst[i] = lst[j]
while lst[i] <= key and i < j:
i += 1
if i < j:
lst[j] = lst[i]
lst[i] = key
self.quickSort2(lst, l, i - 1)
self.quickSort2(lst, i + 1, r)
if __name__ == "__main__":
t = Solution()
input_grid = [Interval(1, 3), Interval(8, 10), Interval(2, 6), Interval(15, 18)]
t_result = t.merge(input_grid)
for item in t_result:
print('%s' % item, end='')
# input_array = [2, 5, 33, 2, 17, 5, 2]
# t.quickSort(input_array, 0, len(input_array) - 1, lambda x: x)
# print(input_array)
# input_array2 = [2, 5, 33, 2, 17, 5, 2]
# t.quickSort2(input_array2, 0, len(input_array2) - 1)
# print(input_array2)
|
gpl-2.0
| 81,288,053,263,669,400
| 26.5
| 84
| 0.447603
| false
| 3.538012
| false
| false
| false
|
cloudera/ibis
|
ibis/backends/sqlite/client.py
|
1
|
9430
|
import errno
import functools
import inspect
import math
import os
from typing import Optional
import regex as re
import sqlalchemy as sa
import ibis.backends.base_sqlalchemy.alchemy as alch
from ibis.client import Database
from .compiler import SQLiteDialect
class SQLiteTable(alch.AlchemyTable):
pass
class SQLiteDatabase(Database):
pass
_SQLITE_UDF_REGISTRY = set()
_SQLITE_UDAF_REGISTRY = set()
def udf(f):
"""Create a SQLite scalar UDF from `f`
Parameters
----------
f
A callable object
Returns
-------
callable
A callable object that returns ``None`` if any of its inputs are
``None``.
"""
@functools.wraps(f)
def wrapper(*args):
if any(arg is None for arg in args):
return None
return f(*args)
_SQLITE_UDF_REGISTRY.add(wrapper)
return wrapper
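# Illustrative sketch (the function below is hypothetical, not part of the
# shipped registry): a scalar UDF wrapped by @udf short-circuits on NULL
# inputs, e.g.
#
#     @udf
#     def _ibis_sqlite_add_one(arg):
#         return arg + 1
#
#     _ibis_sqlite_add_one(3)     # 4
#     _ibis_sqlite_add_one(None)  # None, the wrapped body is never called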
def udaf(cls):
"""Register a UDAF class with any SQLite connection."""
_SQLITE_UDAF_REGISTRY.add(cls)
return cls
@udf
def _ibis_sqlite_reverse(string):
return string[::-1]
@udf
def _ibis_sqlite_string_ascii(string):
return ord(string[0])
@udf
def _ibis_sqlite_capitalize(string):
return string.capitalize()
@udf
def _ibis_sqlite_translate(string, from_string, to_string):
table = str.maketrans(from_string, to_string)
return string.translate(table)
@udf
def _ibis_sqlite_regex_search(string, regex):
"""Return whether `regex` exists in `string`.
Parameters
----------
string : str
regex : str
Returns
-------
found : bool
"""
return re.search(regex, string) is not None
@udf
def _ibis_sqlite_regex_replace(string, pattern, replacement):
"""Replace occurences of `pattern` in `string` with `replacement`.
Parameters
----------
string : str
pattern : str
replacement : str
Returns
-------
result : str
"""
return re.sub(pattern, replacement, string)
@udf
def _ibis_sqlite_regex_extract(string, pattern, index):
"""Extract match of regular expression `pattern` from `string` at `index`.
Parameters
----------
string : str
pattern : str
index : int
Returns
-------
result : str or None
"""
result = re.search(pattern, string)
if result is not None and 0 <= index <= (result.lastindex or -1):
return result.group(index)
return None
@udf
def _ibis_sqlite_exp(arg):
"""Exponentiate `arg`.
Parameters
----------
arg : number
Number to raise to `e`.
Returns
-------
result : Optional[number]
        None if the input is None
"""
return math.exp(arg)
@udf
def _ibis_sqlite_log(arg, base):
if arg < 0 or base < 0:
return None
return math.log(arg, base)
@udf
def _ibis_sqlite_ln(arg):
if arg < 0:
return None
return math.log(arg)
@udf
def _ibis_sqlite_log2(arg):
return _ibis_sqlite_log(arg, 2)
@udf
def _ibis_sqlite_log10(arg):
return _ibis_sqlite_log(arg, 10)
@udf
def _ibis_sqlite_floor(arg):
return math.floor(arg)
@udf
def _ibis_sqlite_ceil(arg):
return math.ceil(arg)
@udf
def _ibis_sqlite_sign(arg):
if not arg:
return 0
return math.copysign(1, arg)
@udf
def _ibis_sqlite_floordiv(left, right):
return left // right
@udf
def _ibis_sqlite_mod(left, right):
return left % right
@udf
def _ibis_sqlite_power(arg, power):
"""Raise `arg` to the `power` power.
Parameters
----------
arg : number
Number to raise to `power`.
power : number
Number to raise `arg` to.
Returns
-------
result : Optional[number]
        None if either argument is None, or if a negative number is raised
        to a fractional power
"""
if arg < 0.0 and not power.is_integer():
return None
return arg ** power
@udf
def _ibis_sqlite_sqrt(arg):
"""Square root of `arg`.
Parameters
----------
arg : Optional[number]
Number to take the square root of
Returns
-------
result : Optional[number]
None if `arg` is None or less than 0 otherwise the square root
"""
return None if arg is None or arg < 0.0 else math.sqrt(arg)
class _ibis_sqlite_var:
def __init__(self, offset):
self.mean = 0.0
self.sum_of_squares_of_differences = 0.0
self.count = 0
self.offset = offset
def step(self, value):
if value is not None:
self.count += 1
delta = value - self.mean
self.mean += delta / self.count
self.sum_of_squares_of_differences += delta * (value - self.mean)
def finalize(self):
count = self.count
if count:
return self.sum_of_squares_of_differences / (count - self.offset)
return None
@udaf
class _ibis_sqlite_var_pop(_ibis_sqlite_var):
def __init__(self):
super().__init__(0)
@udaf
class _ibis_sqlite_var_samp(_ibis_sqlite_var):
def __init__(self):
super().__init__(1)
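# Worked example (values assumed): feeding 1, 2, 3 through step() leaves
# mean == 2.0 and sum_of_squares_of_differences == 2.0, so
# _ibis_sqlite_var_pop.finalize() returns 2/3 while
# _ibis_sqlite_var_samp.finalize() returns 1.0 (Welford's online algorithm).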
def number_of_arguments(callable):
signature = inspect.signature(callable)
parameters = signature.parameters.values()
kinds = [param.kind for param in parameters]
valid_kinds = (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
)
if any(kind not in valid_kinds for kind in kinds) or any(
param.default is not inspect.Parameter.empty for param in parameters
):
raise TypeError(
'Only positional arguments without defaults are supported in Ibis '
'SQLite function registration'
)
return len(parameters)
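# Illustrative sketch (signatures hypothetical): only plain positional
# parameters without defaults are counted, e.g.
#
#     number_of_arguments(lambda x, y: x + y)   # 2
#     number_of_arguments(lambda x, y=1: x)     # raises TypeError (default value)
#     number_of_arguments(lambda *args: args)   # raises TypeError (*args)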
def _register_function(func, con):
"""Register a Python callable with a SQLite connection `con`.
Parameters
----------
func : callable
con : sqlalchemy.Connection
"""
nargs = number_of_arguments(func)
con.connection.connection.create_function(func.__name__, nargs, func)
def _register_aggregate(agg, con):
"""Register a Python class that performs aggregation in SQLite.
Parameters
----------
agg : type
con : sqlalchemy.Connection
"""
nargs = number_of_arguments(agg.step) - 1 # because self
con.connection.connection.create_aggregate(agg.__name__, nargs, agg)
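# A minimal sketch of what the two helpers above delegate to on the raw DBAPI
# connection; the in-memory database here is an assumption used only for
# illustration.
#
#     import sqlite3
#     raw = sqlite3.connect(':memory:')
#     raw.create_function('_ibis_sqlite_exp', 1, _ibis_sqlite_exp)
#     raw.create_aggregate('_ibis_sqlite_var_pop', 1, _ibis_sqlite_var_pop)
#     raw.execute('SELECT _ibis_sqlite_exp(1.0)').fetchone()  # -> (2.71828...,)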
class SQLiteClient(alch.AlchemyClient):
"""The Ibis SQLite client class."""
dialect = SQLiteDialect
database_class = SQLiteDatabase
table_class = SQLiteTable
def __init__(self, path=None, create=False):
super().__init__(sa.create_engine("sqlite://"))
self.name = path
self.database_name = "base"
if path is not None:
self.attach(self.database_name, path, create=create)
for func in _SQLITE_UDF_REGISTRY:
self.con.run_callable(functools.partial(_register_function, func))
for agg in _SQLITE_UDAF_REGISTRY:
self.con.run_callable(functools.partial(_register_aggregate, agg))
@property
def current_database(self) -> Optional[str]:
return self.database_name
def list_databases(self):
raise NotImplementedError(
'Listing databases in SQLite is not implemented'
)
def set_database(self, name: str) -> None:
raise NotImplementedError('set_database is not implemented for SQLite')
def attach(self, name, path, create: bool = False) -> None:
"""Connect another SQLite database file
Parameters
----------
name : string
Database name within SQLite
path : string
Path to sqlite3 file
create : boolean, optional
If file does not exist, create file if True otherwise raise an
Exception
"""
if not os.path.exists(path) and not create:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), path
)
quoted_name = self.con.dialect.identifier_preparer.quote(name)
self.raw_sql(
"ATTACH DATABASE {path!r} AS {name}".format(
path=path, name=quoted_name
)
)
self.has_attachment = True
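    # Hedged usage sketch (the file paths and table name below are assumptions):
    #
    #     con = SQLiteClient('/tmp/base.db', create=True)
    #     con.attach('aux', '/tmp/aux.db', create=True)
    #     events = con.table('events', database='aux')
    #
    # ATTACH does not copy any data; it only makes the second file queryable
    # under the given schema name.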
@property
def client(self):
return self
def _get_sqla_table(self, name, schema=None, autoload=True):
return sa.Table(
name,
self.meta,
schema=schema or self.current_database,
autoload=autoload,
)
def table(self, name, database=None):
"""
Create a table expression that references a particular table in the
SQLite database
Parameters
----------
name : string
database : string, optional
name of the attached database that the table is located in.
Returns
-------
TableExpr
"""
alch_table = self._get_sqla_table(name, schema=database)
node = self.table_class(alch_table, self)
return self.table_expr_class(node)
def list_tables(self, like=None, database=None, schema=None):
if database is None:
database = self.database_name
return super().list_tables(like, schema=database)
def _table_from_schema(
self, name, schema, database: Optional[str] = None
) -> sa.Table:
columns = self._columns_from_schema(name, schema)
return sa.Table(name, self.meta, schema=database, *columns)
|
apache-2.0
| -6,957,263,060,061,383,000
| 21.613909
| 79
| 0.605408
| false
| 3.89187
| false
| false
| false
|
lakiw/cripts
|
cripts/core/s3_tools.py
|
1
|
4177
|
from django.conf import settings
from bson.objectid import ObjectId
import boto
from boto.s3.connection import S3Connection
from boto.s3.key import Key
class S3Error(Exception):
"""
Generic S3 Exception.
"""
pass
def s3_connector(bucket):
"""
Connect to an S3 bucket.
:param bucket: The bucket to connect to.
:type bucket: str
:returns: :class:`boto.s3.connection.S3Connection`, S3Error
"""
S3_hostname = getattr(settings, 'S3_HOSTNAME', S3Connection.DefaultHost)
try:
conn = S3Connection(aws_access_key_id = settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key = settings.AWS_SECRET_ACCESS_KEY,
is_secure = True,
host = S3_hostname)
mybucket = conn.get_bucket(bucket)
return mybucket
except boto.exception.S3ResponseError as e:
raise S3Error("Error connecting to S3: %s" % e)
except:
raise
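# Hedged usage sketch (the bucket name and key are assumptions; AWS_ACCESS_KEY_ID,
# AWS_SECRET_ACCESS_KEY and optionally S3_HOSTNAME must be set in Django settings):
#
#     bucket = s3_connector('cripts-samples')
#     key = bucket.get_key('d41d8cd98f00b204e9800998ecf8427e')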
def s3_create_bucket(bucket):
"""
Create an S3 bucket.
:param bucket: The bucket to create.
:type bucket: str
:returns: S3Error
"""
try:
S3_hostname = getattr(settings, 'S3_HOSTNAME', S3Connection.DefaultHost)
conn = S3Connection(aws_access_key_id = settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key = settings.AWS_SECRET_ACCESS_KEY,
is_secure = True,
host = S3_hostname)
conn.create_bucket(bucket)
except boto.exception.S3CreateError as e:
raise S3Error("Error creating bucket in S3: %s" % e)
except:
raise
def s3_translate_collection(collection):
"""
    Translate a CRIPs collection to an S3 bucket.
:param collection: The collection to translate.
:type collection: str
:returns: str
"""
bucket = settings.COLLECTION_TO_BUCKET_MAPPING[collection.replace(".files","")]
return bucket + settings.S3_SEPARATOR + settings.S3_ID
def file_exists_s3(sample_md5, collection):
"""
    Determine if a file already exists in S3.
:param sample_md5: The MD5 to search for.
:type sample_md5: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: str
"""
bucket = s3_connector(s3_translate_collection(collection))
return bucket.get_key(sample_md5)
def put_file_s3(data, collection):
"""
Add a file to S3.
:param data: The data to add.
:type data: str
:param collection: The collection to translate for addition.
:type collection: str
:returns: str
"""
bucket = s3_connector(s3_translate_collection(collection))
k = Key(bucket)
oid = ObjectId()
k.key = oid
# TODO: pass md5 to put_file() to avoid recalculation.
k.set_contents_from_string(data)
return oid
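# Hedged round-trip sketch (the collection name is an assumption and must appear
# in settings.COLLECTION_TO_BUCKET_MAPPING):
#
#     oid = put_file_s3('file contents', 'sample.files')
#     assert get_file_s3(oid, 'sample.files') == 'file contents'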
def get_file_s3(oid, collection):
"""
Get a file from S3.
:param oid: The ObjectId to lookup.
:type oid: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: str
"""
bucket = s3_connector(s3_translate_collection(collection))
k = bucket.get_key(oid)
return k.get_contents_as_string()
def get_filename_s3(sample_md5, collection):
"""
Get a filename from S3.
:param sample_md5: The MD5 to lookup.
:type sample_md5: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: str
"""
try:
bucket = s3_connector(s3_translate_collection(collection))
k = bucket.get_key(sample_md5)
filename = k.get_metadata("filename")
except Exception:
return None
return filename
def delete_file_s3(sample_md5, collection):
"""
Remove a file from S3.
:param sample_md5: The MD5 to remove.
:type sample_md5: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: True, None
"""
try:
bucket = s3_connector(s3_translate_collection(collection))
k = bucket.get_key(sample_md5)
k.delete()
return True
except Exception:
return None
|
mit
| 9,091,742,652,331,264,000
| 26.123377
| 83
| 0.629638
| false
| 3.699734
| false
| false
| false
|
geophysics/mtpy
|
mtpy/modeling/pek2d.py
|
1
|
20327
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 28 14:06:45 2014
@author: a1655681
"""
import mtpy.modeling.occam2d as o2d
import mtpy.modeling.pek1dclasses as p1dc
import numpy as np
import os
import os.path as op
import pek2dforward as p2d
import string
import scipy.interpolate as si
import mtpy.utils.filehandling as fh
import mtpy.core.edi as mtedi
import mtpy.modeling.pek2dforward as p2d
class Model():
"""
class for creating and reading model files
"""
def __init__(self, working_directory, **input_parameters):
self.working_directory = working_directory
self.edi_directory = None
self.occam_configfile = None
self.parameters_ctl = {}
self.parameters_ctl['ctl_string'] = 'TAB'
self.parameters_ctl['units_string'] = 'PR'
self.parameters_ctl['quadrants'] = '++--'
self.parameters_ctl['orientation_string'] = '0 0.d0 0.d0'
self.parameters_ctl['convergence_string'] = '1 6 1.d-4'
self.parameters_ctl['roughness_string'] = '2 1000.0d0 1000.0d0 0.d0'
self.parameters_ctl['anisotropy_penalty_string'] = '2 1000.d0 0.d0'
self.parameters_ctl['anisotropy_ctl_string'] = '1.d0 1.d0 1.d0'
self.parameters_model = {}
self.parameters_model['no_sideblockelements'] = 5
self.parameters_model['no_bottomlayerelements'] = 4
self.parameters_model['firstlayer_thickness'] = 100
#model depth is in km!
self.parameters_model['model_depth'] = 100
self.parameters_model['no_layers'] = 25
self.parameters_model['max_blockwidth'] = 1000
self.parameters_data = {}
self.parameters_data['strike'] = 0.
self.parameters_data['errorfloor'] = dict(z=np.array([[0.05,0.05],
[0.05,0.05]]),
tipper=np.array([0.02,0.02]))
self.parameters_data['errorfloor_type'] = 'offdiagonals'# offdiagonals or relative
self.parameters_data['max_no_frequencies'] = 50
self.parameters_data['mode'] = [1,1,1,1,1,1]
self.n_airlayers = 5
self.mesh = None
self.meshlocations_x = None
self.meshlocations_z = None
self.meshblockwidths_x = None
self.meshblockthicknesses_z = None
self.profile_easts = None
self.profile_norths = None
self.inversion1d_dirdict = {}
self.inversion1d_masterdir = '.'
self.inversion1d_modelno = 0
self.inversion1d_imethod = 'nearest'
self.idir_basename = 'aniso'
self.binsize_resistivitylog10 = 1.
self.binsize_strike = 20.
self.build_from_1d = False
self.rotation = 0.
self.modelfile = 'model.dat'
self.anisotropy_min_depth = 0.
self.strike = 0.
self.edifiles = []
self.Data = None
self.modelfile = 'model'
self.resfile = 'pb.res'
self.cvgfile = 'pb.cvg'
self.outfile = 'pb.out'
self.pexfile = 'pb.pex'
self.andfile = 'pb.and'
self.exlfile = 'pb.exl'
update_dict = {}
#correcting dictionary for upper case keys
input_parameters_nocase = {}
for key in input_parameters.keys():
input_parameters_nocase[key.lower()] = input_parameters[key]
update_dict.update(input_parameters_nocase)
for dictionary in [self.parameters_model,self.parameters_data]:
for key in dictionary.keys():
if key in update_dict:
#check if entry exists:
try:
value = float(update_dict[key])
dictionary[key] = value
except:
value = update_dict[key]
dictionary[key] = value
if type(value) in [str]:
if value.strip().lower()=='none':
dictionary[key] = None
for key in update_dict:
try:
value = getattr(self,key)
if update_dict[key] is not None:
try:
value = float(update_dict[key])
setattr(self,key,value)
except:
value = update_dict[key]
setattr(self,key,value)
if type(value) in [str]:
if value.strip().lower()=='none':
setattr(self,key,None)
except:
continue
self.input_parameters = update_dict
        print(self.idir_basename)
if self.edifiles == []:
if self.edi_directory is not None:
try:
self.edifiles = [op.join(self.edi_directory,
f) for f in os.listdir(self.edi_directory)]
except IOError:
print("failed to find edi directory")
pass
def build_inputfiles(self):
inversiondir = fh.make_unique_folder(self.working_directory,basename=self.idir_basename)
os.mkdir(op.join(self.working_directory,inversiondir))
self.working_directory = inversiondir
self.build_model()
self.write_modelfile()
self.write_datafiles()
self.write_ctlfile()
def read_model(self):
"""
use pek2d forward python setup code to read the model
"""
model = p2d.Model(working_directory = self.working_directory,
**self.input_parameters)
model.read_model()
for attr in ['meshblockwidths_x', 'meshblockthicknesses_z',
'meshlocations_x', 'meshlocations_z',
'modelblocknums', 'resistivity', 'sds',
'station_indices','modelfile_reslines',
'n_airlayers']:
try:
setattr(self,attr,getattr(model,attr))
except:
print "can't assign attribute {}".format(attr)
def read_outfile(self,chunk=1750,linelength=52):
"""
        read the outfile from the end backwards and extract the last iteration
"""
# open outfile
outfile = open(op.join(self.working_directory,self.outfile))
if not hasattr(self,'modelblocknums'):
self.read_model()
elif self.modelblocknums is None:
self.read_model()
mb = np.sum(self.modelfile_reslines[:,-6:].astype(int))
        # read backwards from the end of the file, in chunks of `chunk` bytes (1750 by default), until a 4-column row is found
nn = 1
while True:
try:
outfile.seek(-nn, 2)
outfile.readline()
line = outfile.readline().strip().split()
n = outfile.tell()
if len(line) == 4:
break
nn += chunk
except:
print "invalid outfile, cannot read resistivity values from outfile yet"
return
m = 0
while line[0] != '1':
outfile.seek(n-linelength*m)
line = outfile.readline().strip().split()
m += 1
self.outfile_reslines = np.zeros([mb,4])
for m in range(mb):
self.outfile_reslines[m] = [float(ll) for ll in line]
line = outfile.readline().strip().split()
# iterate through resistivity and assign new values if they have been inverted for
n = 0
nair = self.n_airlayers + 1
nx,nz = len(self.meshlocations_x), len(self.meshlocations_z)
for i in range(nz - nair):
for j in range(nx - 1):
for k in range(6):
mfi = (nx - 1)*i + j + 1
if self.modelfile_reslines[mfi,k+8] == '1':
if k < 3:
self.resistivity[i+nair-1,j,k] = self.outfile_reslines[n,2]
else:
self.sds[i+nair-1,j,k-3] = self.outfile_reslines[n,2]
n += 1
# print i,j,k,n
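    # The reverse-read pattern used above, reduced to a standalone sketch
    # (the file name and step size are illustrative only):
    #
    #     f = open('pb.out')
    #     nn = 1
    #     while True:
    #         f.seek(-nn, 2)                 # seek nn bytes back from the end
    #         f.readline()                   # discard the possibly partial line
    #         line = f.readline().strip().split()
    #         if len(line) == 4:             # last 4-column block found
    #             break
    #         nn += 1750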
def build_model(self):
"""
build model file string
"""
# build a forward model object
ro = p2d.Model(self.working_directory,**self.input_parameters)
ro.build_model()
        # assign relevant parameters to the pek 2d inverse object
for at in ['stationlocations','parameters_model',
'meshlocations_x','meshlocations_z',
'meshblockwidths_x','meshblockthicknesses_z',
'profile_easts','profile_norths','Data',
'meshblockthicknesses_zair','meshlocations_zair']:
attvalue = getattr(ro,at)
setattr(self,at,attvalue)
ro.get_station_meshblock_numbers()
if ro.build_from_1d:
# try:
ro.get_1d_results()
ro.interpolate_1d_results()
for at in ['inversion1d_dirdict','inversion1d_modelno',
'models1d','resistivity','stationlocations',
'blockcentres_x','blockcentres_z']:
attvalue = getattr(ro,at)
setattr(self,at,attvalue)
# except:
else:
for at in ['resistivity','stationlocations',
'blockcentres_x','blockcentres_z']:
setattr(self,at,getattr(ro,at))
for at in ['inversion1d_dirdict','inversion1d_modelno',
'models1d']:
setattr(self,at,None)
ro.get_station_meshblock_numbers()
self.stationblocknums=ro.stationblocknums
self.build_modelfilestring()
def write_modelfile(self):
if not hasattr(self,'modelfilestring'):
self.build_model()
outfile = open(op.join(self.working_directory,
self.modelfile),'w')
outfile.write(self.modelfilestring)
outfile.close()
def build_modelfilestring(self):
# initialise a list containing info for model file
modelfilestring = []
# add header info
modelfilestring.append('NEW')
modelfilestring.append(' 1')
modelfilestring.append(' 1.000')
# add string giving number of cells:
modelfilestring.append(''.join(['%5i'%i for i in [len(self.meshlocations_x),
len(self.meshlocations_z)+self.n_airlayers,
self.n_airlayers+1]]))
# add strings giving horizontal and vertical mesh steps
meshz = list(self.meshblockthicknesses_zair)+list(self.meshblockthicknesses_z)
for meshstep in [self.meshblockwidths_x,meshz]:
modelfilestring.append\
(p2d.create_multiple_line_string(meshstep,
10,'%10.3f'))
# add resistivity map
rmap = ('%5i'%0*len(self.resistivity[0])+'\n')*self.n_airlayers
rmap += '\n'.join([''.join('%5i'%ii for ii in i) for i in \
np.arange(1,np.size(self.resistivity[:,:,0])+1).reshape(np.shape(self.resistivity)[:2])])
modelfilestring.append(rmap)
# add number of resistivity domains (+1 to include air)
modelfilestring.append('%5i'%(np.size(self.resistivity[:,:,0])+1))
# add dictionary contents, assuming rvertical = rmax, slant and dip zero
# first, air layer, properties always the same
modelfilestring.append(' 0 0 -1.00 0.00 0.00 0.00 0.00 0.00 0 0 0 0 0 0')
# second, dictionary contents
no = 1
for j in range(len(self.resistivity)):
for i in range(len(self.resistivity[j])):
# initialise a list containing resx,resy,strike
rlist = list(self.resistivity[j,i])
# insert resz (assumed to be same as resy)
rlist.insert(2,rlist[1])
# insert dip and slant (assumed zero)
rlist += [0.,0.]
# if rlist[1]/rlist[0] == 1.:
# aniso = ' 0'
# invert_key = ' 1 1 1 0 0 0'
# else:
aniso = ' 1'
invert_key = ' 1 1 1 1 1 0'
modelfilestring.append(''.join(['%5i'%no,aniso]+['%10.2f'%i for i in rlist]+[invert_key]))
no += 1
# append bathymetry index, at this stage only 0 allowed:
modelfilestring.append('%5i'%0)
# append number of calculation points (stations):
modelfilestring.append('%5i'%len(self.stationblocknums))
# append rotation
modelfilestring.append('%10.2f'%self.rotation)
# append station blocknums
modelfilestring.append(p2d.create_multiple_line_string(self.stationblocknums,
5,' %03i'))
modelfilestring.append('%5i'%0)
self.modelfilestring = '\n'.join(modelfilestring)+'\n'
def build_data(self):
imethod = 'nearest'
ftol = 0.000001
num_freq = int(self.parameters_data['max_no_frequencies'])
# get minimum and maximum periods
min_val = max([min(1./zo.freq) for zo in self.Data.Z])
max_val = min([max(1./zo.freq) for zo in self.Data.Z])
periodlst = []
for period in 1./self.Data.frequencies:
if len(periodlst) > 0:
# find the difference between the period and the closest period already in the list
closest_period_diff = np.amin(np.abs(np.array(periodlst)-period))
else:
# otherwise set period difference to a large number
closest_period_diff = 99999
# check whether the fractional difference is bigger than the tolerance set
# print closest_period_diff,closest_period_diff/period,
if closest_period_diff/period > ftol:
if min_val <= period <= max_val:
periodlst.append(period)
periodlst.sort()
# print periodlst
# if number of periods still too long based on the number of frequencies set
# then take out some frequencies
n = 2
new_periodlst = periodlst
while len(new_periodlst) > num_freq:
new_periodlst = [periodlst[int(p)] for p in range(len(periodlst)) if p%n == 0]
n += 1
periodlst = new_periodlst
mode = self.parameters_data['mode']
if type(mode) in [str]:
mode = mode.split(',')
self.parameters_data['mode'] = mode
datafile_data = {}
for ee,zo in enumerate(self.Data.Z):
to=self.Data.Tipper[ee]
datfn = str(self.stationblocknums[ee])+'_'+self.Data.stations[ee]+'.dat'
zerr = zo.zerr
z = zo.z
ze_rel = zerr/np.abs(z)
terr = to.tippererr
t = to.tipper
te_rel = terr/np.abs(t)
# set error floors
efz = self.parameters_data['errorfloor']['z']
eft = self.parameters_data['errorfloor']['tipper']
eftype = self.parameters_data['errorfloor_type']
if eftype in ['relative','offdiagonals']:
for i in range(2):
for j in range(2):
ze_rel[ze_rel<efz[i,j]] = efz[i,j]
te_rel[te_rel<eft[i]] = eft[i]
zerr = ze_rel * np.abs(z)
terr = te_rel * np.abs(t)
if eftype == 'offdiagonals':
for i in range(2):
for iz in range(len(z)):
if zerr[iz,i,i] < zerr[iz,i,1-i]:
zerr[iz,i,i] = zerr[iz,i,1-i]
zvar = zerr**2
# create interpolation functions to interpolate z and tipper values
properties = dict(z_real=np.real(z),z_imag=np.imag(z),
z_var=zvar,tipper_real=np.real(t),
tipper_imag=np.imag(t),tipper_err=terr)
properties_interp = {}
for key in properties.keys():
f = si.interp1d(np.log10(1./zo.freq),properties[key],
axis = 0,kind = imethod)
properties_interp[key] = f(np.log10(periodlst))
datafile_data[datfn] = properties_interp
self.datafile_data = datafile_data
self.freq = 1./(np.array(periodlst))
def build_datafiles(self):
if not hasattr(self,'datafile_data'):
self.build_data()
dfstrings = {}
for dfile in self.datafile_data.keys():
datfstr = '{:<3} '.format(len(self.freq))+\
' '.join([str(i) for i in self.parameters_data['mode']])+'\n'
for pv in range(len(self.freq)):
datlst = '{0:>12}'.format('%.06f'%(1./(self.freq[pv])))
for ii in range(2):
for jj in range(2):
for pval in ['z_real', 'z_imag', 'z_var']:
# print self.datafile_data[dfile][pval][pv][ii,jj]
datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][ii,jj])
for ii in range(2):
for pval in ['tipper_real', 'tipper_imag', 'tipper_err']:
datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][0,ii])
datfstr += ''.join(datlst)+'\n'
dfstrings[dfile] = datfstr
self.datafile_strings = dfstrings
def write_datafiles(self):
if not hasattr(self,'datafile_strings'):
self.build_datafiles()
exlf = open(os.path.join(self.working_directory,self.working_directory,self.exlfile),'w')
dfkeys=self.datafile_strings.keys()
dfkeys.sort()
for dfile in dfkeys:
f = open(op.join(self.working_directory,self.working_directory,dfile),'w')
f.write(self.datafile_strings[dfile])
f.close()
exlf.write(dfile+'\n')
exlf.close()
def write_ctlfile(self):
ctrf = open(op.join(self.working_directory,self.working_directory,'pb.ctr'),'w')
if type(self.parameters_data['mode']) == str:
self.parameters_data['mode'] = self.parameters_data['mode'].split(',')
ef = np.hstack([self.parameters_data['errorfloor'][l].flatten() for l in ['z','tipper']])
clist = []
clist.append(self.exlfile)
clist.append(self.parameters_ctl['ctl_string']+self.parameters_ctl['units_string']+self.parameters_ctl['quadrants'])
clist.append(' '.join([str(i) for i in self.parameters_data['mode']]))
clist.append(' '.join(['0.00' for i in ef]))
clist.append(self.parameters_ctl['orientation_string'])
clist.append(self.modelfile)
clist.append(self.resfile)
clist.append(self.parameters_ctl['convergence_string'])
clist.append(self.parameters_ctl['roughness_string'])
clist.append(self.parameters_ctl['anisotropy_penalty_string'])
clist.append(self.parameters_ctl['anisotropy_ctl_string'])
clist.append(self.cvgfile)
clist.append(self.outfile)
clist.append(self.pexfile)
clist.append(self.andfile)
self.controlfile_string = '\n'.join(clist)
ctrf.write(self.controlfile_string)
ctrf.close()
|
gpl-3.0
| 4,673,820,515,280,582,000
| 38.548638
| 125
| 0.513258
| false
| 3.923374
| false
| false
| false
|
calico/basenji
|
bin/basenji_bench_gtex.py
|
1
|
7886
|
#!/usr/bin/env python
# Copyright 2020 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser
import glob
import os
import pickle
import shutil
import subprocess
import sys
import h5py
import numpy as np
import slurm
"""
basenji_bench_gtex.py
Compute SNP expression difference scores for variants in VCF files of
fine-mapped GTEx variants to benchmark as features in a classification
task.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file>'
parser = OptionParser(usage)
# sad
parser.add_option('-f', dest='genome_fasta',
default='%s/data/hg38.fa' % os.environ['BASENJIDIR'],
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('--local',dest='local',
default=1024, type='int',
help='Local SAD score [Default: %default]')
parser.add_option('-n', dest='norm_file',
default=None,
help='Normalize SAD scores')
parser.add_option('-o',dest='out_dir',
default='sad_gtex',
help='Output directory for tables and plots [Default: %default]')
parser.add_option('--pseudo', dest='log_pseudo',
default=1, type='float',
help='Log2 pseudocount [Default: %default]')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--stats', dest='sad_stats',
default='SAD',
help='Comma-separated list of stats to save. [Default: %default]')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
parser.add_option('--ti', dest='track_indexes',
default=None, type='str',
help='Comma-separated list of target indexes to output BigWig tracks')
parser.add_option('--threads', dest='threads',
default=False, action='store_true',
help='Run CPU math and output in a separate thread [Default: %default]')
parser.add_option('-u', dest='penultimate',
default=False, action='store_true',
help='Compute SED in the penultimate layer [Default: %default]')
# classify
parser.add_option('--msl', dest='msl',
default=1, type='int',
help='Random forest min_samples_leaf [Default: %default]')
# multi
parser.add_option('-e', dest='conda_env',
default='tf2.4',
help='Anaconda environment [Default: %default]')
parser.add_option('-g', dest='gtex_vcf_dir',
default='/home/drk/seqnn/data/gtex_fine/susie_pip90')
parser.add_option('--name', dest='name',
default='gtex', help='SLURM name prefix [Default: %default]')
parser.add_option('--max_proc', dest='max_proc',
default=None, type='int',
help='Maximum concurrent processes [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script. \
(Unused, but needs to appear as dummy.)')
parser.add_option('-q', dest='queue',
default='gtx1080ti',
help='SLURM queue on which to run the jobs [Default: %default]')
parser.add_option('-r', dest='restart',
default=False, action='store_true',
help='Restart a partially completed job [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide parameters and model files')
else:
params_file = args[0]
model_file = args[1]
#######################################################
# prep work
# output directory
if not options.restart:
if os.path.isdir(options.out_dir):
print('Please remove %s' % options.out_dir, file=sys.stderr)
exit(1)
os.mkdir(options.out_dir)
# pickle options
options_pkl_file = '%s/options.pkl' % options.out_dir
options_pkl = open(options_pkl_file, 'wb')
pickle.dump(options, options_pkl)
options_pkl.close()
#######################################################
# predict
cmd_base = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
cmd_base += ' conda activate %s;' % options.conda_env
cmd_base += ' basenji_sad.py %s %s %s' % (options_pkl_file, params_file, model_file)
jobs = []
for gtex_pos_vcf in glob.glob('%s/*_pos.vcf' % options.gtex_vcf_dir):
# positive job
job_base = os.path.splitext(os.path.split(gtex_pos_vcf)[1])[0]
out_dir = '%s/%s' % (options.out_dir, job_base)
if not options.restart or not os.path.isfile('%s/sad.h5'%out_dir):
cmd = '%s -o %s %s' % (cmd_base, out_dir, gtex_pos_vcf)
name = '%s_%s' % (options.name, job_base)
j = slurm.Job(cmd, name,
'%s.out'%out_dir, '%s.err'%out_dir,
queue=options.queue, gpu=1,
mem=22000, time='1-0:0:0')
jobs.append(j)
# negative job
gtex_neg_vcf = gtex_pos_vcf.replace('_pos.','_neg.')
job_base = os.path.splitext(os.path.split(gtex_neg_vcf)[1])[0]
out_dir = '%s/%s' % (options.out_dir, job_base)
if not options.restart or not os.path.isfile('%s/sad.h5'%out_dir):
cmd = '%s -o %s %s' % (cmd_base, out_dir, gtex_neg_vcf)
name = '%s_%s' % (options.name, job_base)
j = slurm.Job(cmd, name,
'%s.out'%out_dir, '%s.err'%out_dir,
queue=options.queue, gpu=1,
mem=22000, time='1-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, max_proc=options.max_proc, verbose=True,
launch_sleep=10, update_sleep=60)
#######################################################
# classify
cmd_base = 'basenji_bench_classify.py -i 100 -p 2 -r 44 -s'
cmd_base += ' --msl %d' % options.msl
jobs = []
for gtex_pos_vcf in glob.glob('%s/*_pos.vcf' % options.gtex_vcf_dir):
tissue = os.path.splitext(os.path.split(gtex_pos_vcf)[1])[0][:-4]
sad_pos = '%s/%s_pos/sad.h5' % (options.out_dir, tissue)
sad_neg = '%s/%s_neg/sad.h5' % (options.out_dir, tissue)
out_dir = '%s/%s_class' % (options.out_dir, tissue)
if not options.restart or not os.path.isfile('%s/stats.txt' % out_dir):
cmd = '%s -o %s %s %s' % (cmd_base, out_dir, sad_pos, sad_neg)
j = slurm.Job(cmd, tissue,
'%s.out'%out_dir, '%s.err'%out_dir,
queue='standard', cpu=2,
mem=22000, time='1-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, verbose=True)
def job_completed(options, pi):
"""Check whether a specific job has generated its
output file."""
if options.out_txt:
out_file = '%s/job%d/sad_table.txt' % (options.out_dir, pi)
elif options.out_zarr:
out_file = '%s/job%d/sad.zarr' % (options.out_dir, pi)
elif options.csv:
out_file = '%s/job%d/sad_table.csv' % (options.out_dir, pi)
else:
out_file = '%s/job%d/sad.h5' % (options.out_dir, pi)
return os.path.isfile(out_file) or os.path.isdir(out_file)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
apache-2.0
| -2,270,618,590,698,872,600
| 36.023474
| 86
| 0.591301
| false
| 3.331643
| false
| false
| false
|
yasoob/youtube-dl-GUI
|
Threads/Download.py
|
1
|
6410
|
import math
from pathlib import Path
import youtube_dl
from PyQt5 import QtCore
class StopError(Exception):
pass
class DownloadSignals(QtCore.QObject):
"Define the signals available from a running download thread"
status_bar_signal = QtCore.pyqtSignal(str)
remove_url_signal = QtCore.pyqtSignal(str)
add_update_list_signal = QtCore.pyqtSignal([list])
remove_row_signal = QtCore.pyqtSignal()
finished = QtCore.pyqtSignal()
class Download(QtCore.QRunnable):
"Download Thread"
def __init__(self, opts):
super(Download, self).__init__()
self.parent = opts.get("parent")
self.error_occurred = False
self.done = False
self.file_name = ""
self.speed = "-- KiB/s"
self.eta = "00:00"
self.bytes = self.format_bytes(None)
self.url = opts.get("url")
self.directory = opts.get("directory")
if self.directory:
self.directory = str(Path(opts.get("directory")).resolve())
self.local_rowcount = opts.get("rowcount")
self.convert_format = opts.get("convert_format")
self.proxy = opts.get("proxy")
self.keep_file = opts.get("keep_file")
# Signals
self.signals = DownloadSignals()
def hook(self, li):
if self.done:
raise StopError()
_file_name = li.get("filename")
if li.get("downloaded_bytes"):
if li.get("speed"):
self.speed = self.format_speed(li.get("speed"))
self.eta = self.format_seconds(li.get("eta"))
self.bytes = self.format_bytes(li.get("total_bytes", "unknown"))
filename = str(Path(_file_name).stem)
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
filename,
self.bytes,
self.eta,
self.speed,
li.get("status"),
]
)
elif li.get("status") == "finished":
self.file_name = str(Path(_file_name).stem)
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
self.file_name,
self.bytes,
self.eta,
self.speed,
"Converting",
]
)
else:
self.bytes = self.format_bytes(li.get("total_bytes"))
self.file_name = Path(_file_name).name
self.speed = "-- KiB/s"
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
self.file_name,
self.bytes,
"00:00",
self.speed,
"Finished",
]
)
self.signals.status_bar_signal.emit("Already Downloaded")
self.signals.remove_row_signal.emit()
def _prepare_ytd_options(self):
ydl_options = {
"outtmpl": f"{self.directory}/%(title)s-%(id)s.%(ext)s",
"continuedl": True,
"quiet": True,
"proxy": self.proxy,
}
if self.convert_format is not False:
ydl_options["postprocessors"] = [
{
"key": "FFmpegVideoConvertor",
"preferedformat": self.convert_format,
}
]
if self.keep_file:
ydl_options["keepvideo"] = True
return ydl_options
def download(self):
ydl_options = self._prepare_ytd_options()
with youtube_dl.YoutubeDL(ydl_options) as ydl:
ydl.add_default_info_extractors()
ydl.add_progress_hook(self.hook)
try:
ydl.download([self.url])
except (
youtube_dl.utils.DownloadError,
youtube_dl.utils.ContentTooShortError,
youtube_dl.utils.ExtractorError,
youtube_dl.utils.UnavailableVideoError,
) as e:
self.error_occurred = True
self.signals.remove_row_signal.emit()
self.signals.remove_url_signal.emit(self.url)
self.signals.status_bar_signal.emit(str(e))
except StopError:
# import threading
# print("Exiting thread:", threading.currentThread().getName())
self.done = True
self.signals.finished.emit()
@QtCore.pyqtSlot()
def run(self):
self.signals.add_update_list_signal.emit(
[self.local_rowcount, self.url, "", "", "", "Starting"]
)
self.download()
if self.error_occurred is not True:
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
self.file_name,
self.bytes,
"00:00",
self.speed,
"Finished",
]
)
self.signals.status_bar_signal.emit("Done!")
self.signals.remove_url_signal.emit(self.url)
self.done = True
self.signals.finished.emit()
def stop(self):
self.done = True
def format_seconds(self, seconds):
(minutes, secs) = divmod(seconds, 60)
(hours, minutes) = divmod(minutes, 60)
if hours > 99:
return "--:--:--"
if hours == 0:
return "%02d:%02d" % (minutes, secs)
else:
return "%02d:%02d:%02d" % (hours, minutes, secs)
# TODO: Move to utils
def format_bytes(self, _bytes=None):
if not _bytes:
return "N/A"
_bytes = float(_bytes)
if _bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(_bytes, 1024.0))
suffix = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][exponent]
converted = _bytes / float(1024 ** exponent)
return "%.2f%s" % (converted, suffix)
def format_speed(self, speed=None):
if not speed:
return "%10s" % "---b/s"
return "%10s" % ("%s/s" % self.format_bytes(speed))
|
mit
| 7,824,562,817,449,066,000
| 31.538071
| 88
| 0.487676
| false
| 4.138154
| false
| false
| false
|
AcrDijon/henet
|
pelican/plugins/henet_comments.py
|
1
|
1027
|
# -*- coding: utf-8 -*-
import traceback
from pelican import signals
from henet.comments import ArticleThread
from henet.rst.rst2html import rst2html
# xxx read config
storage_dir = '/Users/tarek/Dev/github.com/acr-dijon.org/comments/'
# xxx cache
def add_comments(generator, content):
try:
# the article unique id is its relative source path,
# so the comments are not dependant on the URL.
source_path = content.get_relative_source_path()
article_uuid = source_path.encode('utf8')
thread = ArticleThread(storage_dir, article_uuid)
thread = thread.asjson()
for comment in thread['comments']:
html = rst2html(comment['text'], theme='acr', body_only=True)
comment['html'] = html
content.metadata["comments"] = thread
except:
# XXX for some reason Pelican does not print plugins exceptions
traceback.print_exc()
raise
def register():
signals.article_generator_write_article.connect(add_comments)
|
apache-2.0
| 475,495,567,925,430,500
| 28.342857
| 73
| 0.666991
| false
| 3.846442
| false
| false
| false
|
stianpr/flask-oauthlib
|
tests/test_oauth2/test_code.py
|
1
|
4530
|
# coding: utf-8
from datetime import datetime, timedelta
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Grant
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='code-client', client_secret='code-secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
self.authorize_url = (
'/oauth/authorize?response_type=code&client_id=%s'
) % oauth_client.client_id
def test_get_authorize(self):
rv = self.client.get('/oauth/authorize')
assert 'client_id' in rv.location
rv = self.client.get('/oauth/authorize?client_id=no')
assert 'client_id' in rv.location
url = '/oauth/authorize?client_id=%s' % self.oauth_client.client_id
rv = self.client.get(url)
assert 'error' in rv.location
rv = self.client.get(self.authorize_url)
assert b'confirm' in rv.data
def test_post_authorize(self):
url = self.authorize_url + '&scope=foo'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'invalid_scope' in rv.location
url = self.authorize_url + '&scope=email'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'code' in rv.location
url = self.authorize_url + '&scope='
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'error=Scopes+must+be+set' in rv.location
def test_invalid_token(self):
rv = self.client.get('/oauth/token')
assert b'unsupported_grant_type' in rv.data
rv = self.client.get('/oauth/token?grant_type=authorization_code')
assert b'error' in rv.data
assert b'code' in rv.data
url = (
'/oauth/token?grant_type=authorization_code'
'&code=nothing&client_id=%s'
) % self.oauth_client.client_id
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'invalid_client' not in rv.data
assert rv.status_code == 401
def test_invalid_redirect_uri(self):
authorize_url = (
'/oauth/authorize?response_type=code&client_id=code-client'
'&redirect_uri=http://localhost:8000/authorized'
'&scope=invalid'
)
rv = self.client.get(authorize_url)
assert 'error=' in rv.location
assert 'Mismatching+redirect+URI' in rv.location
def test_get_token(self):
expires = datetime.utcnow() + timedelta(seconds=100)
grant = Grant(
user_id=1,
client_id=self.oauth_client.client_id,
scope='email',
redirect_uri='http://localhost/authorized',
code='test-get-token',
expires=expires,
)
db.session.add(grant)
db.session.commit()
url = (
'/oauth/token?grant_type=authorization_code'
'&code=test-get-token&client_id=%s'
) % self.oauth_client.client_id
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
def test_get_token(self):
url = self.authorize_url + '&scope=email'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'code' in rv.location
code = rv.location.split('code=')[1]
url = (
'/oauth/token?grant_type=authorization_code'
'&code=%s&client_id=%s'
) % (code, self.oauth_client.client_id)
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'access_token' in rv.data
|
bsd-3-clause
| 1,346,164,920,111,912,200
| 32.308824
| 77
| 0.601104
| false
| 3.673966
| true
| false
| false
|
TariqAHassan/ZeitSci
|
analysis/pubmed_postprocessing.py
|
1
|
15935
|
"""
Clean the Pubmed Post Processing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dump obtained on: July 11, 2016 (double check).
Python 3.5
"""
import re
import os
import blaze
import numpy as np
import pandas as pd
from blaze import *
from copy import deepcopy
from collections import defaultdict
from collections import OrderedDict
from easymoney.easy_pandas import strlist_to_list
from easymoney.easy_pandas import twoD_nested_dict
from analysis.abstract_analysis import common_words
from tqdm import tqdm
# from odo import odo
# Goal:
# A dataframe with the following columns:
# Researcher
# Fields -- use journal ranking dataframe
# ResearcherSubfields -- use journal ranking dataframe
# ResearchAreas (sub-subfield) -- use journal ranking dataframe -- keywords
# Amount
# NormalizedAmount -- the grant in 2015 USD (2016 not handled properly...fix)
# Currency
# YearOfGrant
# FundingSource
# Collaborators X -- based on pubmed 2000-2016 download
# keywords
# Institution
# Endowment -- use wikipedia universties database
# InstitutionType -- use wikipedia universties database (i.e., public or private)
# InstitutionRanking V -- Ranking of the institution (uni). Impact Factor usage rights are prohibitive.
# InstitutionCountry
# City/NearestCity
# lng
# lat
# V = VOID, i.e., not possible
# X = to do (when all country's data are assembled)
# Also: Run an abstract analysis on each keyword/term this will standardize the terms
# ------------------------------------------------------------------------- #
# General Tools & Information #
# ------------------------------------------------------------------------- #
MAIN_FOLDER = "/Users/tariq/Google Drive/Programming Projects/ZeitSci/"
# Move to AUX_NCBI_DATA Folder
os.chdir(MAIN_FOLDER + "/Data/NCBI_DATA")
pubmed = pd.io.parsers.read_csv("Pubmed2000_to_2015.csv", nrows=100000, encoding='utf-8')
# Clean..for now
pubmed['title'] = pubmed['title'].str.replace("[", "").str.replace("]", "")
tqdm.pandas(desc="status")
# ------------------------------------------------------------------------- #
# Integrate NCBI Metadata with Journal Ranking Information #
# ------------------------------------------------------------------------- #
os.chdir(MAIN_FOLDER + "/Data/WikiPull")
# Read in Journal Database
journal_db = pd.read_csv("wiki_journal_db.csv")
# Remove '.' from Journal_Abbrev
journal_db['Journal_Abbrev'] = journal_db['Journal_Abbrev'].map(lambda x: x if str(x) == 'nan' else x.replace(".", ""))
# Convert Field to list
journal_db['Field'] = journal_db['Field'].map(lambda x: x if str(x) == 'nan' else strlist_to_list(x))
# Convert Discipline to list.
# the first map converts "['item', 'item']" --> ['item', 'item']
# the second map replaces empty lists with nans
journal_db['Discipline'] = journal_db['Discipline'].map(
lambda x: x if str(x) == 'nan' else strlist_to_list(x)). \
map(lambda x: np.NaN if str(x) == 'nan' or len(x) == 1 and x[0] == '' else x)
# Merge Field and Discipline
field_and_discipline = journal_db.apply(lambda x: [x['Field'], x['Discipline']], axis=1)
# Dict with Journal's Full Name as the key
full_title_dict = dict(zip(journal_db['Title'].str.upper(), field_and_discipline))
# Dict with Journal's Abrev. as the key
abrev_title_dict = dict(zip(journal_db['Journal_Abbrev'].str.upper(), field_and_discipline))
# Remove NaN key
abrev_title_dict = {k: v for k, v in abrev_title_dict.items() if str(k) != 'nan'}
def journal_match(full_name, partial_name):
"""
    Function to match a journal to its field and discipline
using the full_title_dict and abrev_title_dict dictionaries.
:param full_name: the full name of the journal.
:type full_name: str
:param partial_name: the abrev. of the journal.
:type partial_name: str
:return: [FIELD, DISCIPLINE]
:rtype: ``nan`` or ``list``
"""
if partial_name.upper() in abrev_title_dict:
return abrev_title_dict[partial_name.upper()]
elif partial_name.upper() != full_name.upper() and full_name.upper() in full_title_dict:
return full_title_dict[full_name.upper()]
else:
return [np.NaN, np.NaN]
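# Illustrative behaviour (a sketch; the journal names are assumptions and only
# resolve when present in wiki_journal_db.csv):
#
#     journal_match('Nature Neuroscience', 'Nat Neurosci')
#     # -> [field_list, discipline_list] on a hit, [nan, nan] otherwise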
# Attempt to add field and discipline information using a journal's full name or abrev.
mapped_field_discipline = pubmed.progress_apply(lambda x: journal_match(x['journal'], x['journal_iso']), axis=1)
# Add journal field to the pubmed data frame
pubmed['field'] = mapped_field_discipline.progress_map(lambda x: x[0])
# Add journal discipline to the pubmed data frame
pubmed['discipline'] = mapped_field_discipline.progress_map(lambda x: x[1])
def duplicate_remover(input_list):
ordered_set = list()
for element in input_list:
if element not in ordered_set:
ordered_set.append(element)
return ordered_set
def grant_processor(grant):
list_of_grants = [g.split("; ") for g in grant.split(" | ")]
grant_ids = list()
agencies = list()
regions = list()
for g in list_of_grants:
for id in g[0].split(", "):
if id not in grant_ids:
grant_ids.append(id)
if g[1] not in agencies:
agencies.append(g[1])
if g[2] not in regions:
regions.append(g[2])
return grant_ids, agencies, regions
grants = pubmed['grants'].progress_map(grant_processor, na_action='ignore')
pubmed['grant_ids'] = grants.map(lambda x: x[0], na_action='ignore')
pubmed['grant_funders'] = grants.map(lambda x: x[1], na_action='ignore')
pubmed['grant_region'] = grants.map(lambda x: x[2], na_action='ignore')
del pubmed['grants']
def keywords_mesh_combine(keywords, mesh):
if str(keywords) == 'nan' and str(mesh) != 'nan':
return mesh
elif str(mesh) == 'nan' and str(keywords) != 'nan':
return keywords
elif str(mesh) == 'nan' and str(keywords) == 'nan':
return np.NaN
return "; ".join(set(keywords.split("; ") + mesh.split("; ")))
pubmed['keywords'] = pubmed.progress_apply(lambda x: keywords_mesh_combine(x['keywords'], x['mesh_terms']), axis=1)
del pubmed['mesh_terms']
# ------------------------------------------------------------------------- #
# Add Author+Afiliation #
# ------------------------------------------------------------------------- #
pubmed['author'] = pubmed['author'].str.split("; ")
pubmed['affiliation'] = pubmed['affiliation'].str.split("; ")
authors = pubmed['author'][0]
affiliations = pubmed['affiliation'][0]
# want: department + institution
def subsection_and_uni(affiliation, join_output=True, institution_only=False):
# look into C Benz - Medicine
    # bizarre result from:
# 1. pubmed[pubmed['author'].map(lambda x: 'C Hu' in x if str(x) != 'nan' else False)]['institution'][62284]
# 2. and the Eye and Ear Institute
department = None
institution = None
affiliation_split = affiliation.split(", ")
if affiliation_split == 1:
return np.NaN
department_terms = ['institute', 'department', 'division', 'dept']
institution_terms = ['institute', 'university', 'centre', 'school', 'center', 'clinic',
'hospital', 'national labratory', 'research labratory', 'college', 'library']
institution_deference = ['institute']
department_match = [i for i in affiliation_split if any(w in i.lower() for w in department_terms)]
if len(department_match) > 0:
department = department_match[0]
institution_match = [i for i in affiliation_split if any(w in i.lower() for w in institution_terms)]
if len(institution_match) == 1:
institution = institution_match[0]
elif len(institution_match) > 1:
institution = institution_match[-1]
if (department is None and institution is None) or institution is None:
return np.NaN
elif institution_only or \
(institution is not None and department is None) or \
(any(i in department.lower() for i in institution_deference) and institution is not None):
return institution
if join_output:
return ", ".join((department, institution))
else:
return ((department if department != None else np.NaN), (institution if institution != None else np.NaN))
def multi_affiliations(affiliations):
processed_affiliations = (subsection_and_uni(a, institution_only=True) for a in affiliations)
cleaned = [i for i in processed_affiliations if str(i) != 'nan']
if len(cleaned):
return "; ".join(cleaned)
else:
return np.NaN
def author_affil(authors, affiliations):
# Remove emails?
if 'nan' in [str(authors), str(affiliations)] or not len(authors) or not len(affiliations):
return np.NaN
if len(authors) > len(affiliations):
authors = authors[:len(affiliations)]
if len(affiliations) > len(authors):
affiliations = affiliations[:len(authors)]
cleaned_affilations = [a for a in map(subsection_and_uni, affiliations) if str(a) != 'nan']
if len(cleaned_affilations):
authors_afil = list(zip(authors, list(map(lambda a: subsection_and_uni(a), cleaned_affilations))))
return [" | ".join(a) for a in authors_afil]
else:
return np.NaN
pubmed['institutions'] = pubmed['affiliation'].progress_map(lambda a: multi_affiliations(a), na_action='ignore')
pubmed['author_afil'] = pubmed.progress_apply(lambda x: author_affil(x['author'], x['affiliation']), axis=1)
# pubmed['institutions'][pubmed['institutions'].map(lambda x: ";" in x if str(x) != 'nan' else False)]
# TO DO: replace dept and dept.
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# Export
# pubmed['author_afil'] = pubmed['author_afil'].progress_map(lambda x: "; ".join(x))
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------- #
# Add Keywords #
# ------------------------------------------------------------------------- #
# Faster -- 42588.45 ms on average (N=3)
# title_keywords = pubmed['title'].map(lambda x: common_words(x, n=5, return_rank=False, digit_check=False), na_action='ignore')
# # Slower -- 47377.06 ms on average (N=3)...but easier to monitor.
# title_keywords = [0]*pubmed.shape[0]
# for t in range(len(pubmed['title'])):
# if t % 1000 == 0: print(round(float(t)/len(pubmed['title'])*100, 2), "%")
# title_keywords[t] = common_words(pubmed['title'][t], n=5, return_rank=False, digit_check=True, wrap_nans=False)
#
# # Add Keywords based on the title
# pubmed['keywords'] = title_keywords
# ------------------------------------------------------------------------- #
# Find Collaborations #
# ------------------------------------------------------------------------- #
# Most of the 'solutions' below are really just a series of hacks designed to
# drive down the run time because, frankly, this problem is a geometric nightmare when you have ~ 12 million rows.
# "...premature optimization is the root of all evil." ~ Donald Knuth
# So, you know, do as I say, not...
# ---------------------------------------------------------------------------------------- #
# Author + Index (to be used to locate field_discipline). This operation is vectorized (minus that map()...).
authors_field_discipline = pubmed['author_afil'] + pd.Series(mapped_field_discipline.index).progress_map(lambda x: [x])
# Try to work out collaborators.
# Thanks to @zvone on stackoverflow.
# see: http://stackoverflow.com/questions/39677070/procedure-to-map-all-relationships-between-elements-in-a-list-of-lists
collaborators_dict = defaultdict(set)
for paper in authors_field_discipline:
if str(paper) != 'nan':
for author in paper:
if str(author) != 'nan':
collaborators_dict[author].update(paper)
for author, collaborators in collaborators_dict.items():
collaborators.remove(author)
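# Toy illustration of the mapping above (author strings and row indexes made up):
#
#     papers = [['A Smith | Uni X', 'B Jones | Uni Y', 0],
#               ['A Smith | Uni X', 'C Wu | Uni Z', 1]]
#
# yields, after each author is removed from their own set:
#
#     collaborators_dict['A Smith | Uni X'] == {'B Jones | Uni Y', 0, 'C Wu | Uni Z', 1}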
# from itertools import chain
# a = list(chain.from_iterable(authors_field_discipline.dropna().tolist()))
# ------------------------------------------------------------------------- #
# Find the Fields For Each Author #
# ------------------------------------------------------------------------- #
# dict of fields with keys corresponding to the pubmed df
field_nan_drop = pubmed['field'].dropna().reset_index()
index_field_dict = dict(zip(field_nan_drop['index'], field_nan_drop['field']))
# dict of disciplines with keys corresponding to the pubmed df
discipline_nan_drop = pubmed['discipline'].dropna().reset_index()
discipline_field_dict = dict(zip(discipline_nan_drop['index'], discipline_nan_drop['discipline']))
def collaborators_domain_seperator(single_author):
"""
    Summarise a single author's entry in collaborators_dict (a mix of collaborator strings and pubmed row indexes) into collaborators, fields and disciplines.
Please see: http://stackoverflow.com/questions/14776980/python-splitting-list-that-contains-strings-and-integers
Notes:
1. necessary to set(collab_domain)? Unclear.
2. add year information?
    :param single_author: the author key to look up in collaborators_dict.
    :return: dict with keys index_of_papers, num_authored, collaborators, num_collaborators, fields, disciplines.
:rtype: dict
"""
collab_dict = defaultdict(list)
for i in collaborators_dict[single_author]:
collab_dict[type(i)].append(i)
fields = list()
disciplines = list()
for i in collab_dict[int]:
if i in index_field_dict:
fields += index_field_dict[i]
if i in discipline_field_dict:
disciplines += discipline_field_dict[i]
set_fields = set(fields)
set_disciplines = set(disciplines)
info = {"index_of_papers": collab_dict[int]
, "num_authored": len(collab_dict[int])
, "collaborators": collab_dict[str] if len(collab_dict[str]) else np.NaN
, "num_collaborators": len(collab_dict[str])
, "fields": set_fields if len(set_fields) else np.NaN
, "disciplines": set_disciplines if len(set_disciplines) else np.NaN}
return info
# import cProfile
# cProfile.runctx("for i in range(10000): "
# " collaborators_domain_seperator({'Yum SK', 'Youn YA', 55558, 'Lee IG', 55597, 'Kim JH', 'Moon CJ'})"
# , None, locals())
# def fast_flatten(input_list):
# return list(chain.from_iterable(input_list))
c = 0
author_info = dict()
len_authors = len(pubmed['author_afil'])
for authors in pubmed['author_afil']:
c += 1
if c % 10000 == 0 or c == 1:
print(round(float(c) / len_authors * 100, 2), "%")
if str(authors) != 'nan':
for a in authors:
author_info[a] = collaborators_domain_seperator(a)
author_df = pd.DataFrame(list(author_info.values()))
author_full = [a.split(" | ") for a in list(author_info.keys())]
author_df['authors'] = [a[0] for a in author_full]
author_df['institutions'] = [a[1] for a in author_full]
author_df[(author_df.num_collaborators > 0) & pd.notnull(author_df.fields)]
# NOTE:
# Problem: Authors Sharing Name.
# len(set([i for s in pubmed['author'] for i in s]))
# # !=
# len([i for s in pubmed['author'] for i in s])
|
gpl-3.0
| -1,397,716,387,540,309,500
| 34.569196
| 128
| 0.577722
| false
| 3.587348
| false
| false
| false
|
ekorneechev/Connector
|
source/ctor.py
|
1
|
22467
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time, properties
from GLOBAL import *
from re import escape
try: import keyring
except Exception as error:
class Keyring:
def set_password(self, *args): pass
def get_password(self, *args): return ""
keyring = Keyring()
properties.log.warning("Python 3: %s. Password storage is not available for FreeRDP." % error)
try: enableLog = properties.loadFromFile('default.conf')['LOG']
except KeyError: enableLog = DEFAULT['LOG']
if enableLog: STD_TO_LOG = ' >> ' + STDLOGFILE + " 2>&1 &"
else: STD_TO_LOG = ' &'
def f_write(f_name, cfg):
"""Создание файла с конфигурацией для remmina"""
f = open(WORKFOLDER+f_name,"w")
f.write("[remmina]\n")
for key in cfg.keys():
print(key,cfg[key], sep='=',file=f)
f.close()
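# Hedged usage sketch (the key/value pairs are examples only):
#
#     f_write('.tmp.remmina', {'server': 'host.example.org', 'protocol': 'RDP'})
#
# writes WORKFOLDER + '.tmp.remmina' containing:
#
#     [remmina]
#     server=host.example.org
#     protocol=RDP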
class Remmina:
"""Класс, обеспечивающий подключение через remmina"""
cfg = {}
f_name = ".tmp.remmina"
def create_cfg_file(self, args):
"""Создание файла конфигурации для соединения"""
protocol = self.cfg['protocol']
server, login = properties.searchSshUser(args[0])
self.cfg['server'] = server
self.cfg['name'] = args.pop()
if protocol == 'RDP':
#[user, domain, color, quality, resolution, viewmode, folder, printer, clipboard, sound]
self.cfg['username'] = args[1]
self.cfg['domain'] = args[2]
self.cfg['colordepth'] = args[3]
self.cfg['quality'] = args[4]
self.cfg['resolution'] = args[5]
self.cfg['viewmode'] = args[6]
self.cfg['sharefolder'] = args[7]
self.cfg['shareprinter'] = args[8]
self.cfg['disableclipboard'] = args[9]
self.cfg['sound'] = args[10]
self.cfg['sharesmartcard'] = args[11]
if protocol == 'NX':
#[user, quality, resolution, viewmode, keyfile, crypt, clipboard, _exec]
self.cfg['username'] = args[1]
self.cfg['quality'] = args[2]
self.cfg['resolution'] = args[3]
self.cfg['viewmode'] = args[4]
self.cfg['nx_privatekey'] = args[5]
self.cfg['disableencryption'] = args[6]
self.cfg['disableclipboard'] = args[7]
self.cfg['exec'] = args[8]
if protocol == 'VNC':
#[user, quality, color, viewmode, viewonly, crypt, clipboard, showcursor]
self.cfg['username'] = args[1]
self.cfg['quality'] = args[2]
self.cfg['colordepth'] = args[3]
self.cfg['viewmode'] = args[4]
self.cfg['viewonly'] = args[5]
self.cfg['disableencryption'] = args[6]
self.cfg['disableclipboard'] = args[7]
self.cfg['showcursor'] = args[8]
if protocol == 'XDMCP':
#[color, viewmode, resolution, once, showcursor, _exec]
self.cfg['colordepth'] = args[1]
self.cfg['viewmode'] = args[2]
self.cfg['resolution'] = args[3]
self.cfg['once'] = args[4]
self.cfg['showcursor'] = args[5]
self.cfg['exec'] = args[6]
if protocol == 'SSH':
#[user, SSH_auth, keyfile, charset, _exec]
if login: self.cfg['ssh_username'] = login
else: self.cfg['ssh_username'] = args[1]
self.cfg['ssh_auth'] = args[2]
self.cfg['ssh_privatekey'] = args[3]
self.cfg['ssh_charset'] = args[4]
self.cfg['exec'] = args[5]
if protocol == 'SFTP':
#[user, SSH_auth, keyfile, charset, execpath]
if login: self.cfg['ssh_username'] = login
else: self.cfg['ssh_username'] = args[1]
self.cfg['ssh_auth'] = args[2]
self.cfg['ssh_privatekey'] = args[3]
self.cfg['ssh_charset'] = args[4]
self.cfg['execpath'] = args[5]
if protocol == 'SPICE':
#[tls, viewonly, resize, clipboard, cards, sound, cacert]
self.cfg['usetls'] = args[1]
self.cfg['viewonly'] = args[2]
self.cfg['resizeguest'] = args[3]
self.cfg['disableclipboard'] = args[4]
self.cfg['sharesmartcard'] = args[5]
self.cfg['enableaudio'] = args[6]
self.cfg['cacert'] = args[7]
f_write(self.f_name, self.cfg)
def start(self, parameters):
"""Запуск remmina с необходимыми параметрами"""
self.create_cfg_file(parameters)
properties.log.info ("Remmina: подключение по протоколу %s к серверу: %s", self.cfg['protocol'], self.cfg['server'])
command = 'remmina -c "' + WORKFOLDER + self.f_name + '"'
properties.log.info (command)
os.system('cd $HOME && ' + command + STD_TO_LOG)
class VncRemmina(Remmina):
"""Класс для настройки VNC-соединения через Remmina"""
def __init__(self):
self.cfg = dict(keymap='', quality=9, disableencryption=0, colordepth=24,
hscale=0, group='', password='', name='VNC-connection: ', viewonly=0,
disableclipboard=0, protocol='VNC', vscale=0, username='', disablepasswordstoring=1,
showcursor=0, disableserverinput=0, server='',aspectscale=0,
window_maximize=1, window_width=800, window_height=600, viewmode=1)
self.f_name = '.tmp_VNC.remmina'
class VncViewer:
"""Класс для настройки VNC-соединения через VncViewer"""
def start(self, args):
if type(args) == str:
properties.log.info ("VNC: подключение к серверу %s", args)
command = 'vncviewer ' + args
server = args
else:
command = 'vncviewer ' + args[0] + ' '
if args[1]: command += args[1]
if args[2]: command += args[2]
server = args[0]
properties.log.info ("VNC: подключение к серверу %s. Команда запуска:", server)
properties.log.info (command)
os.system(command + STD_TO_LOG)
class RdpRemmina(Remmina):
"""Класс для настройки RDP-соединения через Remmina"""
def __init__(self):
self.cfg = dict(disableclipboard=0, clientname='', quality=0, console=0, sharesmartcard=0,
resolution='', group='', password='', name='RDP-connection: ',
shareprinter=0, security='', protocol='RDP', execpath='', disablepasswordstoring=1,
sound='off', username='', sharefolder='', domain='', viewmode=3,
server='', colordepth=32, window_maximize=1, window_width=800, window_height=600)
self.cfg['exec'] = ''
self.f_name = '.tmp_RDP.remmina'
class XFreeRdp:
"""Класс для настройки RDP-соединения через xfreerdp"""
def start(self, args):
_link = "http://wiki.myconnector.ru/install#freerdp"
if freerdpCheck():
freerdpVersion = freerdpCheckVersion()
if freerdpVersion > "1.2":
nameConnect = args[len(args)-1]
command = 'xfreerdp /v:' + args[0] + " /t:'" + nameConnect + "'"
if args[1]: command += ' /u:' + args[1]
if args[2]: command += ' /d:' + args[2]
if args[3]: command += ' /f'
if args[4]: command += ' +clipboard'
if args[5]: command += ' /size:' + args[5]
if args[6]: command += ' /bpp:' + args[6]
if args[7]: command += ' /drive:LocalFolder,"' + args[7] + '"'
if args[8]: command += ' /g:' + args[8]
if args[9]: command += ' /gu:' + args[9]
if args[10]: command += ' /gd:' + args[10]
if args[11]:
command = "GATEPWD='" + args[11] + "' && " + command
command += ' /gp:$GATEPWD'
if args[12]: command += ' /admin'
if args[13]: command += SCARD
if args[14]: command += ' /printer'
if args[15]: command += ' /sound:sys:alsa'
if args[16]: command += ' /microphone:sys:alsa'
if args[17]: command += ' /multimon'
if args[18]: command += ' +compression'
if args[19]: command += ' /compression-level:' + args[19]
if args[20]: command += ' +fonts'
if args[21]: command += ' +aero'
if args[22]: command += ' +window-drag'
if args[23]: command += ' +menu-anims'
if args[24]: command += ' -themes'
if args[25]: command += ' -wallpaper'
if args[26]: command += ' /nsc'
if args[27]: command += ' /jpeg'
if args[28]: command += ' /jpeg-quality:' + str(args[28])
if args[29] and properties.checkPath(USBPATH): command += ' /drive:MEDIA,' + USBPATH
if args[31]: command += ' /workarea'
                try: # Backward compatibility with the previous version; < 1.4.0
if args[32]: command += ' /span'
except IndexError: pass
try: #< 1.4.1
if args[33]: command += ' /drive:Desktop,' + DESKFOLDER
if args[34]: command += ' /drive:Downloads,' + DOWNFOLDER
if args[35]: command += ' /drive:Documents,' + DOCSFOLDER
except IndexError: pass
try: #< 1.8.0
if args[36]: command += ' /gdi:hw'
else: command += ' /gdi:sw'
except IndexError: command += ' /gdi:sw'
try: #< 1.8.2
if args[38]: command += ' /cert-ignore'
if args[37]: command += ' +auto-reconnect'
except IndexError: command += ' +auto-reconnect /cert-ignore'
try:
if args[40] and len(args) >= 42: command += ' /p:' + escape(args[40])
elif args[30]: command += ' /p:' + escape(passwd(args[0], args[1]))
else: command += ' -sec-nla'
except: command += ' -sec-nla'
try:
if args[41] and len(args) >= 43: command += ' +glyph-cache'
except IndexError: pass
try:
                    # for compatibility we also need to check the length of 'args'
                    # length = 'last index' + 1 + 'title of the connection' (since version 1.5.6...)
if args[42] and len(args) >= 44: command += ' ' + args[42]
except IndexError: pass
server = args[0]
properties.log.info ("FreeRDP: подключение к серверу %s. Команда запуска:", server)
try: cmd2log = command.replace("/p:" + command.split("/p:")[1].split(' ')[0],"/p:<hidden>")
except: cmd2log = command
properties.log.info (cmd2log)
os.system(command + STD_TO_LOG)
if enableLog:
signal.signal(signal.SIGCHLD,signal.SIG_IGN)
subprocess.Popen([MAINFOLDER + "/connector-check-xfreerdp-errors"])
else:
properties.log.warning ("FreeRDP version below 1.2!")
os.system("zenity --error --text='\nУстановленная версия FreeRDP (%s) не соответствует минимальным требованиям,"
" подробности <a href=\"%s\">здесь</a>!' --no-wrap --icon-name=connector" % (freerdpVersion, _link))
else:
properties.log.warning ("FreeRDP is not installed!")
os.system("zenity --error --text='\nFreeRDP не установлен, подробности <a href=\"%s\">здесь</a>!' --no-wrap --icon-name=connector" % _link)
class NxRemmina(Remmina):
"""Класс для настройки NX-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='NX-connection: ', protocol='NX', quality=0, disableencryption=0,
                        resolution='', group='', password='', username='', nx_privatekey='',  # key name matches create_cfg_file()
showcursor=0, server='', disableclipboard=0, window_maximize=1,
window_width=800, window_height=600, viewmode=4, disablepasswordstoring=1)
self.cfg['exec'] = ''
self.f_name = '.tmp_NX.remmina'
class XdmcpRemmina(Remmina):
"""Класс для настройки XDMCP-соединения через Remmina"""
def __init__(self):
self.cfg = dict(resolution='', group='', password='', name='XDMCP-connection: ',
protocol='XDMCP', once=0, showcursor=0, server='',colordepth=0,
window_maximize=1, viewmode=1, window_width=800, window_height=600, disablepasswordstoring=1)
self.cfg['exec'] = ''
self.f_name = '.tmp_XDMCP.remmina'
class SftpRemmina(Remmina):
"""Класс для настройки SFTP-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='SFTP-connection: ', protocol='SFTP', ssh_enabled=0, ssh_auth=0,
ssh_charset='UTF-8', ssh_privatekey='', username='', ssh_username='',
group='', password='', execpath='/', server='', window_maximize=0,
window_height=600, window_width=800, ftp_vpanedpos=360, viewmode=0, disablepasswordstoring=1)
self.f_name = '.tmp_SFTP.remmina'
class SshRemmina(Remmina):
"""Класс для настройки SSH-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='SSH-connection: ', protocol='SSH', ssh_auth=0, ssh_charset='UTF-8',
ssh_privatekey='', group='', password='', username='', ssh_username='', ssh_enabled=0,
server='', window_maximize=0, window_width=500, window_height=500, viewmode=0, disablepasswordstoring=1)
self.cfg['exec'] = ''
self.f_name = '.tmp_SSH.remmina'
class SpiceRemmina(Remmina):
"""Класс для настройки SPICE-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='SPICE-connection: ', protocol='SPICE', ssh_enabled=0, ssh_auth=0,
                        disableclipboard=0, ssh_privatekey='', usetls=0, ssh_username='',  # 'usetls' matches create_cfg_file()
enableaudio=0, password='', cacert='', server='', ssh_loopback=0,
resizeguest=0, sharesmartcard=0, ssh_server='', viewonly=0, disablepasswordstoring=1)
self.f_name = '.tmp_SPICE.remmina'
class Vmware:
"""Класс для настройки соединения к VMWare серверу"""
def start(self, args):
if vmwareCheck():
if type(args) == str:
command = 'vmware-view -q -s ' + args
properties.log.info ("VMware: подключение к серверу %s", args)
properties.log.info (command)
else:
command = 'vmware-view -q -s ' + args[0]
if args[1]: command += ' -u ' + args[1]
if args[2]: command += ' -d ' + args[2]
if args[4]: command += ' --fullscreen'
properties.log.info ("VMware: подключение к серверу %s", args[0])
properties.log.info (command)
if args[3]: command += ' -p ' + args[3]
os.system(command + STD_TO_LOG)
else:
properties.log.warning ("VMware Horizon Client is not installed!")
os.system("zenity --error --text='\nVMware Horizon Client не установлен!' --no-wrap --icon-name=connector")
def _missCitrix():
"""Message for user, if Citrix Receiver not installed"""
properties.log.warning ("Citrix Receiver is not installed!")
os.system("zenity --error --text='\nCitrix Receiver не установлен!' --no-wrap --icon-name=connector")
class Citrix:
"""Класс для настройки ICA-соединения к Citrix-серверу"""
def start(self, args):
if type(args) == list:
addr = args[0]
else: addr = args
if citrixCheck():
properties.log.info ("Citrix: подключение к серверу %s", addr)
os.system('/opt/Citrix/ICAClient/util/storebrowse --addstore ' + addr)
os.system('/opt/Citrix/ICAClient/selfservice --icaroot /opt/Citrix/ICAClient' + STD_TO_LOG)
else: _missCitrix()
def preferences():
if citrixCheck():
properties.log.info ("Citrix: открытие настроек программы")
os.system('/opt/Citrix/ICAClient/util/configmgr --icaroot /opt/Citrix/ICAClient' + STD_TO_LOG)
else: _missCitrix()
class Web:
"""Класс для настройки подключения к WEB-ресурсу"""
def start(self, args):
if type(args) == list:
addr = args[0]
else: addr = args
        if "://" not in addr:
addr = "http://" + addr
command = 'xdg-open "' + addr + '"'
properties.log.info ("WWW: открытие web-ресурса %s", addr)
properties.log.info (command)
os.system ( command + STD_TO_LOG)
class FileServer:
"""Класс для настройки подключения к файловому серверу"""
def start(self, args):
_exec = properties.loadFromFile('default.conf')['FS'] + ' "'
if type(args) == str:
            if "://" not in args:
os.system("zenity --warning --text='Введите протокол подключения!\n"
"Или выберите из списка в дополнительных параметрах.' --no-wrap --icon-name=connector")
return 1
else:
command = _exec + args + '"'
server = args
else:
try: protocol, server = args[0].split("://")
except: server = args[0]; protocol = args[4]
command = _exec + protocol + "://"
if args[2]: command += args[2] + ";"
if args[1]: command += args[1] + "@"
command += server
if args[3]: command += '/' + args[3]
command += '"'
properties.log.info ("Открытие файлового сервера %s. Команда запуска:", server)
properties.log.info (command)
os.system (command + STD_TO_LOG)
def definition(protocol):
"""Функция определения протокола"""
whatProgram = properties.loadFromFile('default.conf') #загрузка параметров с выбором программ для подключения
if protocol == 'VNC':
if whatProgram['VNC'] == 0:
connect = VncRemmina()
else: connect = VncViewer()
elif protocol == 'RDP':
if whatProgram['RDP'] == 0:
connect = RdpRemmina()
else: connect = XFreeRdp()
elif protocol == 'NX':
connect = NxRemmina()
elif protocol == 'XDMCP':
connect = XdmcpRemmina()
elif protocol == 'SSH':
connect = SshRemmina()
elif protocol == 'SFTP':
connect = SftpRemmina()
elif protocol == 'VMWARE':
connect = Vmware()
elif protocol == 'CITRIX':
connect = Citrix()
elif protocol == 'WEB':
connect = Web()
elif protocol == 'SPICE':
connect = SpiceRemmina()
elif protocol == 'FS':
connect = FileServer()
return connect
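# Usage sketch (editorial illustration, not part of the original module):
# definition() only selects the handler class; the caller then passes start()
# an argument list laid out for the chosen protocol (see the per-protocol
# comments in Remmina.create_cfg_file()).  The values below are placeholders:
#
#     connect = definition('SSH')
#     connect.start(['user@192.168.0.10', '', 0, '', 'UTF-8', '', 'my-server'])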
def citrixCheck():
"""Фунцкия проверки наличия в системе Citrix Receiver"""
check = int(subprocess.check_output(CITRIX_CHECK + "/dev/null 2>&1; echo $?", shell=True, universal_newlines=True).strip())
check = not bool(check)
return check
def vmwareCheck():
"""Фунцкия проверки наличия в системе VMware Horizon Client"""
check = int(subprocess.check_output("which vmware-view > /dev/null 2>&1; echo $?", shell=True, universal_newlines=True).strip())
check = not bool(check)
return check
def freerdpCheck():
"""Фунцкия проверки наличия в системе FreeRDP"""
check = int(subprocess.check_output("which xfreerdp > /dev/null 2>&1; echo $?", shell=True, universal_newlines=True).strip())
check = not bool(check)
return check
def freerdpCheckVersion():
"""Фунцкия определения версии FreeRDP"""
version = subprocess.check_output("xfreerdp /version; exit 0",shell=True, universal_newlines=True).strip().split('\t')
version = version[0].split(" "); version = version[4].split("-")[0];
return version
def passwd(server, username):
"""Ввод пароля и запрос о его сохранении в связке ключей"""
password = keyring.get_password(str(server),str(username))
if password: return password
separator = "|CoNnEcToR|"
try:
password, save = subprocess.check_output("zenity --forms --title=\"Аутентификация (with NLA)\" --text=\"Имя пользователя: %s\""
" --add-password=\"Пароль:\" --add-combo=\"Хранить пароль в связке ключей:\" --combo-values=\"Да|Нет\""
" --separator=\"%s\" 2>/dev/null" % (username, separator),shell=True, universal_newlines=True).strip().split(separator)
if save == "Да" and password: keyring.set_password(str(server),str(username),str(password))
    # if the zenity window is closed or the Cancel button is pressed, trigger a FreeRDP error
except ValueError:
password = " /CANCELED"
properties.log.warning ("FreeRDP: подключение отменено пользователем (окно zenity закрыто или нажата кнопка Отмена):")
return password
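# Editorial note: credentials saved by passwd() live in the system keyring with
# the server as the service name and the user name as the account; a stale
# entry can be removed with keyring.delete_password(server, username).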
if __name__ == "__main__":
pass
|
gpl-2.0
| -4,469,510,885,947,356,700
| 46.831435
| 151
| 0.551243
| false
| 3.118206
| false
| false
| false
|
zbyte64/django-hyperadmin
|
hyperadmin/resources/storages/resources.py
|
1
|
4161
|
import os
from django.core.exceptions import ObjectDoesNotExist
from hyperadmin.links import Link
from hyperadmin.resources.crud import CRUDResource
from hyperadmin.resources.storages.forms import UploadForm, Base64UploadForm, UploadLinkForm
from hyperadmin.resources.storages.indexes import StorageIndex
from hyperadmin.resources.storages.endpoints import ListEndpoint, CreateUploadEndpoint, Base64UploadEndpoint, BoundFile
class StorageQuery(object):
def __init__(self, storage, path=''):
self.storage = storage
self.path = path
def filter(self, path):
if self.path:
path = os.path.join(self.path, path)
return StorageQuery(self.storage, path)
def get_dirs_and_files(self):
try:
dirs, files = self.storage.listdir(self.path)
except NotImplementedError:
return [], []
if self.path:
files = [os.path.join(self.path, filename) for filename in files]
return dirs, [BoundFile(self.storage, filename) for filename in files]
def get(self, path):
if self.path:
path = os.path.join(self.path, path)
if not self.storage.exists(path):
raise ObjectDoesNotExist
return BoundFile(self.storage, path)
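# Usage sketch (editorial illustration, not part of the original module).  The
# storage location and sub-directory below are placeholders only.
def _example_listing():  # pragma: no cover
    from django.core.files.storage import FileSystemStorage
    storage = FileSystemStorage(location='/tmp/uploads')
    query = StorageQuery(storage).filter('images')
    dirs, bound_files = query.get_dirs_and_files()
    # bound_files already holds BoundFile wrappers around the storage paths
    return dirs, bound_files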
class StorageResource(CRUDResource):
#resource_adaptor = storage object
form_class = UploadForm
upload_link_form_class = UploadLinkForm
base64_upload_form_class = Base64UploadForm
list_endpoint = (ListEndpoint, {})
create_upload_endpoint = (CreateUploadEndpoint, {})
base64_upload_endpoint = (Base64UploadEndpoint, {})
def __init__(self, **kwargs):
kwargs.setdefault('app_name', '-storages')
super(StorageResource, self).__init__(**kwargs)
def get_storage(self):
return self.resource_adaptor
storage = property(get_storage)
def get_base64_upload_form_class(self):
return self.base64_upload_form_class
def get_upload_link_form_class(self):
return self.upload_link_form_class
def get_view_endpoints(self):
endpoints = super(StorageResource, self).get_view_endpoints()
endpoints.insert(0, self.create_upload_endpoint)
return endpoints
def get_indexes(self):
return {'primary': StorageIndex('primary', self)}
def get_primary_query(self):
return StorageQuery(self.storage)
def get_instances(self):
'''
Returns a set of native objects for a given state
'''
if 'page' in self.state:
return self.state['page'].object_list
if self.state.has_view_class('change_form'):
return []
        # StorageQuery is not iterable itself, so list its contents explicitly
        dirs, files = self.get_primary_query().get_dirs_and_files()
        # get_dirs_and_files() already wraps the file paths in BoundFile objects
        instances = list(files)
        return instances
def get_item_form_kwargs(self, item=None, **kwargs):
kwargs = super(StorageResource, self).get_item_form_kwargs(item, **kwargs)
kwargs['storage'] = self.storage
return kwargs
def get_form_kwargs(self, **kwargs):
kwargs = super(StorageResource, self).get_form_kwargs(**kwargs)
kwargs['storage'] = self.storage
return kwargs
def get_upload_link_form_kwargs(self, **kwargs):
kwargs = self.get_form_kwargs(**kwargs)
kwargs['resource'] = self
kwargs['request'] = self.api_request.request
return kwargs
def get_item_url(self, item):
return self.link_prototypes['update'].get_url(item=item)
def get_item_storage_link(self, item, **kwargs):
link_kwargs = {'url': item.instance.url,
'resource': self,
'prompt': 'Absolute Url',
'rel': 'storage-url', }
link_kwargs.update(kwargs)
storage_link = Link(**link_kwargs)
return storage_link
def get_item_outbound_links(self, item):
links = self.create_link_collection()
links.append(self.get_item_storage_link(item, link_factor='LO'))
return links
def get_item_prompt(self, item):
return item.instance.name
def get_paginator_kwargs(self):
return {}
|
bsd-3-clause
| -3,845,229,485,147,536,400
| 33.106557
| 119
| 0.641192
| false
| 4.012536
| false
| false
| false
|
rajul/tvb-framework
|
tvb/adapters/uploaders/gifti/parser.py
|
1
|
9083
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Mihai Andrei <mihai.andrei@codemart.ro>
.. moduleauthor:: Calin Pavel <calin.pavel@codemart.ro>
"""
import os
import numpy as np
from nibabel.gifti import giftiio
from nibabel.nifti1 import intent_codes, data_type_codes
from tvb.basic.logger.builder import get_logger
from tvb.core.adapters.exceptions import ParseException
from tvb.datatypes.surfaces import CorticalSurface, center_vertices, make_surface
from tvb.datatypes.time_series import TimeSeriesSurface
OPTION_READ_METADATA = "ReadFromMetaData"
class GIFTIParser(object):
"""
This class reads content of a GIFTI file and builds / returns a Surface instance
filled with details.
"""
UNIQUE_ID_ATTR = "UniqueID"
SUBJECT_ATTR = "SubjectID"
ASP_ATTR = "AnatomicalStructurePrimary"
DATE_ATTR = "Date"
DESCRIPTION_ATTR = "Description"
NAME_ATTR = "Name"
TIME_STEP_ATTR = "TimeStep"
def __init__(self, storage_path, operation_id):
self.logger = get_logger(__name__)
self.storage_path = storage_path
self.operation_id = operation_id
@staticmethod
def _get_meta_dict(data_array):
data_array_meta = data_array.meta
if data_array_meta is None or data_array_meta.data is None:
return {}
return dict((meta_pair.name, meta_pair.value) for meta_pair in data_array_meta.data)
@staticmethod
def _is_surface_gifti(data_arrays):
return (len(data_arrays) == 2
and intent_codes.code["NIFTI_INTENT_POINTSET"] == data_arrays[0].intent
and data_type_codes.code["NIFTI_TYPE_FLOAT32"] == data_arrays[0].datatype
and intent_codes.code["NIFTI_INTENT_TRIANGLE"] == data_arrays[1].intent
and data_type_codes.code["NIFTI_TYPE_INT32"] == data_arrays[1].datatype)
@staticmethod
def _is_timeseries_gifti(data_arrays):
return (len(data_arrays) > 1
and intent_codes.code["NIFTI_INTENT_TIME_SERIES"] == data_arrays[0].intent
and data_type_codes.code["NIFTI_TYPE_FLOAT32"] == data_arrays[0].datatype)
def _parse_surface(self, data_arrays, data_arrays_part2, surface_type, should_center):
meta_dict = self._get_meta_dict(data_arrays[0])
anatomical_structure_primary = meta_dict.get(self.ASP_ATTR)
gid = meta_dict.get(self.UNIQUE_ID_ATTR)
subject = meta_dict.get(self.SUBJECT_ATTR)
title = meta_dict.get(self.NAME_ATTR)
# Now try to determine what type of surface we have
# If a surface type is not explicitly given we use the type specified in the metadata
if surface_type == OPTION_READ_METADATA:
surface_type = anatomical_structure_primary
if surface_type is None:
raise ParseException("Please specify the type of the surface")
surface = make_surface(surface_type)
if surface is None:
raise ParseException("Could not determine surface type! %s" % surface_type)
# Now fill TVB data type with metadata
if gid is not None:
gid = gid.replace("{", "").replace("}", "")
surface.gid = gid
if subject is not None:
surface.subject = subject
if title is not None:
surface.title = title
surface.storage_path = self.storage_path
surface.set_operation_id(self.operation_id)
surface.zero_based_triangles = True
# Now fill TVB data type with geometry data
vertices = data_arrays[0].data
triangles = data_arrays[1].data
vertices_in_lh = len(vertices)
# If a second file is present append that data
if data_arrays_part2 is not None:
# offset the indices
offset = len(vertices)
vertices = np.vstack([vertices, data_arrays_part2[0].data])
triangles = np.vstack([triangles, offset + data_arrays_part2[1].data])
if should_center:
vertices = center_vertices(vertices)
# set hemisphere mask if cortex
if isinstance(surface, CorticalSurface):
# if there was a 2nd file then len(vertices) != vertices_in_lh
surface.hemisphere_mask = np.zeros(len(vertices), dtype=np.bool)
surface.hemisphere_mask[vertices_in_lh:] = 1
surface.vertices = vertices
surface.triangles = triangles
return surface
def _parse_timeseries(self, data_arrays):
# Create TVB time series to be filled
time_series = TimeSeriesSurface()
time_series.storage_path = self.storage_path
time_series.set_operation_id(self.operation_id)
time_series.start_time = 0.0
time_series.sample_period = 1.0
        # First process the first data_array and extract important data from its metadata
meta_dict = self._get_meta_dict(data_arrays[0])
gid = meta_dict.get(self.UNIQUE_ID_ATTR)
sample_period = meta_dict.get(self.TIME_STEP_ATTR)
time_series.subject = meta_dict.get(self.SUBJECT_ATTR)
time_series.title = meta_dict.get(self.NAME_ATTR)
if gid:
time_series.gid = gid.replace("{", "").replace("}", "")
if sample_period:
time_series.sample_period = float(sample_period)
# todo : make sure that write_time_slice is not required here
# Now read time series data
for data_array in data_arrays:
time_series.write_data_slice([data_array.data])
# Close file after writing data
time_series.close_file()
return time_series
def parse(self, data_file, data_file_part2=None, surface_type=OPTION_READ_METADATA, should_center=False):
"""
        Parse GIFTI file(s) and return a Surface or a TimeSeries built from them.
:param surface_type: one of "Cortex" "Head" "ReadFromMetaData"
:param data_file_part2: a file containing the second part of the surface
"""
self.logger.debug("Start to parse GIFTI file: %s" % data_file)
if data_file is None:
raise ParseException("Please select GIFTI file which contains data to import")
if not os.path.exists(data_file):
            raise ParseException("Provided file %s does not exist" % data_file)
if data_file_part2 is not None and not os.path.exists(data_file_part2):
            raise ParseException("Provided file part %s does not exist" % data_file_part2)
try:
gifti_image = giftiio.read(data_file)
data_arrays = gifti_image.darrays
self.logger.debug("File parsed successfully")
if data_file_part2 is not None:
data_arrays_part2 = giftiio.read(data_file_part2).darrays
else:
data_arrays_part2 = None
except Exception, excep:
self.logger.exception(excep)
msg = "File: %s does not have a valid GIFTI format." % data_file
raise ParseException(msg)
self.logger.debug("Determine data type stored in GIFTI file")
# First check if it's a surface
if self._is_surface_gifti(data_arrays):
            # If a second part exists it must be of the same type
if data_arrays_part2 is not None and not self._is_surface_gifti(data_arrays_part2):
raise ParseException("Second file must be a surface too")
return self._parse_surface(data_arrays, data_arrays_part2, surface_type, should_center)
elif self._is_timeseries_gifti(data_arrays):
return self._parse_timeseries(data_arrays)
else:
raise ParseException("Could not map data from GIFTI file to a TVB data type")
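# Usage sketch (editorial illustration, not part of the original module).  The
# storage path, operation id and file name below are placeholders only.
def _example_import_surface():  # pragma: no cover
    parser = GIFTIParser(storage_path='/tmp/tvb_storage', operation_id=42)
    # surface_type may also be left as OPTION_READ_METADATA to trust the file's own metadata;
    # pass data_file_part2 to append the second hemisphere
    return parser.parse('/tmp/lh.pial.gii', surface_type='Cortex', should_center=True)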
|
gpl-2.0
| 4,596,486,521,529,889,300
| 40.479452
| 109
| 0.654519
| false
| 3.711892
| false
| false
| false
|
jarifibrahim/ashoka-dashboard
|
dashboard/migrations/0012_auto_20161210_1033.py
|
1
|
3097
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-10 05:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0011_auto_20161209_2249'),
]
operations = [
migrations.AddField(
model_name='email',
name='active',
field=models.BooleanField(default=False, help_text='If this field is set to ON all the emails of the specified type will use this template. Each type of email can have only one default', verbose_name='Use this as default template of its type?'),
),
migrations.AddField(
model_name='email',
name='name',
field=models.CharField(default='A', help_text='Template name to uniquely identify it.', max_length=200, verbose_name='Name of the Template'),
preserve_default=False,
),
migrations.AlterField(
model_name='email',
name='message',
field=models.TextField(help_text="If you wish to include the url to consultant form in the email, please add 'FORM_URL' (without quotes) placeholder. It will be replaced by the actual consultant url in the email.", verbose_name='Body of the Email'),
),
migrations.AlterField(
model_name='email',
name='type',
field=models.CharField(choices=[('IM', 'Instruction Mail'), ('RM', 'Reminder Mail')], help_text='Type of the template. Currently Instruction Email template and Reminder Email template are supported.', max_length=5, verbose_name='Type of Email'),
),
migrations.AlterField(
model_name='teamstatus',
name='automatic_reminder',
field=models.BooleanField(default=True, help_text='Should periodic Automatic Reminders Emails be sent?', verbose_name='Send Automatic Reminders?'),
),
migrations.AlterField(
model_name='teamstatus',
name='call_change_count',
field=models.IntegerField(default=0, help_text='This value will be added to total calls count.', verbose_name='Add/Subtract Total Calls count'),
),
migrations.AlterField(
model_name='teamstatus',
name='kick_off',
field=models.CharField(choices=[('NS', 'Not Started'), ('IMS', 'Intro Mail Sent'), ('DA', 'Date Arranged'), ('CH', 'Call Happened')], default='NS', max_length=5, verbose_name='Kick Off Status'),
),
migrations.AlterField(
model_name='teamstatus',
name='last_automatic_reminder',
field=models.DateTimeField(blank=True, editable=False, null=True, verbose_name='Last automatic reminder sent on'),
),
migrations.AlterField(
model_name='teamstatus',
name='mid_term',
field=models.CharField(choices=[('NS', 'Not Started'), ('IMS', 'Intro Mail Sent'), ('DA', 'Date Arranged'), ('CH', 'Call Happened')], default='NS', max_length=5, verbose_name='Mid Term Status'),
),
]
|
apache-2.0
| 2,860,983,926,016,569,000
| 49.770492
| 261
| 0.619309
| false
| 4.213605
| false
| false
| false
|
cdd1969/pygwa
|
lib/flowchart/nodes/n08_detectpeaks_v2/node_detectpeaks_v2.py
|
1
|
11749
|
#!/usr/bin python
# -*- coding: utf-8 -*-
from pyqtgraph import BusyCursor
from pyqtgraph.Qt import QtGui
import numpy as np
from lib.flowchart.nodes.generalNode import NodeWithCtrlWidget, NodeCtrlWidget
from lib.functions.general import isNumpyDatetime, isNumpyNumeric
from lib.functions.detectpeaks import full_peak_detection_routine, prepare_order, prepare_datetime
class detectPeaksTSNode_v2(NodeWithCtrlWidget):
"""Detect peaks (minima/maxima) from passed TimeSeries, check period"""
nodeName = "Detect Peaks (v2)"
uiTemplate = [
{'title': 'data', 'name': 'column', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Column name with hydrograph data'},
{'name': 'datetime', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Location of the datetime objects.'},
{'name': 'Peak Detection Params', 'type': 'group', 'children': [
{'name': 'T', 'type': 'float', 'value': 12.42, 'default': 12.42, 'suffix': ' hours', 'tip': 'Awaited period of the signal in hours.'},
{'title': 'dt', 'name': 'hMargin', 'type': 'float', 'value': 1.5, 'default': 1.5, 'limits': (0., 100.), 'suffix': ' hours', 'tip': 'Number of hours, safety margin when comparing period length.\nSee formula below:\nT/2 - dt < T_i/2 < T/2 + dt'},
{'name': 'order', 'type': 'str', 'value': '?', 'readonly': True, 'tip': 'How many points on each side to use for the comparison'},
{'name': 'mode', 'type': 'list', 'values': ['clip', 'wrap'], 'value': 'clip', 'default': 'clip', 'tip': 'How the edges of the vector are treated. ‘wrap’ (wrap around)\nor ‘clip’ (treat overflow as the same as the last (or first) element)'},
{'name': 'removeRegions', 'type': 'bool', 'value': True, 'readonly': True, 'default': True, 'visible': False, 'tip': "remove possible multiple peaks that go one-by-one"}
]},
{'title': 'Ignore peaks', 'name': 'ignore', 'type': 'bool', 'value': False, 'default': False, 'tip': 'Checkbox to ignore peaks that are mentioned in parameter `Peak IDs', 'children': [
{'title': 'Peak IDs', 'name': 'peaks2ignore', 'type': 'str', 'value': '', 'default': '', 'tip': 'IDs of the peaks that will be ignored. IDs can be found in table in terminal `raw`. \nInteger or a comma-separated integer list.\n Example:\n12\n0, 12, 1153'},
]},
{'title': 'Plausibility Check Params', 'name': 'check_grp', 'type': 'group', 'children': [
{'title': 'Neighbour MIN peaks', 'name': 'MIN_grp', 'type': 'group', 'children': [
{'title': 'Valid Period\n(lower border)', 'name': 'range1', 'type': 'float', 'value': 10.0, 'default': 10., 'suffix': ' hours', 'tip': 'Lower border of the valid time-distance between two neigbour MIN peaks'},
{'title': 'Valid Period\n(upper border)', 'name': 'range2', 'type': 'float', 'value': 15.0, 'default': 15., 'suffix': ' hours', 'tip': 'Upper border of the valid time-distance between two neigbour MIN peaks'},
{'title': 'Warnings (MIN)', 'name': 'warn', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{'title': 'Neighbour MAX peaks', 'name': 'MAX_grp', 'type': 'group', 'children': [
{'title': 'Valid Period\n(lower border)', 'name': 'range1', 'type': 'float', 'value': 10.0, 'default': 10., 'suffix': ' hours', 'tip': 'Lower border of the valid time-distance between two neigbour MAX peaks'},
{'title': 'Valid Period\n(upper border)', 'name': 'range2', 'type': 'float', 'value': 15.0, 'default': 15., 'suffix': ' hours', 'tip': 'Upper border of the valid time-distance between two neigbour MAX peaks'},
{'title': 'Warnings (MAX)', 'name': 'warn', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{'title': 'Neighbour ALL peaks', 'name': 'ALL_grp', 'type': 'group', 'children': [
{'title': 'Valid Period\n(lower border)', 'name': 'range1', 'type': 'float', 'value': 4.0, 'default': 4., 'suffix': ' hours', 'tip': 'Lower border of the valid time-distance between two neigbour peaks (MIN or MAX)'},
{'title': 'Valid Period\n(upper border)', 'name': 'range2', 'type': 'float', 'value': 9.0, 'default': 9., 'suffix': ' hours', 'tip': 'Upper border of the valid time-distance between two neigbour peaks (MIN or MAX)'},
{'title': 'Warnings (ALL)', 'name': 'warn', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{ 'title': 'Warnings (Total)', 'name': 'warn_sum', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
]},
{'title': 'Output', 'name': 'out_grp', 'type': 'group', 'children': [
{ 'title': 'Raw Minimums', 'name': 'raw_nmin', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
{ 'title': 'Raw Maximums', 'name': 'raw_nmax', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
{ 'title': 'Raw Number\nof Mins+Maxs', 'name': 'raw_n_all', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
{ 'title': 'Final Number\nof Cycles', 'name': 'n_cycles', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{'name': 'Plot', 'type': 'action'}]
def __init__(self, name, parent=None):
super(detectPeaksTSNode_v2, self).__init__(name, parent=parent, terminals={'In': {'io': 'in'}, 'raw': {'io': 'out'}, 'peaks': {'io': 'out'}}, color=(250, 250, 150, 150))
self._plotRequired = False
def _createCtrlWidget(self, **kwargs):
return detectPeaksTSNode_v2CtrlWidget(**kwargs)
def process(self, In):
df = In
self.CW().param('check_grp', 'MIN_grp', 'warn').setValue('?')
self.CW().param('check_grp', 'MAX_grp', 'warn').setValue('?')
self.CW().param('check_grp', 'ALL_grp', 'warn').setValue('?')
self.CW().param('check_grp', 'warn_sum').setValue('?')
self.CW().param('out_grp', 'raw_nmin').setValue('?')
self.CW().param('out_grp', 'raw_nmax').setValue('?')
self.CW().param('out_grp', 'raw_n_all').setValue('?')
self.CW().param('out_grp', 'n_cycles').setValue('?')
self.CW().param('Peak Detection Params', 'order').setValue('?')
if df is None:
return {'raw': None, 'peaks': None}
colname = [col for col in df.columns if isNumpyNumeric(df[col].dtype)]
self.CW().param('column').setLimits(colname)
colname = [col for col in df.columns if isNumpyDatetime(df[col].dtype)]
self.CW().param('datetime').setLimits(colname)
kwargs = self.CW().prepareInputArguments()
kwargs['split'] = True
with BusyCursor():
kwargs['order'] = prepare_order(kwargs['T'], kwargs['hMargin'], prepare_datetime(df, datetime=kwargs['datetime']))
self.CW().param('Peak Detection Params', 'order').setValue(str(kwargs['order']))
#peaks = detectPeaks_ts(df, kwargs.pop('column'), plot=self._plotRequired, **kwargs)
extra, raw, peaks = full_peak_detection_routine(df, col=kwargs.pop('column'), date_col=kwargs.pop('datetime'),
IDs2mask=kwargs.pop('IDs2mask'), valid_range=kwargs.pop('valid_range'),
plot=self._plotRequired,
**kwargs)
n_warn_min = len(extra['warnings']['MIN'])
n_warn_max = len(extra['warnings']['MAX'])
n_warn_all = len(extra['warnings']['ALL'])
self.CW().param('check_grp', 'MIN_grp', 'warn').setValue(n_warn_min)
self.CW().param('check_grp', 'MAX_grp', 'warn').setValue(n_warn_max)
self.CW().param('check_grp', 'ALL_grp', 'warn').setValue(n_warn_all)
self.CW().param('check_grp', 'warn_sum').setValue(n_warn_min + n_warn_max + n_warn_all)
self.CW().param('out_grp', 'raw_nmin').setValue(extra['raw_nmin'])
self.CW().param('out_grp', 'raw_nmax').setValue(extra['raw_nmax'])
if raw is not None: self.CW().param('out_grp', 'raw_n_all').setValue(len(raw.index))
if peaks is not None: self.CW().param('out_grp', 'n_cycles').setValue(len(peaks.index))
return {'raw': raw, 'peaks': peaks}
def plot(self):
self._plotRequired = True
self._plotRequired = self.check_n_warnings()
self.update()
self._plotRequired = False
def check_n_warnings(self):
n = self.CW().param('check_grp', 'warn_sum').value()
if n == '?':
return True
if int(n) > 100:
reply = QtGui.QMessageBox.question(None, 'Too many Warnings!',
"You are going to plot {0} peak-warnings!\nThis will be slow and not informative!\n\nDo you really want to create the plot?".format(n),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return False
elif reply == QtGui.QMessageBox.Yes:
return True
else:
return True
class detectPeaksTSNode_v2CtrlWidget(NodeCtrlWidget):
def __init__(self, **kwargs):
super(detectPeaksTSNode_v2CtrlWidget, self).__init__(update_on_statechange=True, **kwargs)
self.disconnect_valueChanged2upd(self.param('Peak Detection Params', 'order'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'MIN_grp', 'warn'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'MAX_grp', 'warn'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'ALL_grp', 'warn'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'warn_sum'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'raw_nmin'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'raw_nmax'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'n_cycles'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'raw_n_all'))
self.param('Plot').sigActivated.connect(self._parent.plot)
def prepareInputArguments(self):
kwargs = dict()
kwargs['column'] = self.param('column').value()
kwargs['datetime'] = self.param('datetime').value()
kwargs['T'] = self.param('Peak Detection Params', 'T').value()
kwargs['hMargin'] = self.param('Peak Detection Params', 'hMargin').value()
kwargs['mode'] = self.param('Peak Detection Params', 'mode').value()
kwargs['IDs2mask'] = [int(val) for val in self.param('ignore', 'peaks2ignore').value().split(',')] if (self.param('ignore').value() is True and self.param('ignore', 'peaks2ignore').value() != '') else []
kwargs['removeRegions'] = self.param('Peak Detection Params', 'removeRegions').value()
kwargs['valid_range'] = {
'MIN': [np.timedelta64(int(self.param('check_grp', 'MIN_grp', 'range1').value()*3600), 's'),
np.timedelta64(int(self.param('check_grp', 'MIN_grp', 'range2').value()*3600), 's')],
'MAX': [np.timedelta64(int(self.param('check_grp', 'MAX_grp', 'range1').value()*3600), 's'),
np.timedelta64(int(self.param('check_grp', 'MAX_grp', 'range2').value()*3600), 's')],
'ALL': [np.timedelta64(int(self.param('check_grp', 'ALL_grp', 'range1').value()*3600), 's'),
np.timedelta64(int(self.param('check_grp', 'ALL_grp', 'range2').value()*3600), 's')]
}
return kwargs
|
gpl-2.0
| -2,258,799,015,408,088,600
| 68.064706
| 268
| 0.570224
| false
| 3.50373
| false
| false
| false
|
lfblogs/aio2py
|
aio2py/db/pool.py
|
1
|
1541
|
# -*- coding: UTF-8 -*-
__author__ = "Liu Fei"
__github__ = "http://github.com/lfblogs"
__all__ = [
"Pool"
]
"""
Define database connection pool
"""
import asyncio
import logging
try:
import aiomysql
except ImportError:
from aio2py.required import aiomysql
try:
import aiopg
except ImportError:
from aio2py.required import aiopg
logging.basicConfig(level=logging.INFO)
@asyncio.coroutine
def Pool(loop,**kw):
logging.info('Create database connection pool...')
global __pool
ENGINE = kw.get('ENGINE',None)
if not ENGINE:
raise KeyError('Not found database ENGINE in conf files...')
if ENGINE == 'mysql':
__pool = yield from aiomysql.create_pool(
host = kw.get('host', ''),
port = kw.get('port', 3306),
user = kw.get('user', ''),
password = kw.get('password', ''),
db = kw.get('db', ''),
charset = kw.get('charset', 'utf8'),
autocommit = kw.get('autocommit', True),
maxsize = kw.get('maxsize', 10),
minsize = kw.get('minsize', 1),
loop = loop
)
elif ENGINE == 'postgresql':
__pool = yield from aiopg.pool.create_pool(
host = kw.get('host', ''),
port = kw.get('port', 5432),
user = kw.get('user', ''),
password = kw.get('password', ''),
database = kw.get('db', ''),
maxsize = kw.get('maxsize', 10),
minsize = kw.get('minsize', 1),
loop = loop
)
else:
raise KeyError('Database ENGINE Error...')
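# Usage sketch (editorial illustration, not part of this module).  Host,
# credentials and database name below are placeholders only.
def _example_create_pool():  # pragma: no cover
    loop = asyncio.get_event_loop()
    # Pool() is a coroutine, so it has to be driven by the event loop
    loop.run_until_complete(Pool(loop, ENGINE='postgresql', host='127.0.0.1',
                                 port=5432, user='app', password='secret',
                                 db='appdb', minsize=1, maxsize=10))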
|
apache-2.0
| 8,201,426,354,376,966,000
| 23.09375
| 68
| 0.55743
| false
| 3.558891
| false
| false
| false
|
lpenz/omnilint
|
container/omnilint/checkers/ansibleplaybooks.py
|
1
|
1902
|
# Copyright (C) 2020 Leandro Lisboa Penz <lpenz@lpenz.org>
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
'''ansible playbook checker'''
import yaml
import subprocess
import re
from omnilint.error import Error
from omnilint.checkers import Checker
class AnsiblePlaybook(Checker):
extensions = ['yaml', 'yml']
def __init__(self):
super(AnsiblePlaybook, self).__init__()
def isplaybook(self, data):
if not isinstance(data, list):
return False
for e in data:
if not isinstance(e, dict):
return False
if 'import_playbook' in e:
return True
if 'hosts' in e:
return True
return False
def check(self, reporter, origname, tmpname, firstline, fd):
try:
data = yaml.load(fd)
except yaml.YAMLError:
# This is reported by the yaml checker:
return
if not self.isplaybook(data):
return
with subprocess.Popen(
['/usr/local/bin/ansible-lint', '-p', '--nocolor', tmpname],
stdout=subprocess.PIPE,
env={'HOME': '/tmp'}) as p:
regex = re.compile(''.join([
'^(?P<path>[^:]+)', ':(?P<line>[0-9]+)', ': (?P<message>.*)$'
]))
for line in p.stdout:
line = line.decode('utf-8').rstrip()
m = regex.match(line)
assert m
reporter.report(
Error(msg=m.group('message'),
file=m.group('path'),
line=int(m.group('line'))))
def register(omnilint):
'''Registration function, called by omnilint while loading the checker with
itself as argument'''
omnilint.register(AnsiblePlaybook)
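# Editorial illustration (not part of the original checker): the regex in
# AnsiblePlaybook.check() expects ansible-lint's parseable (-p) output, i.e.
# lines shaped like "path:line: message".  The sample line below is made up.
def _example_parse_line():  # pragma: no cover
    sample = 'playbook.yml:7: [E301] Commands should not change things if nothing needs doing'
    m = re.match(r'^(?P<path>[^:]+):(?P<line>[0-9]+): (?P<message>.*)$', sample)
    return m.group('path'), int(m.group('line')), m.group('message')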
|
mit
| -1,433,899,188,108,483,600
| 29.677419
| 79
| 0.538381
| false
| 4.207965
| false
| false
| false
|
LCAV/pyroomacoustics
|
pyroomacoustics/denoise/__init__.py
|
1
|
1165
|
"""
Single Channel Noise Reduction
==============================
Collection of single channel noise reduction (SCNR) algorithms for speech:
- :doc:`Spectral Subtraction <pyroomacoustics.denoise.spectral_subtraction>` [1]_
- :doc:`Subspace Approach <pyroomacoustics.denoise.subspace>` [2]_
- :doc:`Iterative Wiener Filtering <pyroomacoustics.denoise.iterative_wiener>` [3]_
At `this repository <https://github.com/santi-pdp/segan>`_, a deep learning approach in Python can be found.
References
----------
.. [1] M. Berouti, R. Schwartz, and J. Makhoul, *Enhancement of speech corrupted by acoustic noise,*
ICASSP '79. IEEE International Conference on Acoustics, Speech, and Signal Processing, 1979, pp. 208-211.
.. [2] Y. Ephraim and H. L. Van Trees, *A signal subspace approach for speech enhancement,*
IEEE Transactions on Speech and Audio Processing, vol. 3, no. 4, pp. 251-266, Jul 1995.
.. [3] J. Lim and A. Oppenheim, *All-Pole Modeling of Degraded Speech,*
IEEE Transactions on Acoustics, Speech, and Signal Processing 26.3 (1978): 197-210.
"""
from .spectral_subtraction import *
from .subspace import *
from .iterative_wiener import *
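# Editorial sketch (not part of the package API): a minimal, self-contained
# illustration of the magnitude spectral-subtraction idea from [1] using plain
# numpy, so none of the classes re-exported above is assumed here.
# `noise_mag` is an estimate of the noise magnitude spectrum with the same
# length as the rfft of the frame.
def _example_spectral_subtraction_frame(noisy_frame, noise_mag, floor=0.01):  # pragma: no cover
    import numpy as np
    X = np.fft.rfft(noisy_frame)
    mag, phase = np.abs(X), np.angle(X)
    # subtract the noise magnitude estimate and keep a small spectral floor
    clean_mag = np.maximum(mag - noise_mag, floor * mag)
    return np.fft.irfft(clean_mag * np.exp(1j * phase), n=len(noisy_frame))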
|
mit
| -1,552,405,641,654,512,000
| 39.172414
| 109
| 0.712446
| false
| 2.941919
| false
| false
| false
|
klpdotorg/dubdubdub
|
ekstepbin/ProcessUsage.py
|
1
|
1985
|
import os
import json
import time
import sys
DeviceList = sys.argv[1] #this takes device list as argument
TagList = sys.argv[2] #this takes Tag list as argument
dir = os.path.dirname(__file__)
json_file = os.path.join(dir, '../datapull/ekstepv3data/data/ME_SESSION_SUMMARY.json')
output_file = os.path.join(dir, '../datapull/usage.txt')
usage_file = open(output_file, 'w',encoding='utf-8')
with open (os.path.join(dir, '../datapull/'+DeviceList)) as f:
device_list = [line.rstrip() for line in f]
with open (os.path.join(dir, '../datapull/'+TagList)) as e:
tag_list = [line.rstrip() for line in e]
for line in open(json_file, 'r'):
valid_data = False
data = json.loads(line)
if 'app' in data["etags"]:
if len(data["etags"]["app"]) > 0:
if str(data["etags"]["app"][0]) in tag_list:
valid_data = True
if not valid_data:
if (str(data["dimensions"]["did"]) in device_list):
valid_data = True
if valid_data:
usage_file.write( data["mid"])
usage_file.write("|")
usage_file.write( data["uid"])
usage_file.write("|")
usage_file.write( data["dimensions"]["did"])
usage_file.write("|")
usage_file.write( str(data["edata"]["eks"]["timeSpent"]))
usage_file.write("|")
usage_file.write( str(data["dimensions"]["gdata"]["id"]))
usage_file.write("|")
s=int(data["context"]["date_range"]["from"])/1000.0
usage_file.write( time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(s)))
usage_file.write("|")
s=int(data["context"]["date_range"]["to"])/1000.0
usage_file.write( time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(s)))
usage_file.write("|")
s=int(data["syncts"])/1000.0
usage_file.write( time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(s)))
usage_file.write("\n")
usage_file.close()
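# Invocation sketch (editorial note): the two positional arguments are file
# names resolved relative to ../datapull/, one id per line, e.g.
#     python ProcessUsage.py device_list.txt tag_list.txt
# The file names above are placeholders for illustration only.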
|
mit
| 340,299,390,527,681,540
| 37.921569
| 86
| 0.565239
| false
| 3.286424
| false
| false
| false
|
faylau/microblog
|
config.py
|
1
|
1798
|
#coding=utf-8
"""
This configuration file is read in __init__.py
"""
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
"""
:summary:
"""
    CSRF_ENABLED = True  # enable cross-site request forgery (CSRF) protection
    # SECRET_KEY is only needed when CSRF protection is enabled; it is used to build an encrypted token for validating forms.
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
# FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
# FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
# FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""
:summary:
"""
DEBUG = True
# MAIL_SERVER = 'smtp.googlemail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
# MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
class TestingConfig(Config):
"""
:summary:
"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir,
'data-test.db')
class ProductionConfig(Config):
"""
:summary:
"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.db')
config = {'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig}
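# Usage sketch (editorial note): the application factory that consumes this
# mapping lives in __init__.py and is not shown here; a typical pattern would be
#
#     from flask import Flask
#     def create_app(config_name='default'):
#         app = Flask(__name__)
#         app.config.from_object(config[config_name])
#         config[config_name].init_app(app)
#         return app
#
# The factory name and signature above are assumptions for illustration only.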
|
apache-2.0
| -8,939,346,742,033,719,000
| 24.742424
| 73
| 0.592462
| false
| 2.937716
| true
| false
| false
|
Webcampak/cli
|
webcampak/core/wpakCapture.py
|
1
|
22904
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010-2016 Eurotechnia (support@webcampak.com)
# This file is part of the Webcampak project.
# Webcampak is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
# Webcampak is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with Webcampak.
# If not, see http://www.gnu.org/licenses/
import os
import time
import gettext
from wpakConfigObj import Config
from wpakFileUtils import fileUtils
from wpakTimeUtils import timeUtils
from wpakTransferUtils import transferUtils
from wpakPhidgetsUtils import phidgetsUtils
from capture.wpakCaptureUtils import captureUtils
from capture.wpakCaptureEmails import captureEmails
from capture.wpakCaptureObj import captureObj
from capture.wpakSensorsObj import sensorsObj
from capture.drivers.wpakCaptureGphoto import captureGphoto
from capture.drivers.wpakCaptureIPCam import captureIPCam
from capture.drivers.wpakCaptureWebfile import captureWebfile
from capture.drivers.wpakCaptureTestPicture import captureTestPicture
from capture.drivers.wpakCaptureWpak import captureWpak
from capture.drivers.wpakCaptureRtsp import captureRtsp
from capture.drivers.wpakCapturePhidget import capturePhidget
from wpakPictureTransformations import pictureTransformations
from wpakFTPUtils import FTPUtils
# This class is used to capture a picture or sensors from a source
class Capture(object):
""" This class is used to capture from a source
Args:
log: A class, the logging interface
appConfig: A class, the app config interface
config_dir: A string, filesystem location of the configuration directory
sourceId: Source ID of the source to capture
Attributes:
tbc
"""
def __init__(self, log, appConfig, config_dir, sourceId):
self.log = log
self.appConfig = appConfig
self.config_dir = config_dir
self.currentSourceId = sourceId
self.setSourceId(sourceId)
self.configPaths = Config(self.log, self.config_dir + 'param_paths.yml')
self.dirEtc = self.configPaths.getConfig('parameters')['dir_etc']
self.dirConfig = self.configPaths.getConfig('parameters')['dir_config']
self.dirBin = self.configPaths.getConfig('parameters')['dir_bin']
self.dirSources = self.configPaths.getConfig('parameters')['dir_sources']
self.dirSourceLive = self.configPaths.getConfig('parameters')['dir_source_live']
self.dirSourceCapture = self.configPaths.getConfig('parameters')['dir_source_capture']
self.dirLocale = self.configPaths.getConfig('parameters')['dir_locale']
self.dirLocaleMessage = self.configPaths.getConfig('parameters')['dir_locale_message']
self.dirStats = self.configPaths.getConfig('parameters')['dir_stats']
self.dirCache = self.configPaths.getConfig('parameters')['dir_cache']
self.dirEmails = self.configPaths.getConfig('parameters')['dir_emails']
self.dirResources = self.configPaths.getConfig('parameters')['dir_resources']
self.dirLogs = self.configPaths.getConfig('parameters')['dir_logs']
self.dirXferQueue = self.configPaths.getConfig('parameters')['dir_xfer'] + 'queued/'
self.dirCurrentSource = self.dirSources + 'source' + self.currentSourceId + '/'
self.dirCurrentSourceTmp = self.dirSources + 'source' + self.currentSourceId + '/' + \
self.configPaths.getConfig('parameters')['dir_source_tmp']
self.dirCurrentSourceCapture = self.dirSources + 'source' + self.currentSourceId + '/' + self.dirSourceCapture
self.dirCurrentSourcePictures = self.dirSources + 'source' + self.currentSourceId + '/' + \
self.configPaths.getConfig('parameters')['dir_source_pictures']
self.dirCurrentSourceLogs = self.dirLogs + 'source' + self.currentSourceId + '/'
self.setupLog()
self.log.info("===START===")
self.log.info("capture(): Start")
self.configGeneral = Config(self.log, self.dirConfig + 'config-general.cfg')
self.configSource = Config(self.log, self.dirEtc + 'config-source' + str(self.getSourceId()) + '.cfg')
self.configSourceFTP = Config(self.log,
self.dirEtc + 'config-source' + str(self.currentSourceId) + '-ftpservers.cfg')
self.dirCurrentLocaleMessages = self.dirLocale + self.configSource.getConfig(
'cfgsourcelanguage') + "/" + self.dirLocaleMessage
self.initGetText(self.dirLocale, self.configGeneral.getConfig('cfgsystemlang'),
self.configGeneral.getConfig('cfggettextdomain'))
self.timeUtils = timeUtils(self)
self.fileUtils = fileUtils(self)
self.phidgetsUtils = phidgetsUtils(self)
self.FTPUtils = FTPUtils(self)
self.transferUtils = transferUtils(self)
self.setScriptStartTime(self.timeUtils.getCurrentSourceTime(self.configSource))
# By default, the picture date corresponds to the time the script started
self.log.info("capture(): " + _("Set Capture Time to script start time (default at script startup)"))
self.setCaptureTime(self.getScriptStartTime())
fileCaptureDetails = self.dirSources + 'source' + self.currentSourceId + '/' + self.dirSourceLive + 'last-capture.json'
fileCaptureLog = self.dirCurrentSourceCapture + self.getCaptureTime().strftime("%Y%m%d") + ".jsonl"
self.log.info("capture(): " + _("Create Capture Status object and set script start date"))
self.currentCaptureDetails = captureObj(self.log, fileCaptureLog)
self.currentCaptureDetails.setCaptureFile(fileCaptureDetails)
self.currentCaptureDetails.setCaptureValue('scriptStartDate', self.getScriptStartTime().isoformat())
self.log.info("capture(): " + _("Load previous Capture Status Object (if available)"))
self.lastCaptureDetails = captureObj(self.log)
self.lastCaptureDetails.setCaptureFile(fileCaptureDetails)
self.lastCaptureDetails.loadCaptureFile()
self.captureUtils = captureUtils(self)
self.captureEmails = captureEmails(self)
self.pictureTransformations = pictureTransformations(self)
self.captureUtils.setPictureTransformations(self.pictureTransformations)
self.log.info("capture(): " + _("Initializing the following capture driver: %(captureDriver)s") % {
'captureDriver': self.configSource.getConfig('cfgsourcetype')})
if self.configSource.getConfig('cfgsourcetype') == "gphoto":
# If the source is a gphoto camera
self.captureDriver = captureGphoto(self)
elif self.configSource.getConfig('cfgsourcetype') == "testpicture":
# The source is using a test picture, randomly modified
self.captureDriver = captureTestPicture(self)
elif self.configSource.getConfig('cfgsourcetype') == "ipcam" or (
self.configSource.getConfig('cfgsourcetype') == "wpak" and self.configSource.getConfig(
'cfgsourcewpaktype') == "rec"):
# If the source is an IP Camera
self.captureDriver = captureIPCam(self)
elif self.configSource.getConfig('cfgsourcetype') == "webfile":
# If the source is a Web File
self.captureDriver = captureWebfile(self)
elif self.configSource.getConfig('cfgsourcetype') == "wpak" and self.configSource.getConfig(
'cfgsourcewpaktype') == "get":
# If the source is another source of the same Webcampak
self.captureDriver = captureWpak(self)
elif self.configSource.getConfig('cfgsourcetype') == "rtsp":
# If the source is a RTSP stream
self.captureDriver = captureRtsp(self)
self.captureFilename = None
def setupLog(self):
""" Setup logging to file"""
if not os.path.exists(self.dirCurrentSourceLogs):
os.makedirs(self.dirCurrentSourceLogs)
logFilename = self.dirCurrentSourceLogs + "capture.log"
self.appConfig.set(self.log._meta.config_section, 'file', logFilename)
self.appConfig.set(self.log._meta.config_section, 'rotate', True)
self.appConfig.set(self.log._meta.config_section, 'max_bytes', 512000)
self.appConfig.set(self.log._meta.config_section, 'max_files', 10)
self.log._setup_file_log()
def initGetText(self, dirLocale, cfgsystemlang, cfggettextdomain):
""" Initialize Gettext with the corresponding translation domain
Args:
dirLocale: A string, directory location of the file
cfgsystemlang: A string, webcampak-level language configuration parameter from config-general.cfg
cfggettextdomain: A string, webcampak-level gettext domain configuration parameter from config-general.cfg
Returns:
None
"""
self.log.debug("capture.initGetText(): Start")
try:
t = gettext.translation(cfggettextdomain, dirLocale, [cfgsystemlang], fallback=True)
_ = t.ugettext
t.install()
self.log.info("capture.initGetText(): " + _(
"Initialized gettext with Domain: %(cfggettextdomain)s - Language: %(cfgsystemlang)s - Path: %(dirLocale)s")
% {'cfggettextdomain': cfggettextdomain, 'cfgsystemlang': cfgsystemlang,
'dirLocale': dirLocale})
except:
self.log.error("No translation file available")
# Setters and Getters
def setScriptStartTime(self, scriptStartTime):
self.log.info("capture.setScriptStartTime(): " + _("Script Start Time set to: %(scriptStartTime)s") % {
'scriptStartTime': scriptStartTime.isoformat()})
self.scriptStartTime = scriptStartTime
def getScriptStartTime(self):
return self.scriptStartTime
def setCaptureFilename(self, captureFilename):
self.captureFilename = captureFilename
def getCaptureFilename(self):
return self.captureFilename
def setSourceId(self, sourceId):
self.sourceId = sourceId
def getSourceId(self):
return self.sourceId
def setCaptureTime(self, captureTime=None):
if captureTime == None:
self.captureTime = self.timeUtils.getCurrentSourceTime(self.configSource)
else:
self.captureTime = captureTime
self.log.info("capture.setCaptureTime(): " + _("Capture Time set to: %(captureTime)s") % {
'captureTime': str(self.captureTime)})
return self.captureTime
def getCaptureTime(self):
return self.captureTime
def run(self):
""" Initiate the capture process for the source """
self.log.info("capture.run(): " + _("Initiate capture process for source: %(currentSourceId)s") % {
'currentSourceId': str(self.sourceId)})
# There might be a need to delay the capture by a couple of seconds
if self.configSource.getConfig('cfgcapturedelay') != "0":
self.log.info("capture.run(): " + _("Delaying capture by %(CaptureDelay)s seconds.") % {
'CaptureDelay': str(self.configSource.getConfig('cfgcapturedelay'))})
time.sleep(int(self.configSource.getConfig('cfgcapturedelay')))
if self.configSource.getConfig('cfgcapturedelaydate') != "script":
self.setCaptureTime()
if self.configSource.getConfig('cfgnocapture') == "yes":
self.log.info("capture.run(): " + _("Capture manually disabled via administration panel"))
elif self.configSource.getConfig('cfgsourceactive') != "yes":
self.log.info("capture.run(): " + _("Source is not active, not proceeding with capture"))
elif self.captureUtils.isWithinTimeframe() == False:
self.log.info("capture.run(): " + _("Capture calendar is active but capture not in the correct timeframe"))
elif self.captureUtils.checkInterval() == False:
self.log.info("capture.run(): " + _("Not enough time since last picture was captured, not proceeding"))
else:
# Capture the picture and return an array containing one or more files to be processed
# If multiple files are being processed, the captureDate value is the one of the latest picture captured
capturedPictures = self.captureDriver.capture()
# Used to count the number of times pictures are being processed,
# since we only want to generate hotlink images once per capture cycle
processedPicturesCount = 0
if capturedPictures != False:
for currentPicture in capturedPictures:
self.log.info("capture.run(): " + _("Begin processing of picture: %(currentPicture)s") % {
'currentPicture': currentPicture})
# Set picture filename
self.setCaptureFilename(os.path.splitext(os.path.basename(currentPicture))[0])
self.pictureTransformations.setFilesourcePath(currentPicture)
self.pictureTransformations.setFiledestinationPath(currentPicture)
# Process pictures (crop, resize, watermark, legend, ...)
if processedPicturesCount == 0 or self.configSource.getConfig(
'cfgsourcecamiplimiterotation') != "yes":
self.captureUtils.modifyPictures(True)
else: # Only generate the hotlink for the first picture being processed
self.captureUtils.modifyPictures(False)
# Copy pictures to live/ directory as last-capture.jpg or last-capture.raw
if self.configSource.getConfig('cfghotlinkmax') != "no":
self.captureUtils.createLivePicture(self.getCaptureFilename())
# Archive picture to its definitive location
self.captureUtils.archivePicture(self.getCaptureFilename())
# Create hotlinks and send those by FTP if enabled
self.captureUtils.generateHotlinks()
# Send file to first remote FTP Server
self.captureUtils.sendPicture(self.configSource.getConfig('cfgftpmainserverid'),
self.configSource.getConfig('cfgftpmainserverretry'),
self.configSource.getConfig('cfgftpmainserverraw'),
self.captureFilename)
# Send file to second remote FTP Server
self.captureUtils.sendPicture(self.configSource.getConfig('cfgftpsecondserverid'),
self.configSource.getConfig('cfgftpsecondserverretry'),
self.configSource.getConfig('cfgftpsecondserverraw'),
self.captureFilename)
# Copy file to first internal source
if self.configSource.getConfig('cfgcopymainenable') == "yes":
self.captureUtils.copyPicture(self.configSource.getConfig('cfgcopymainsourceid'),
self.configSource.getConfig('cfgcopymainsourceraw'),
self.captureFilename)
# Copy file to second internal source
if self.configSource.getConfig('cfgcopysecondenable') == "yes":
self.captureUtils.copyPicture(self.configSource.getConfig('cfgcopysecondsourceid'),
self.configSource.getConfig('cfgcopysecondsourceraw'),
self.captureFilename)
                    # Automatically purge old pictures
self.captureUtils.purgePictures(self.getCaptureFilename())
storedJpgSize = self.captureUtils.getArchivedSize(self.getCaptureFilename(), "jpg")
storedRawSize = self.captureUtils.getArchivedSize(self.getCaptureFilename(), "raw")
self.currentCaptureDetails.setCaptureValue('storedJpgSize',
self.currentCaptureDetails.getCaptureValue(
'storedJpgSize') + storedJpgSize)
self.currentCaptureDetails.setCaptureValue('storedRawSize',
self.currentCaptureDetails.getCaptureValue(
'storedRawSize') + storedRawSize)
self.currentCaptureDetails.setCaptureValue('totalCaptureSize',
self.currentCaptureDetails.getCaptureValue(
'totalCaptureSize') + int(
storedJpgSize + storedRawSize))
processedPicturesCount = processedPicturesCount + 1
self.log.info("capture.run(): " + _("Capture process completed"))
self.currentCaptureDetails.setCaptureValue('captureSuccess', True)
if os.path.isfile(self.dirCache + "source" + self.currentSourceId + "-errorcount"):
os.remove(self.dirCache + "source" + self.currentSourceId + "-errorcount")
else:
self.log.info("capture.run(): " + _("Unable to capture picture"))
self.captureUtils.generateFailedCaptureHotlink()
self.currentCaptureDetails.setCaptureValue('captureSuccess', False)
self.captureUtils.setCustomCounter('errorcount', int(self.captureUtils.getCustomCounter('errorcount')) + 1)
if self.configSource.getConfig('cfgcapturedeleteafterdays') != "0":
# Purge old pictures (by day)
self.captureUtils.deleteOldPictures()
if self.configSource.getConfig('cfgcapturemaxdirsize') != "0":
# Purge old pictures (by size)
self.captureUtils.deleteOverSize()
if self.configGeneral.getConfig('cfgstatsactivate') == "yes":
self.captureUtils.sendUsageStats()
# if self.configSource.getConfig('cfgemailcapturestats') == "yes":
# self.captureEmails.sendCaptureStats()
sensorFilename = self.getCaptureTime().strftime("%Y%m%d") + "-sensors.jsonl"
fileCaptureLog = self.dirCurrentSourcePictures + self.getCaptureTime().strftime("%Y%m%d") + "/" + sensorFilename
if self.configGeneral.getConfig('cfgphidgetactivate') == "yes" and self.configSource.getConfig(
'cfgphidgetactivate') == "yes":
capturedSensors = capturePhidget(self).capture()
currentSensorsDetails = sensorsObj(self.log, fileCaptureLog)
currentSensorsDetails.setSensorsValue('date', self.getCaptureTime().isoformat())
currentSensorsDetails.setSensorsValue('sensors', capturedSensors)
# Record capture interval
sourceCaptureInterval = int(self.configSource.getConfig('cfgcroncapturevalue'))
if self.configSource.getConfig('cfgcroncaptureinterval') == "minutes":
sourceCaptureInterval = int(self.configSource.getConfig('cfgcroncapturevalue')) * 60
currentSensorsDetails.setSensorsValue('interval', sourceCaptureInterval)
currentSensorsDetails.archiveSensorsFile()
        # If the Phidget sensor file exists, it is sent through the rest of the delivery chain.
if (os.path.isfile(fileCaptureLog)):
# Send file to first remote FTP Server
self.captureUtils.sendSensor(self.configSource.getConfig('cfgftpmainserverid'),
self.configSource.getConfig('cfgftpmainserverretry'),
sensorFilename)
# Send file to second remote FTP Server
self.captureUtils.sendSensor(self.configSource.getConfig('cfgftpsecondserverid'),
self.configSource.getConfig('cfgftpsecondserverretry'),
sensorFilename)
# Copy file to first internal source
if self.configSource.getConfig('cfgcopymainenable') == "yes":
self.captureUtils.copySensor(self.configSource.getConfig('cfgcopymainsourceid'),
sensorFilename)
# Copy file to second internal source
if self.configSource.getConfig('cfgcopysecondenable') == "yes":
self.captureUtils.copySensor(self.configSource.getConfig('cfgcopysecondsourceid'),
sensorFilename)
scriptEndDate = self.timeUtils.getCurrentSourceTime(self.configSource)
totalCaptureTime = int((scriptEndDate - self.getScriptStartTime()).total_seconds() * 1000)
self.log.info("capture.run(): " + _("Capture: Overall capture time: %(TotalCaptureTime)s ms") % {
'TotalCaptureTime': str(totalCaptureTime)})
self.currentCaptureDetails.setCaptureValue('scriptEndDate', scriptEndDate.isoformat())
self.currentCaptureDetails.setCaptureValue('scriptRuntime', totalCaptureTime)
self.currentCaptureDetails.setCaptureValue('processedPicturesCount', processedPicturesCount)
# Two different files are being stored here:
        # - The last-capture file, which is only stored if the capture is successful
# - The capture archive, which contains all capture requests (successful or not)
if capturedPictures != False:
self.currentCaptureDetails.writeCaptureFile()
self.currentCaptureDetails.archiveCaptureFile()
self.log.info(
"capture.run(): " + _("-----------------------------------------------------------------------"))
self.log.info("===END===")
|
gpl-3.0
| -3,304,625,089,856,374,300
| 56.26
| 127
| 0.627925
| false
| 4.621469
| true
| false
| false
|
tjw/swift
|
utils/gyb_syntax_support/__init__.py
|
1
|
3605
|
import textwrap
from AttributeNodes import ATTRIBUTE_NODES
from CommonNodes import COMMON_NODES # noqa: I201
from DeclNodes import DECL_NODES # noqa: I201
from ExprNodes import EXPR_NODES # noqa: I201
from GenericNodes import GENERIC_NODES # noqa: I201
from PatternNodes import PATTERN_NODES # noqa: I201
from StmtNodes import STMT_NODES # noqa: I201
import Token
from TypeNodes import TYPE_NODES # noqa: I201
# Re-export global constants
SYNTAX_NODES = COMMON_NODES + EXPR_NODES + DECL_NODES + ATTRIBUTE_NODES + \
STMT_NODES + GENERIC_NODES + TYPE_NODES + PATTERN_NODES
SYNTAX_TOKENS = Token.SYNTAX_TOKENS
SYNTAX_TOKEN_MAP = Token.SYNTAX_TOKEN_MAP
def make_missing_child(child):
"""
Generates a C++ call to make the raw syntax for a given Child object.
"""
if child.is_token():
token = child.main_token()
tok_kind = token.kind if token else "unknown"
tok_text = token.text if token else ""
return 'RawSyntax::missing(tok::%s, "%s")' % (tok_kind, tok_text)
else:
missing_kind = "Unknown" if child.syntax_kind == "Syntax" \
else child.syntax_kind
if child.node_choices:
return make_missing_child(child.node_choices[0])
return 'RawSyntax::missing(SyntaxKind::%s)' % missing_kind
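# Illustrative example, not part of the original module: the two shapes of
# string this helper emits. The 'l_paren' token kind and the 'Expr' syntax
# kind are placeholders rather than the output for any particular Child.
#   make_missing_child(token_child)   -> 'RawSyntax::missing(tok::l_paren, "(")'
#   make_missing_child(layout_child)  -> 'RawSyntax::missing(SyntaxKind::Expr)'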
def check_child_condition_raw(child):
"""
Generates a C++ closure to check whether a given raw syntax node can
satisfy the requirements of child.
"""
result = '[](const RC<RawSyntax> &Raw) {\n'
result += ' // check %s\n' % child.name
if child.token_choices:
result += 'if (!Raw->isToken()) return false;\n'
result += 'auto TokKind = Raw->getTokenKind();\n'
tok_checks = []
for choice in child.token_choices:
tok_checks.append("TokKind == tok::%s" % choice.kind)
result += 'return %s;\n' % (' || '.join(tok_checks))
elif child.text_choices:
result += 'if (!Raw->isToken()) return false;\n'
result += 'auto Text = Raw->getTokenText();\n'
tok_checks = []
for choice in child.text_choices:
tok_checks.append('Text == "%s"' % choice)
result += 'return %s;\n' % (' || '.join(tok_checks))
elif child.node_choices:
node_checks = []
for choice in child.node_choices:
node_checks.append(check_child_condition_raw(choice) + '(Raw)')
result += 'return %s;\n' % ((' || ').join(node_checks))
else:
result += 'return %s::kindof(Raw->getKind());' % child.type_name
result += '}'
return result
def make_missing_swift_child(child):
"""
Generates a Swift call to make the raw syntax for a given Child object.
"""
if child.is_token():
token = child.main_token()
tok_kind = token.swift_kind() if token else "unknown"
if not token or not token.text:
tok_kind += '("")'
return 'RawSyntax.missingToken(.%s)' % tok_kind
else:
missing_kind = "unknown" if child.syntax_kind == "Syntax" \
else child.swift_syntax_kind
return 'RawSyntax.missing(.%s)' % missing_kind
def create_node_map():
"""
Creates a lookup table to find nodes by their kind.
"""
return {node.syntax_kind: node for node in SYNTAX_NODES}
def is_visitable(node):
return not node.is_base() and not node.collection_element
def dedented_lines(description):
"""
Each line of the provided string with leading whitespace stripped.
"""
if not description:
return []
return textwrap.dedent(description).split('\n')
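# Illustrative example, not part of the original module: the made-up
# description below shows how dedented_lines() strips the leading margin.
#   dedented_lines("\n    A node.\n    More detail.\n")
#   # -> ['', 'A node.', 'More detail.', '']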
|
apache-2.0
| -4,316,504,402,919,193,600
| 34.343137
| 75
| 0.618863
| false
| 3.562253
| false
| false
| false
|
pythonpro-dev/pp-apiaccesstoken
|
pp/apiaccesstoken/middleware.py
|
1
|
3860
|
# -*- coding: utf-8 -*-
"""
"""
import logging
from pp.apiaccesstoken.tokenmanager import Manager
from pp.apiaccesstoken.tokenmanager import AccessTokenInvalid
from pp.apiaccesstoken.headers import WSGI_ENV_ACCESS_TOKEN_HEADER
def get_log(e=None):
return logging.getLogger("{0}.{1}".format(__name__, e) if e else __name__)
def recover_secret(access_token):
"""Given the access_token recover the access_secret to verify it with.
    :param access_token: The access token string.
:returns: access_secret on success or None on failure.
"""
raise NotImplementedError('No Valid Access Detail Recovery Provided')
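# Illustrative sketch, not part of the original module: a minimal
# recover_secret() replacement backed by an in-memory table. The token and
# secret values are made up; a real deployment would query its own
# credential store instead.
# _DEMO_TOKEN_TABLE = {'token-1234': 'secret-abcd'}
# def demo_recover_secret(access_token):
#     """Return the matching access_secret or None, as the contract requires."""
#     return _DEMO_TOKEN_TABLE.get(access_token)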
class ValidateAccessToken(object):
"""Validate and API access token and populate the wsgi environment with
the identity recovered.
ValidateAccessToken.HTTP_HEADER is the name of the wsgi env variable to
look for.
ValidateAccessToken.ENV_KEY is the name wsgi env variable to store
the identity in. The value of the identity is recovered from the 'identity'
field in the access_token payload.
The constructor for this class takes a recover_secret() function. This
needs to be provided or NotImplementedError will be raised. This function
recovers the access_secret for the given access_token if any. If this
function returns None then nothing was recovered and the token is invalid.
"""
# The wsgi environment variable to set when an identity was found:
ENV_KEY = 'pp.api_access.identity'
def __init__(
self, application, recover_secret=recover_secret
):
self.log = get_log('ValidateAccessToken')
self.application = application
self.recover_secret = recover_secret
def recover_access(self, environ, access_token):
"""Populate the environment with the user identity recovered from the
payload of the access_token.
To get the payload the access_token needs its corresponding
access_secret to recover it.
"""
log = get_log('ValidateAccessToken.recover_access')
log.debug("recovering the access_secret for access_token:{}".format(
access_token
))
try:
access_secret = self.recover_secret(access_token)
if access_secret:
log.debug(
"access_secret for access_token:{} recovered OK.".format(
access_token
)
)
man = Manager(access_secret)
payload = man.verify_access_token(access_token)
log.debug(
"Payload recovered for '{}'. Looking for identity.".format(
access_token
)
)
identity = payload['identity']
self.log.debug(
"Token Valid. Adding identity '{}' environ.".format(
identity
)
)
environ[self.ENV_KEY] = identity
else:
self.log.debug(
"No secret recovered for '{}'. Ignoring token.".format(
access_token
)
)
        except AccessTokenInvalid as e:
self.log.error(
"token validation fail: '{}'".format(e)
)
        except Exception as e:
self.log.exception(
"General error validating token: '{}'".format(e)
)
def __call__(self, environ, start_response):
"""Wsgi hook into kicking off the token validation and identity
recovery.
"""
access_token = environ.get(WSGI_ENV_ACCESS_TOKEN_HEADER)
if access_token:
self.recover_access(environ, access_token)
return self.application(environ, start_response)
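# Illustrative sketch, not part of the original module: wrapping a WSGI
# application with the middleware. demo_app is a made-up application and
# demo_recover_secret stands for a real secret lookup (see the sketch next to
# recover_secret above).
# def demo_app(environ, start_response):
#     start_response('200 OK', [('Content-Type', 'text/plain')])
#     return [environ.get(ValidateAccessToken.ENV_KEY, 'anonymous')]
# wrapped_app = ValidateAccessToken(demo_app, recover_secret=demo_recover_secret)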
|
bsd-3-clause
| -3,918,434,066,661,931,500
| 31.711864
| 79
| 0.594301
| false
| 4.701583
| false
| false
| false
|
boun-cmpe-soslab/drenaj
|
drenaj/drenaj_api/handlers/campaignshandler.py
|
1
|
8934
|
import bson.json_util
import tornado.ioloop
import tornado.web
from pymongo.errors import OperationFailure
from tornado import gen
from tornado.web import HTTPError
from tornado.web import MissingArgumentError
import drenaj_api.utils.drenajneo4jmanager as drenajneo4jmanager
class CampaignsHandler(tornado.web.RequestHandler):
## def datetime_hook(self, dct):
## # TODO: this only checks for the first level 'created_at'
## # We should think about whether making this recursive.
## if 'created_at' in dct:
## time_struct = time.strptime(dct['created_at'], "%a %b %d %H:%M:%S +0000 %Y") #Tue Apr 26 08:57:55 +0000 2011
## dct['created_at'] = datetime.datetime.fromtimestamp(time.mktime(time_struct))
## return bson.json_util.object_hook(dct)
## return bson.json_util.object_hook(dct)
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
self.set_header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept')
def options(self, *args):
self.post(*args)
def get(self, *args):
self.post(*args)
#self.write("not implemented yet")
#@drenaj_simple_auth
@tornado.web.asynchronous
@gen.coroutine
def post(self, *args, **keywords):
# Refactoring note: the new drenaj database manager is now a class and it's initialized
        # in the Tornado application. The old code just imported the manager module as 'drenajmongomanager'.
        # Instead of a search-and-replace procedure, assign the new db instance to drenajmongomanager.
drenajmongomanager = self.application.db
print args
action = args[0]
print action
print args
verbose_response = self.get_argument('verbose', '')
if (action == 'new'):
try:
campaign_id = self.get_argument('campaign_id')
description = self.get_argument('description', '')
campaign_type = self.get_argument('campaign_type', '') # timeline, streaming or both.
query_terms = self.get_argument('query_terms', '')
user_id_strs_to_follow = self.get_argument('user_id_strs_to_follow', '')
user_screen_names_to_follow = self.get_argument('user_screen_names_to_follow', '')
try:
drenajmongomanager.create_campaign(
{
'campaign_id': campaign_id,
'description': description,
'campaign_type': campaign_type,
'query_terms': query_terms,
'user_id_strs_to_follow': user_id_strs_to_follow,
'user_screen_names_to_follow': user_screen_names_to_follow,
})
result = 'success'
except OperationFailure:
result = 'failure'
self.write({'status': result})
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'view'):
try:
campaign_id = self.get_argument('campaign_id', 'default')
subcommand = args[1]
                if subcommand is None:
cursor = drenajmongomanager.get_campaign(campaign_id)
campaign = yield cursor
if campaign:
self.write(bson.json_util.dumps(campaign))
else:
self.write(bson.json_util.dumps({}))
self.add_header('Content-Type', 'application/json')
if subcommand == "watched_users":
skip = self.get_argument('skip', 0)
limit = self.get_argument('limit', 100)
attached_users_array = drenajneo4jmanager.get_users_attached_to_campaign(campaign_id, skip, limit)
attached_users_response = {'watched_users': [], 'campaign_id': campaign_id}
for item in attached_users_array:
x = dict()
y = dict()
for rel in item[1]:
if rel.type == 'TIMELINE_TASK_STATE':
x = dict(rel.properties)
elif rel.type == 'FRIENDFOLLOWER_TASK_STATE':
y = dict(rel.properties)
attached_users_response['watched_users'] += [[item[0], x, y]]
self.write(bson.json_util.dumps(attached_users_response))
self.add_header('Content-Type', 'application/json')
elif subcommand == "freqs":
cursor = drenajmongomanager.get_campaign_with_freqs(campaign_id)
cursor = yield cursor
campaign = cursor['result']
if campaign:
self.write(bson.json_util.dumps(campaign))
else:
self.write(bson.json_util.dumps({}))
self.add_header('Content-Type', 'application/json')
elif subcommand == 'histograms':
re_calculate = self.get_argument('re_calculate', 'no')
n_bins = self.get_argument('n_bins', "100")
if re_calculate == 'no':
cursor = drenajmongomanager.get_campaign_histograms(campaign_id)
hists = yield cursor
if hists.count() == 0:
re_calculate = 'yes'
if re_calculate == 'yes':
results = drenajmongomanager.get_users_related_with_campaign(campaign_id)
tmp = yield results[0]
users = tmp['result']
# How many tweets?
tmp = yield results[1]
n_tweets = tmp.count()
hist = drenajmongomanager.prepare_hist_and_plot(n_tweets, users, n_bins, campaign_id)
hists = [hist]
yield self.application.db.motor_column.histograms.insert(hist)
self.write(bson.json_util.dumps(hists[0]))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'edit'):
try:
campaign_id = self.get_argument('campaign_id')
subcommand = args[1]
if subcommand == 'add_watched_users':
new_watched_users = self.get_argument('new_watched_users','')
drenajmongomanager.add_to_watchlist(campaign_id, new_watched_users)
self.write(bson.json_util.dumps({'result': 'successful'}))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'list'):
try:
cursor = drenajmongomanager.get_campaigns_list()
campaigns = yield cursor.to_list(length=None)
self.write(bson.json_util.dumps(campaigns))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'filter'):
try:
skip = int(self.get_argument('skip', 0))
limit = int(self.get_argument('limit', 10))
print("FILTER: ", "skip: ", skip, ", limit", limit)
cursor = drenajmongomanager.get_campaign_list_with_freqs(skip, limit)
print("END FILTER: ", "skip: ", skip, ", limit", limit)
cursor = yield cursor
campaigns = cursor['result']
print("GCLWF: EXCEPTION: ", "campaigns: ", campaigns)
self.write(bson.json_util.dumps(campaigns))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
else:
self.write('not recognized')
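# Illustrative sketch, not part of the original module: one way this handler
# could be mounted in a tornado Application. The URL pattern, the port and the
# database manager object are assumptions; the real wiring lives elsewhere in
# the project.
# application = tornado.web.Application([
#     (r"/campaigns/(new|view|edit|list|filter)(?:/(\w+))?", CampaignsHandler),
# ])
# application.db = my_database_manager  # must provide the methods used above
# application.listen(8888)
# tornado.ioloop.IOLoop.current().start()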
|
mit
| -2,630,529,231,603,231,000
| 46.269841
| 123
| 0.527871
| false
| 4.303468
| false
| false
| false
|
scott-s-douglas/SWAPR
|
SWAPRrubric.py
|
1
|
10154
|
from SWAPRsqlite import *
from itertools import groupby
def createRubricsTable(db):
db.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (labNumber int, itemIndex int, itemType text, itemValues text, graded boolean, itemPrompt text)")
def addRubricItem(db, labNumber, itemIndex, itemType, itemValues, graded, itemPrompt = None):
db.cursor.execute("INSERT INTO rubrics VALUES (NULL, ?, ?, ?, ?, ?)", [labNumber, itemIndex, itemType, itemPrompt, graded])
if itemValues == []:
db.cursor.execute("INSERT INTO responseKeys VALUES (NULL,?,?,?,?)",[labNumber,itemIndex,0,None])
for i in range(len(itemValues)):
db.cursor.execute("INSERT INTO responseKeys VALUES (NULL, ?,?,?,?)",[labNumber, itemIndex,i,float(itemValues[-(i+1)])])
db.conn.commit()
def getMaxScore(db,labNumber):
# assumes max score corresponds with response 0
db.cursor.execute("SELECT score FROM responseKeys, rubrics WHERE response = 0 AND responseKeys.labNumber = ? AND responseKeys.itemIndex = rubrics.itemIndex AND responseKeys.labNumber = rubrics.labNumber AND graded",[labNumber])
maxScoreVector = [float(entry[0]) for entry in db.cursor.fetchall()]
maxScore = sum(maxScoreVector)
return maxScore, maxScoreVector
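# Illustrative example, not part of the original module: with the default
# rubric added by addDefaultRubric() below for a generic lab (neither lab 3
# nor lab 6), the graded items' response-0 scores are 12, 12, 12, 2, 12 and
# 12, so getMaxScore() returns 62.0 as the total.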
def getNgradedItems(db,labNumber,likert5only=False):
"Return the number of graded items in a particular lab's rubric. This function makes a SQLite call, so don't run it between a select and a fetch on that same database."
if not likert5only:
db.cursor.execute('''SELECT count(*)
FROM rubrics
WHERE
labNumber = ?
AND graded
''',[labNumber])
else:
db.cursor.execute('''SELECT count(*)
FROM rubrics
WHERE
labNumber = ?
AND graded
AND itemType = 'likert5'
''',[labNumber])
Ngraded = int(db.cursor.fetchone()[0])
return Ngraded
def getScoresDict(db,labNumber):
# Construct a dictionary of dictionaries where each possible response is paired with its score for GRADED items only
db.cursor.execute('''SELECT k.itemIndex, k.response, k.score
FROM responseKeys k, rubrics r
WHERE
--match labNumber
r.labNumber = ?
AND r.labNumber = k.labNumber
--match itemIndex
AND r.itemIndex = k.itemIndex
AND k.score IS NOT NULL
AND r.graded
ORDER BY k.itemIndex, k.response, k.score''',[labNumber])
data = [[int(entry[0]),int(entry[1]),float(entry[2])] for entry in db.cursor.fetchall()]
scoresDict = {}
for itemIndex, itemIndexGroup in groupby(data, lambda entry: entry[0]):
thisScores = {}
for pair in itemIndexGroup:
thisScores.update({pair[1]:pair[2]})
scoresDict.update({itemIndex:thisScores})
return scoresDict
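# Illustrative example, not part of the original module: for a graded likert5
# item created with itemValues [0,2,6,10,12], the corresponding entry in the
# returned dict is {0: 12.0, 1: 10.0, 2: 6.0, 3: 2.0, 4: 0.0}, because
# addRubricItem() stores the values in reverse so that response 0 scores highest.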
def addDefaultRubric(db, labNumber):
# Make sure the Wnumbers are actually consecutive on WebAssign!
if labNumber == 3:
addRubricItem(db, labNumber, 1, 'likert5', [0,2,6,10,12], True, 'The video presentation is clean and easy to follow.')
addRubricItem(db, labNumber, 2, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the video presentation?')
addRubricItem(db, labNumber, 3, 'yhn', [0,6,12], True, 'Does the video introduce the problem and state the main result?')
addRubricItem(db, labNumber, 4, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the introduction and the statements of the main result?')
addRubricItem(db, labNumber, 5, 'yhn', [0,6,12], True, 'Does the video identify the model(s) relevant to this physical system?')
addRubricItem(db, labNumber, 6, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the discussion of how the main physics ideas are applied in the problem under study?')
addRubricItem(db, labNumber, 7, 'yhn', [0,1,2], True, 'The computational model(s) successfully predict(s) the mass of the black hole.')
addRubricItem(db, labNumber, 8, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve how well his/her computational model(s) predicted the mass of the black hole?')
addRubricItem(db, labNumber, 9, 'likert5', [0,2,6,10,12], True, 'The presenter successfully discusses how his/her computational model(s) predicts or fails to predict the mass of the black hole.')
addRubricItem(db, labNumber, 10, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her EXPLANATION of how his/her model predicted the mass of the black hole?')
addRubricItem(db, labNumber, 11, 'likert5', [0,2,6,10,12], True, 'The video presentation correctly explains the physics.')
addRubricItem(db, labNumber, 12, 'freeResponse', [], False, 'Were there any aspects of the physics in this video which the presenter did not make clear? Was the presenter mistaken about some of the physics he or she presented?')
addRubricItem(db, labNumber, 13, 'comparative5', [-2,-1,0,1,2], False, 'How does this video compare to your own video?')
addRubricItem(db, labNumber, 14, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her report, or what are a couple of things you have learned from this video to improve your own report?')
if labNumber == 6:
addRubricItem(db, labNumber, 1, 'likert5', [0,2,6,10,12], True, 'The video presentation is clear and easy to follow.')
addRubricItem(db, labNumber, 2, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the video presentation?')
addRubricItem(db, labNumber, 3, 'yhn', [0,6,12], True, 'Does the presenter identify the lecture they attended and introduce the topic of that lecture?')
addRubricItem(db, labNumber, 4, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the introduction and the problem statement? ')
addRubricItem(db, labNumber, 5, 'yhn', [0,1,2], True, 'Does the presenter summarize the main points of the lecture and state why this topic was of interest to him or her?')
addRubricItem(db, labNumber, 6, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the summary of the main points of the lecture? ')
        addRubricItem(db, labNumber, 7, 'likert5', [0,2,6,10,12], True, 'The presenter taught the viewer something interesting they learned as a result of attending this lecture.')
addRubricItem(db, labNumber, 8, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the summary of the main points of the lecture? ')
addRubricItem(db, labNumber, 9, 'likert5', [0,2,6,10,12], True, 'The presenter followed up on the lecture with ideas or concepts not discussed by the public speaker.')
addRubricItem(db, labNumber, 10, 'freeResponse', [], False, 'Were there any aspects of the physics in this video which the presenter did not make clear? Was the presenter mistaken about some of the physics he or she presented? ')
addRubricItem(db, labNumber, 11, 'comparative5', [-2,-1,0,1,2], False, 'How does this video compare to your own video?')
addRubricItem(db, labNumber, 12, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her report, or what are a couple of things you have learned from this video to improve your own report?')
else:
addRubricItem(db, labNumber, 1, 'likert5', [0,2,6,10,12], True, 'The video presentation is clean and easy to follow.')
addRubricItem(db, labNumber, 2, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the video presentation?')
addRubricItem(db, labNumber, 3, 'yhn', [0,6,12], True, 'Does the video introduce the problem and state the main result?')
addRubricItem(db, labNumber, 4, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the introduction and the statements of the main result?')
addRubricItem(db, labNumber, 5, 'yhn', [0,6,12], True, 'Does the video identify the model(s) relevant to this physical system?')
addRubricItem(db, labNumber, 6, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the discussion of how the main physics ideas are applied in the problem under study?')
addRubricItem(db, labNumber, 7, 'likert5', [0,0.5,1,1.5,2], True, 'The computational model(s) successfully predict(s) the motion of the object observed.')
addRubricItem(db, labNumber, 8, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve how well his/her computational model(s) predicted the motion of the object?')
addRubricItem(db, labNumber, 9, 'likert5', [0,2,6,10,12], True, 'The presenter successfully discusses how his/her computational model(s) predicts or fails to predict the motion of the object.')
addRubricItem(db, labNumber, 10, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her EXPLANATION of how his/her model predicted the motion of the object?')
addRubricItem(db, labNumber, 11, 'likert5', [0,2,6,10,12], True, 'The video presentation correctly explains the physics.')
addRubricItem(db, labNumber, 12, 'freeResponse', [], False, 'Were there any aspects of the physics in this video which the presenter did not make clear? Was the presenter mistaken about some of the physics he or she presented?')
addRubricItem(db, labNumber, 13, 'comparative5', [-2,-1,0,1,2], False, 'How does this video compare to your own video?')
addRubricItem(db, labNumber, 14, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her report, or what are a couple of things you have learned from this video to improve your own report?')
|
gpl-2.0
| -4,788,761,156,601,311,000
| 85.786325
| 241
| 0.697065
| false
| 3.655148
| false
| false
| false
|
griddynamics/nova-billing
|
tests/__init__.py
|
1
|
1989
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Nova Billing
# Copyright (C) GridDynamics Openstack Core Team, GridDynamics
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base class for Nova Billing unit tests.
"""
import unittest
import stubout
import json
import os
class TestCase(unittest.TestCase):
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
"""Runs after each test method to tear down test environment."""
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
@staticmethod
def json_load_from_file(filename):
with open(os.path.join(os.path.dirname(
os.path.abspath(__file__)), filename),
"rt") as json_file:
return json.load(json_file)
    # Set to True to regenerate the JSON output files
write_json = False
def json_check_with_file(self, data, filename):
if self.write_json:
with open(os.path.join(os.path.dirname(
os.path.abspath(__file__)), filename),
"wt") as json_file:
json.dump(data, json_file, indent=4)
else:
self.assertEqual(data,
self.json_load_from_file(filename))
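# Illustrative sketch, not part of the original module: a concrete test case
# built on this base class. The payload and the "expected_report.json" fixture
# name are made up.
# class ExampleReportTestCase(TestCase):
#     def test_report_matches_fixture(self):
#         data = {"instances": [], "total_cost": 0}
#         self.json_check_with_file(data, "expected_report.json")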
|
gpl-3.0
| 3,098,607,152,673,971,000
| 32.728814
| 74
| 0.646053
| false
| 4.018182
| true
| false
| false
|
science09/minitwi
|
app/minitwi.py
|
1
|
6408
|
#-*- coding:utf-8 -*-
import time
from hashlib import md5
from datetime import datetime
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash
from werkzeug.security import check_password_hash, generate_password_hash
from models import *
PER_PAGE = 10
app = Flask(__name__)
app.config['SECRET_KEY'] = 'development key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tuimimi.db'
app.debug = True
app.config.from_object(__name__)
app.config.from_envvar('MINITWIT_SETTINGS', silent=True)
def get_user_id(username):
"""Convenience method to look up the id for a username."""
rv = User.query.filter_by(username=username).first_or_404()
return rv.user_id if rv else None
def format_datetime(timestamp):
"""Format a timestamp for display."""
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')
def gravatar_url(email, size=80):
"""Return the gravatar image for the given email address."""
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
@app.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = User.query.filter_by(user_id=session['user_id']).first_or_404()
else:
app.logger.warning('user_id not in session')
@app.route('/')
def timeline():
"""Shows a users timeline or if no user is logged in it will
redirect to the public timeline. This timeline shows the user's
messages as well as all the messages of followed users.
"""
if not g.user:
return redirect(url_for('public_timeline'))
message = Message.query.filter_by(author_id=session['user_id']).order_by('pub_date desc').limit(PER_PAGE)
return render_template('timeline.html', messages=message)
@app.route('/public')
def public_timeline():
"""Displays the latest messages of all users."""
message = Message.query.order_by('pub_date desc').limit(PER_PAGE)
return render_template('timeline.html', messages=message)
@app.route('/<username>')
def user_timeline(username):
"""Display's a users tweets."""
profile_user = User.query.filter_by(username=username).first_or_404()
print profile_user
if profile_user is None:
abort(404)
followed = False
if g.user:
followed = Follower.query.filter_by(who_id=session['user_id'],whom_id=profile_user.user_id).first() is not None
message = Message.query.filter_by(author_id=profile_user.user_id).order_by('pub_date desc').limit(PER_PAGE)
return render_template('timeline.html', messages=message,profile_user=profile_user,followed=followed)
@app.route('/<username>/follow')
def follow_user(username):
"""Adds the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
follower = Follower(session['user_id'], whom_id)
db.session.add(follower)
db.session.commit()
flash('You are now following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/<username>/unfollow')
def unfollow_user(username):
"""Removes the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
follower = Follower.query.filter_by(who_id=session['user_id'], whom_id=whom_id).first()
db.session.delete(follower)
db.session.commit()
flash('You are no longer following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/add_message', methods=['POST'])
def add_message():
"""Registers a new message for the user."""
if 'user_id' not in session:
abort(401)
if request.form['text']:
message = Message(session['user_id'], request.form['text'], int(time.time()))
db.session.add(message)
db.session.commit()
flash('Your message was recorded')
return redirect(url_for('timeline'))
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Logs the user in."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
user = User.query.filter_by(username=request.form['username']).first_or_404()
if user is None:
error = 'Invalid username'
elif not check_password_hash( user.pw_hash, request.form['password']):
error = 'Invalid password'
else:
flash('You were logged in')
session['user_id'] = user.user_id
return redirect(url_for('timeline'))
return render_template('login.html', error=error)
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Registers the user."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
if not request.form['username']:
error = 'You have to enter a username'
elif not request.form['email'] or \
'@' not in request.form['email']:
error = 'You have to enter a valid email address'
elif not request.form['password']:
error = 'You have to enter a password'
elif request.form['password'] != request.form['password2']:
error = 'The two passwords do not match'
elif User.query.filter_by(username=request.form['username']).first() is not None:
error = 'The username is already taken'
else:
user = User(request.form['username'], request.form['email'],
generate_password_hash(request.form['password']))
print request.form['username'], request.form['email']
db.session.add(user)
db.session.commit()
flash('You were successfully registered and can login now')
return redirect(url_for('login'))
return render_template('register.html', error=error)
@app.route('/logout')
def logout():
"""Logs the user out."""
flash('You were logged out')
session.pop('user_id', None)
return redirect(url_for('public_timeline'))
# add some filters to jinja
app.jinja_env.filters['datetimeformat'] = format_datetime
app.jinja_env.filters['gravatar'] = gravatar_url
if __name__ == '__main__':
db.create_all()
app.run()
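# Illustrative sketch, not part of the original module: exercising the
# register/login/post flow with Flask's test client. The account details and
# message text are made up.
# with app.test_client() as c:
#     c.post('/register', data={'username': 'alice', 'email': 'a@example.com',
#                               'password': 'pw', 'password2': 'pw'})
#     c.post('/login', data={'username': 'alice', 'password': 'pw'})
#     c.post('/add_message', data={'text': 'hello world'})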
|
mit
| 8,190,148,019,333,539,000
| 36.255814
| 119
| 0.644351
| false
| 3.649203
| false
| false
| false
|
pybel/pybel
|
src/pybel/io/bel_commons_client.py
|
1
|
3998
|
# -*- coding: utf-8 -*-
"""Transport functions for `BEL Commons <https://github.com/bel-commons/bel-commons>`_.
BEL Commons is a free, open-source platform for hosting BEL content. Because it was originally
developed and published in an academic capacity at Fraunhofer SCAI, a public instance can be
found at https://bel-commons-dev.scai.fraunhofer.de. However, this instance is only supported
out of posterity and will not be updated. If you would like to host your own instance of
BEL Commons, there are instructions on its GitHub page.
"""
import logging
from typing import Optional
import pystow
import requests
from .nodelink import from_nodelink, to_nodelink
from ..struct.graph import BELGraph
from ..version import get_version
__all__ = [
'to_bel_commons',
'from_bel_commons',
]
logger = logging.getLogger(__name__)
RECIEVE_ENDPOINT = '/api/receive/'
GET_ENDPOINT = '/api/network/{}/export/nodelink'
def _get_host() -> Optional[str]:
"""Find the host with :func:`pystow.get_config`.
Has two possibilities:
1. The PyBEL config entry ``PYBEL_REMOTE_HOST``, loaded in :mod:`pybel.constants`
2. The environment variable ``PYBEL_REMOTE_HOST``
"""
return pystow.get_config('pybel', 'remote_host')
def _get_user() -> Optional[str]:
return pystow.get_config('pybel', 'remote_user')
def _get_password() -> Optional[str]:
return pystow.get_config('pybel', 'remote_password')
def to_bel_commons(
graph: BELGraph,
host: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
public: bool = True,
) -> requests.Response:
"""Send a graph to the receiver service and returns the :mod:`requests` response object.
:param graph: A BEL graph
:param host: The location of the BEL Commons server. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_HOST`` or the environment as ``PYBEL_REMOTE_HOST``.
:param user: Username for BEL Commons. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_USER`` or the environment as ``PYBEL_REMOTE_USER``
:param password: Password for BEL Commons. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_PASSWORD`` or the environment as ``PYBEL_REMOTE_PASSWORD``
:param public: Should the network be made public?
:return: The response object from :mod:`requests`
"""
if host is None:
host = _get_host()
logger.debug('using host: %s', host)
if user is None:
user = _get_user()
if user is None:
raise ValueError('no user found')
if password is None:
password = _get_password()
if password is None:
raise ValueError('no password found')
url = host.rstrip('/') + RECIEVE_ENDPOINT
response = requests.post(
url,
json=to_nodelink(graph),
headers={
'content-type': 'application/json',
'User-Agent': 'PyBEL v{}'.format(get_version()),
'bel-commons-public': 'true' if public else 'false',
},
auth=(user, password),
)
logger.debug('received response: %s', response)
return response
def from_bel_commons(network_id: int, host: Optional[str] = None) -> BELGraph:
"""Retrieve a public network from BEL Commons.
In the future, this function may be extended to support authentication.
:param network_id: The BEL Commons network identifier
:param host: The location of the BEL Commons server. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_HOST`` or the environment as ``PYBEL_REMOTE_HOST``.
:raises: ValueError if host configuration can not be found
"""
if host is None:
host = _get_host()
if host is None:
raise ValueError('host not specified in arguments, PyBEL configuration, or environment.')
url = host + GET_ENDPOINT.format(network_id)
res = requests.get(url)
graph_json = res.json()
graph = from_nodelink(graph_json)
return graph
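# Illustrative sketch, not part of the original module: a round trip against a
# self-hosted BEL Commons instance. The host URL, credentials and network id
# are placeholders.
# graph = BELGraph(name='example', version='0.0.1')
# response = to_bel_commons(graph, host='https://bel-commons.example.org',
#                           user='alice@example.org', password='hunter2')
# graph_again = from_bel_commons(1, host='https://bel-commons.example.org')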
|
mit
| -2,507,534,870,051,758,600
| 31.504065
| 101
| 0.670585
| false
| 3.627949
| true
| false
| false
|
SebastianSchildt/potatonet-power
|
gui/epod.py
|
1
|
2803
|
import urwid
from yesno import ConfirmButton
class ElectricityPodlet(object):
def __init__(self, name, nr):
self.name=name
self.nr=nr
self.PWRstate='off'
self.ETHstate='unknown'
self.buildGui()
def buildGui(self):
txt=urwid.Text(('nodehead', self.name), align='center')
#headline=urwid.Filler(txt,"middle")
headline=urwid.Padding(txt,align="center")
headline=urwid.AttrMap(headline,'nodehead')
self.eth=urwid.Text('Eth Link: '+str(self.ETHstate),align='left', wrap="clip")
if self.PWRstate=='on':
#self.btn=urwid.Button("Switch PWR Off")
self.btn=ConfirmButton("Switch PWR Off", self.PWRPress)
self.pwr=urwid.Text( ('nodeON' ,'Power: '+str(self.PWRstate)), align='left')
else:
#self.btn=urwid.Button("Switch PWR On")
self.btn=ConfirmButton("Switch PWR On", self.PWRPress)
self.pwr=urwid.Text( ('nodeOFF' ,'Power: '+str(self.PWRstate)), align='left')
#urwid.connect_signal(self.btn, 'click', self.PWRPress, self.name)
#self.btnHolder=urwid.AttrMap(self.btn, 'btn', focus_map='reversed')
self.btnHolder=self.btn
#p=urwid.Pile([ urwid.BoxAdapter(headline,1), ('pack',self.pwr), ('pack',eth), ('pack',self.btnHolder) ])
p=urwid.Pile([ headline, ('pack',self.pwr), ('pack',self.eth), ('pack',self.btnHolder) ])
self.ui=urwid.LineBox(p)
def updatePowerState(self,state):
if state == True or state==1 or int(state) == 1:
self.PWRstate="on"
self.btn.set_label("Switch PWR Off")
self.pwr.set_text( ('nodeON','Power: '+str(self.PWRstate)))
else:
self.PWRstate='off'
self.btn.set_label("Switch PWR On")
self.pwr.set_text( ('nodeOFF','Power: '+str(self.PWRstate)))
def updateEthernetState(self,state):
if int(state) == 0:
self.ETHstate="disabled"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
elif int(state) == 1:
self.ETHstate="enabled, no link"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
elif int(state) == 2:
self.ETHstate="UNKNOWN"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
elif int(state) == 3:
self.ETHstate="enabled, link active"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeON','Eth Link: '+str(self.ETHstate)))
else:
self.ETHstate='UNKNOWN'
#self.btn.set_label("Switch PWR On")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
def PWRPress(self):
if self.PWRstate == 'off':
self.PWRstate='on'
self.btn.set_label("Switch PWR Off")
self.pwr.set_text( ('nodeON','Power: '+str(self.PWRstate)))
else:
self.PWRstate='off'
self.btn.set_label("Switch PWR On")
self.pwr.set_text( ('nodeOFF','Power: '+str(self.PWRstate)))
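# Illustrative sketch, not part of the original module: embedding one podlet in
# a minimal urwid main loop. The palette colours are arbitrary; the attribute
# names mirror the ones used above ('nodehead', 'nodeON', 'nodeOFF').
# pod = ElectricityPodlet("PotatoNET Node 1", 1)
# palette = [('nodehead', 'white', 'dark blue'),
#            ('nodeON', 'dark green', ''),
#            ('nodeOFF', 'dark red', ''),
#            ('reversed', 'standout', '')]
# urwid.MainLoop(urwid.Filler(pod.ui, 'top'), palette).run()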
|
mit
| -2,659,397,285,443,749,000
| 30.144444
| 107
| 0.663932
| false
| 2.619626
| false
| false
| false
|
zeta709/django-coffee-capsules
|
coffee_capsules/forms.py
|
1
|
2874
|
from django import forms
from django.contrib.admin import widgets
#from django.forms.extras import SelectDateWidget
from coffee_capsules.models import Purchase, PurchaseItem, Request
from coffee_capsules.widgets import SelectedReadonly
class PurchaseForm(forms.ModelForm):
class Meta:
model = Purchase
# References about AdminSplitDateTime():
# http://stackoverflow.com/questions/15643019/
def __init__(self, *args, **kwargs):
super(PurchaseForm, self).__init__(*args, **kwargs)
self.fields['begin_date'].widget = widgets.AdminSplitDateTime()
self.fields['end_date'].widget = widgets.AdminSplitDateTime()
class PurchaseItemForm(forms.ModelForm):
#default_price = forms.CharField()
class Meta:
model = PurchaseItem
widgets = {
'capsule': SelectedReadonly(),
}
def __init__(self, *args, **kwargs):
super(PurchaseItemForm, self).__init__(*args, **kwargs)
self.fields['capsule'].widget.attrs['readonly'] = 'readonly'
#self.fields['default_price'].widget = forms.HiddenInput()
#self.fields['default_price'].widget.attrs['readonly'] = 'readonly'
class MyRequestForm(forms.ModelForm):
class Meta:
model = Request
#fields = ('purchaseitem','user', 'quantity_queued',)
#readonly_fields = ('purchaseitem','user',)
exclude = ('user',)
widgets = {
'purchaseitem': SelectedReadonly(),
#'user': SelectedReadonly(),
#'user': forms.HiddenInput(),
#'user': forms.Select(),
#'user': forms.TextInput(),
}
def __init__(self, *args, **kwargs):
super(MyRequestForm, self).__init__(*args, **kwargs)
self.fields['purchaseitem'].widget.attrs['readonly'] = 'readonly'
self.fields['purchaseitem'].label = 'Item'
self.fields['quantity_queued'].label = 'Quantity'
def clean_quantity_queued(self):
qty = self.cleaned_data['quantity_queued']
my_u_unit = self.cleaned_data['purchaseitem'].purchase.u_unit
if qty < 0:
raise forms.ValidationError("Values cannot be negative.")
if qty % my_u_unit != 0:
            raise forms.ValidationError('Each value should be a multiple of '
+ str(my_u_unit))
return qty
def clean(self):
cleaned_data = super(MyRequestForm, self).clean()
purchaseitem = cleaned_data.get("purchaseitem")
purchase = purchaseitem.purchase
if purchase.is_not_open():
raise forms.ValidationError("The purchase is not yet open.")
if purchase.is_ended():
raise forms.ValidationError("The purchase is aleady ended.")
if purchase.is_closed:
raise forms.ValidationError("The purchase is closed.")
return cleaned_data
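# Illustrative sketch, not part of the original module: validating a request
# submission in a view. The field values and the request object are made up.
# form = MyRequestForm(data={'purchaseitem': some_purchase_item.pk,
#                            'quantity_queued': 20})
# if form.is_valid():
#     req = form.save(commit=False)
#     req.user = request.user   # 'user' is excluded above, so set it here
#     req.save()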
|
bsd-3-clause
| -2,064,641,096,433,153,300
| 36.324675
| 76
| 0.618302
| false
| 4.214076
| false
| false
| false
|
stephenlienharrell/WPEAR
|
wpear/DataConverter.py
|
1
|
9026
|
#! /usr/bin/env python
import os
import shlex
import shutil
import subprocess
import sys
WGRIB_PATH='./wgrib'
EGREP_PATH='egrep'
GREP_PATH='grep'
NCEP_GRID_TYPE=3
# http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html
class DataConverter:
FNULL=open(os.devnull, 'w')
def __init__(self, wgrib_path=WGRIB_PATH, egrep_path=EGREP_PATH, grep_path=GREP_PATH, ncep_grid_type=NCEP_GRID_TYPE):
self.wgrib_path = wgrib_path
self.egrep_path = egrep_path
self.grep_path = grep_path
self.grid_type = ncep_grid_type
def interploateGrid(self, inputfilepath, outputfilepath):
        # This isn't working. To fix, maybe see http://www.ftp.cpc.ncep.noaa.gov/wd51we/wgrib2/tricks.wgrib2 #21
cmd1 = '{} {} -new_grid ncep grid {} {}'.format(self.wgrib_path, inputfilepath, self.grid_type, outputfilepath)
ps = subprocess.check_output(shlex.split(cmd1), stderr=subprocess.STDOUT)
def extractMessages(self, inputfilepath, varlist, outputfilepath):
cmd1 = '{} {} -s'.format(self.wgrib_path, inputfilepath)
try:
ps = subprocess.check_output(shlex.split(cmd1), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# check if the e.output starts with :*** FATAL ERROR: Statistical processing bad n=0 ***
# http://www.ftp.cpc.ncep.noaa.gov/wd51we/wgrib2/tricks.wgrib2
# this may happen several times. in future, handle as many times as no of error messages
text = os.linesep.join([s for s in e.output.splitlines() if s])
print text
if text.startswith('*** FATAL ERROR: Statistical processing bad n=0 ***') :
lastline = text.splitlines()[-1]
errmsgno = int(str(lastline.split(':')[0]).strip())
# creating new file without error msg
newinputfilepath = os.path.splitext(inputfilepath)[0] + 'fixstat' + os.path.splitext(inputfilepath)[1]
newfilecmd1 = '{} {} -pdt'.format(self.wgrib_path, inputfilepath)
newfilecmd2 = '{} -v ^{}:'.format(self.egrep_path, errmsgno)
newfilecmd3 = '{} -i {} -grib {}'.format(self.wgrib_path, inputfilepath, newinputfilepath)
p1 = subprocess.Popen(shlex.split(newfilecmd1), stdout=subprocess.PIPE)
p2 = subprocess.Popen(shlex.split(newfilecmd2), stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(shlex.split(newfilecmd3), stdin=p2.stdout, stdout=self.FNULL)
p3.wait()
inputfilepath = newinputfilepath
cmd1 = '{} {} -s'.format(self.wgrib_path, newinputfilepath)
else:
print 'extract1message failed for file = {}\n'.format(inputfilepath)
pipe1 = subprocess.Popen(shlex.split(cmd1), stdout=subprocess.PIPE)
greplist = [self.grep_path]
for var in varlist:
greplist.append('-e')
greplist.append(var)
pipe2 = subprocess.Popen(greplist, stdin=pipe1.stdout, stdout=subprocess.PIPE)
cmd3 = '{} -i {} -grib {}'.format(self.wgrib_path, inputfilepath, outputfilepath)
pipe3 = subprocess.Popen(shlex.split(cmd3), stdin=pipe2.stdout, stdout=self.FNULL)
pipe3.wait()
def subsetRegion(self, inputfilepath, minlat, maxlat, minlon, maxlon, outputfilepath):
cmd = '{} {} -small_grib {}:{} {}:{} {} -set_grib_type same'.format(self.wgrib_path, inputfilepath, minlon, maxlon, minlat, maxlat, outputfilepath)
try:
subprocess.check_call(shlex.split(cmd), stdout=self.FNULL)
except subprocess.CalledProcessError as e:
print e.cmd
print e.returncode
print e.output
def extractMessagesAndSubsetRegion(self, inputfilepath, varlist, tempfiledir, minlat, maxlat, minlon, maxlon, outputfilepath):
try:
os.makedirs(tempfiledir)
except OSError:
pass
if tempfiledir.endswith('/'):
tempfilepath = tempfiledir + inputfilepath.split('/')[-1]
tempfilepath2 = tempfiledir + inputfilepath.split('/')[-1] + 'temp2'
else:
tempfilepath = tempfiledir + '/' + inputfilepath.split('/')[-1]
tempfilepath2 = tempfiledir + '/' + inputfilepath.split('/')[-1] + 'temp2'
self.subsetRegion(inputfilepath, minlat, maxlat, minlon, maxlon, tempfilepath)
self.extractMessages(tempfilepath, varlist, tempfilepath2)
if inputfilepath.split('/')[-1].startswith('hrrr'):
self.interpolateGridHRRR(tempfilepath2, outputfilepath)
elif inputfilepath.split('/')[-1].startswith('rtma'):
self.interpolateGridRTMA(tempfilepath2, outputfilepath)
else:
raise AttributeError('no known file format found')
os.remove(tempfilepath)
os.remove(tempfilepath2)
def interpolateGridHRRR(self, inputfilepath, outputfilepath, nx=500, ny=500, dx=1000, dy=1000):
# nx = number of grid points in x-direction
# ny = number of grid points in y-direction
# dx = grid cell size in meters in x-direction
# dy = grid cell size in meters in y direction
cmd = '{} -set_grib_type same {} -new_grid_winds grid -new_grid lambert:262.5:38.5:38.5 271.821305:{}:{} 38.261837:{}:{} {}'.format(self.wgrib_path, inputfilepath, nx, dx, ny, dy, outputfilepath)
try:
subprocess.check_call(shlex.split(cmd), stdout=self.FNULL)
except subprocess.CalledProcessError as e:
print e.cmd
print e.returncode
print e.output
def interpolateGridRTMA(self, inputfilepath, outputfilepath, nx=500, ny=500, dx=1000, dy=1000):
# nx = number of grid points in x-direction
# ny = number of grid points in y-direction
# dx = grid cell size in meters in x-direction
# dy = grid cell size in meters in y direction
cmd = '{} -set_grib_type same {} -new_grid_winds grid -new_grid lambert:265:25:25 272.014856:{}:{} 38.231829:{}:{} {}'.format(self.wgrib_path, inputfilepath, nx, dx, ny, dy, outputfilepath)
try:
subprocess.check_call(shlex.split(cmd), stdout=self.FNULL)
except subprocess.CalledProcessError as e:
print e.cmd
print e.returncode
print e.output
###############################################################################################
########################################### Test ##############################################
###############################################################################################
# dc = DataConverter()
###############################################################################################
######################################### extractMessages #####################################
###############################################################################################
# dc.extractMessages('sourceFileDownloads/rtma2p5.t00z.2dvaranl_ndfd.grb2', [':DPT:2 m above ground', ':TMP:2 m above ground'], 'sourceFileDownloads/em_rtma2p5.t00z.2dvaranl_ndfd.grb2')
# dc.extractMessages('sourceFileDownloads/hrrr.t00z.wrfsfcf18.grib2', [':TMP:500 mb', ':WIND:10 m above ground'], 'sourceFileDownloads/em_hrrr.t00z.wrfsfcf18.grib2')
# dc.extractMessages('sourceFileDownloads/hrrr.t00z.wrfsfcf00.grib2', [':TMP:500 mb', ':WIND:10 m above ground'], 'sourceFileDownloads/em_hrrr.t00z.wrfsfcf00.grib2')
###############################################################################################
######################################### subsetRegion ########################################
###############################################################################################
# dc.subsetRegion('sourceFileDownloads/em_rtma2p5.t00z.2dvaranl_ndfd.grb2', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_rtma2p5.t00z.2dvaranl_ndfd.grb2')
# dc.subsetRegion('sourceFileDownloads/em_hrrr.t00z.wrfsfcf18.grib2', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_hrrr.t00z.wrfsfcf18.grib2')
# dc.subsetRegion('sourceFileDownloads/em_hrrr.t00z.wrfsfcf00.grib2', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_hrrr.t00z.wrfsfcf00.grib2')
###############################################################################################
############################### extractMessagesAndSubsetRegion ################################
###############################################################################################
# dc.extractMessagesAndSubsetRegion('sourceFileDownloads/rtma2p5.t00z.2dvaranl_ndfd.grb2', [':DPT:2 m above ground', ':TMP:2 m above ground'], 'temp/', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_rtma2p5.t00z.2dvaranl_ndfd.grb2')
# dc.extractMessagesAndSubsetRegion('sourceFileDownloads/hrrr.t00z.wrfsfcf00.grib2', [':TMP:500 mb', ':WIND:10 m above ground'], 'temp', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_hrrr.t00z.wrfsfcf00.grib2')
|
gpl-3.0
| 3,493,325,894,899,479,600
| 54.374233
| 240
| 0.574008
| false
| 3.616186
| false
| false
| false
|
dgollub/pokealmanac
|
scripts/parse_pokeapi.py
|
1
|
12145
|
#!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2016 by Daniel Kurashige-Gollub <daniel@kurashige-gollub.de>
License MIT: see LICENSE file.
"""
"""
Download the API documentation for the PokeAPI.co site, parse it and
generate Swift structs/classes from it that allow us to easily
use the API in an iOS project.
WARNING: produces un-compilable code and wrong code at the moment.
This is due to the following:
- this code is not optimized/bug free
- the actual API documentation on the PokeAPI.co site has actual errors,
like listing the wrong data type
- the actual API documentation seems to have duplicate "Version"
definitions for the Version endpoint
- need a way to add custom method to the result struct that resolves
NamedAPIResourceList types into a list of the real type
- the PokeAPI documentation lacks information about optional results
i. e. results that can be empty/null
TODO(dkg): also generate SQL statements and Swift methods that allow us
to easily save and load the data gathered from the API on the device
in a SQLite database file.
"""
import os
import codecs
# import shutil
import sys
import traceback
# fix stdout utf-8 decoding/encoding errors
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
API_URL = "http://pokeapi.co/docsv2/"
if sys.version_info.major != 2 or sys.version_info.minor != 7:
print "This script was developed with Python 2.7.x and there is no guarantee that it will work with another version."
print "Please uncomment the version check yourself if you want to give it a try."
sys.exit(1)
try:
from bs4 import BeautifulSoup
except ImportError:
print "Please install the Python library BeautifulSoup4 first."
sys.exit(1)
try:
import lxml
except ImportError:
print "Please install the Python lxml library first."
sys.exit(1)
try:
import requests
except ImportError:
print "Please install the Python requests library first."
sys.exit(1)
def download_api_page():
print "Dowloading API documentation from %s" % API_URL
r = requests.get(API_URL)
if r.status_code != 200:
raise Exception("Could not download the Pokemon API site. Please check. Reason: %s" % (str(r.raw.read())))
print "Ok"
return unicode(r.text)
def parse_endpoint(soup, endpoint_id, already_done):
# special cases
# version ==> id is "versions"
if endpoint_id == "version":
endpoint_id = "versions"
header = soup.find("h2", id=endpoint_id)
if header is None:
print "Could not find header for endpoint '%s'!!!" % (endpoint_id)
return (None, False)
model_header = header.find_next_sibling("h4")
# TODO(dkg): example, url and desc are completely wrong at the moment - fix this!
desc_element = header.find_next_sibling("p")
if desc_element is None:
print "No description for %s" % (endpoint_id)
desc = ""
else:
desc = desc_element.text # NOTE(dkg): text loses all inner HTML elements though ... hmmm.
url_element = header.find_next_sibling("h3")
url = url_element.text if url_element is not None else ""
# example_element = header.find_next_sibling("pre")
example = ""
example_element = header.find_previous_sibling("pre")
if example_element is not None:
example_sib = example_element.find_next_sibling("h4")
if example_sib.text == model_header.text:
example = example_element.text if example_element is not None else ""
# print endpoint_id, header
# print desc
# print url
# print example
code = """
//
// %(category)s - %(name)s
// %(url)s
// %(desc)s
//
%(example)s
//
//
public class %(name)s : JSONJoy {
%(variables)s
public required init(_ decoder: JSONDecoder) throws {
%(trycatches)s
}
}"""
# TODO(dkg): what about optional variables????
variable = "public let %(name)s: %(type)s // %(comment)s"
decoder_array = """
guard let tmp%(tmpName)s = decoder["%(name)s"].array else { throw JSONError.WrongType }
var collect%(tmpName)s = [%(type)s]()
for tmpDecoder in tmp%(tmpName)s {
collect%(tmpName)s.append(try %(type)s(tmpDecoder))
}
%(name)s = collect%(tmpName)s
"""
decoder_type = """%(name)s = try %(type)s(decoder["%(name)s"])"""
decoder_var = """%(name)s = try decoder["%(name)s"].%(type)s"""
result = []
# raise Exception("Test")
while model_header is not None and model_header.text not in already_done:
model_table = model_header.find_next_sibling("table")
# print model_header
# print model_table
mt_body = model_table.find("tbody")
mt_rows = mt_body.find_all("tr")
variables = []
trycatches = []
for mt_row in mt_rows:
# print mt_row
columns = mt_row.find_all("td")
varname = columns[0].text
vardesc = columns[1].text
vartype = columns[-1].text
if vartype in ["integer", "string", "boolean"]:
typevar = "Int" if vartype == "integer" else "String" if vartype == "string" else "Bool"
varout = variable % {
"name": varname,
"type": typevar,
"comment": vardesc
}
decodetype = "getInt()" if vartype == "integer" else "getString()" if vartype == "string" else "bool"
decoderout = decoder_var % {
"name": varname,
"type": decodetype
}
elif "list" in vartype:
# example: list <a href="#berryflavormap">BerryFlavorMap</a>
if "integer" in vartype:
typename = "[Int]"
elif "string" in vartype:
typename = "[String]"
else:
anchors = columns[-1].find_all("a")
typename = anchors[-1].text if len(anchors) > 0 else "????"
if len(anchors) == 0:
raise Exception("What is this? %s %s" % (varname, model_header.text))
varout = variable % {
"name": varname,
"type": u"[%s]" % (typename),
"comment": vardesc
}
decoderout = decoder_array % {
"name": varname,
"type": typename,
"tmpName": varname.capitalize(),
}
elif "NamedAPIResource" in vartype:
# TODO(dkg): Need to add additional method that converts the NamedAPIResource URL to it's correct type.
# Example: BerryFirmness here points to a URL, instead of the full JSON for BerryFirmness.
# The struct therefore should provide a method that either returns the cached data or nil
# if no cached data is available. (What about if the actual API didn't provide any data?)
# example: <a href="#namedapiresource">NamedAPIResource</a> (<a href="#berry-firmnesses">BerryFirmness</a>)
typename = columns[-1].find_all("a")[-1].text
varout = variable % {
"name": varname,
"type": typename,
"comment": vardesc
}
decoderout = decoder_type % {
"name": varname,
"type": typename
}
else:
# TODO(dkg): this case emits some wrong code for certain cases - need to fix this
# Just handle this type as its own datatype
varout = variable % {
"name": varname,
"type": vartype,
"comment": vardesc
}
decoderout = decoder_var % {
"name": varname,
"type": vartype
}
# raise Exception("Variable '%s' datatype not handled: %s" % (varname, vartype))
variables.append(varout)
trycatches.append(decoderout)
# print varname, vardesc, vartype, varout
# return
tmp = code % {
"category": header.text,
"name": model_header.text.replace(" ", ""),
"desc": desc,
"url": url,
"example": u"\n".join(map(lambda line: u"// %s" % line, example.split("\n"))),
"variables": (u"\n%s" % (u" " * 4)).join(variables),
"trycatches": (u"\n%s" % (u" " * 8)).join(trycatches),
}
result.append(tmp)
already_done.append(model_header.text)
# get the next response model
model_header = model_header.find_next_sibling("h4")
# print "next model_header", model_header
# check if the next header belongs to a different endpoint
if model_header is not None and endpoint_id not in ["common-models", "resource-lists"]:
parent_header = model_header.find_previous_sibling("h2")
# print 'parent_header["id"]', endpoint_id, parent_header["id"]
if endpoint_id != parent_header["id"][1:]:
model_header = None
return ("\n".join(result), True)
def parse_api(api_data):
print "Gonna parse the data now ..."
soup = BeautifulSoup(api_data, "lxml")
# head_element = soup.find(id="pokeapi-v2-api-reference")
# nav_table = head_element.find_next_sibling("table")
# lists = nav_table.find_all("ul")
div = soup.find("div", class_="doc-select")
lists = filter(lambda l: len(l.attrs.keys()) == 0, div.find_all("li"))
api_endpoint_ids = []
for l in lists:
endpoint_id = l.a["href"]
if endpoint_id in ["#wrap", "#info"]:
continue
api_endpoint_ids.append(endpoint_id)
print api_endpoint_ids
already_done = []
result = []
for endpoint in api_endpoint_ids:
parsed_data, found = parse_endpoint(soup, endpoint[1:], already_done) # remove # char from the id
if found:
result.append(parsed_data)
return "\n".join(result)
def main():
print "Go!"
folder = os.path.join(CURRENT_PATH, "pokeapi.co")
if not os.path.exists(folder):
os.makedirs(folder)
api_file_name = os.path.join(folder, "api.html")
download_api = True
ask = "dontask" not in sys.argv
if os.path.exists(api_file_name):
if ask:
user_input = (raw_input("A local copy of the API site exists already. Do you want to download it anyway and overwrite the local copy? yes/[no]: ") or "").strip().lower()[:1]
download_api = user_input in ["y", "j"]
else:
download_api = False
if download_api:
api_site_data = download_api_page()
with codecs.open(api_file_name, "w", "utf-8") as f:
f.write(api_site_data)
else:
with codecs.open(api_file_name, "r", "utf-8") as f:
api_site_data = f.read()
parsed_api = parse_api(api_site_data)
if len(parsed_api) > 0:
# print parsed_api # TODO(dkg): write to a file
output_file = os.path.join(folder, "pokeapi-generated.swift")
with codecs.open(output_file, "w", "utf-8") as f:
f.write("//\n// This file was generated by a Python script.\n// DO NOT USE THIS CODE DIRECTLY! IT DOES NOT COMPILE!\n//\n\n")
f.write("//\n// There are documentation errors in the API, so some types are wrong.\n// Double check everything before ")
f.write("using any of this generated code.\n// DO NOT USE THIS CODE DIRECTLY! IT DOES NOT COMPILE!\n//\n\n")
f.write(parsed_api)
f.write("\n")
print "Wrote %s" % (output_file)
print "Done."
try:
main()
except Exception as ex:
print "Something went wrong. Oops."
print ex
traceback.print_exc(file=sys.stdout)
|
mit
| 4,508,636,457,054,790,700
| 36.140673
| 185
| 0.573322
| false
| 3.858005
| false
| false
| false
|
kartikluke/yotube
|
googleapiclient/errors.py
|
1
|
3516
|
#!/usr/bin/python2.4
#
# Copyright (C) 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from oauth2client import util
from oauth2client.anyjson import simplejson
class Error(Exception):
"""Base error for this module."""
pass
class HttpError(Error):
"""HTTP data was invalid or unexpected."""
@util.positional(3)
def __init__(self, resp, content, uri=None):
self.resp = resp
self.content = content
self.uri = uri
def _get_reason(self):
"""Calculate the reason for the error from the response content."""
reason = self.resp.reason
try:
data = simplejson.loads(self.content)
reason = data['error']['message']
except (ValueError, KeyError):
pass
if reason is None:
reason = ''
return reason
def __repr__(self):
if self.uri:
return '<HttpError %s when requesting %s returned "%s">' % (
self.resp.status, self.uri, self._get_reason().strip())
else:
return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
__str__ = __repr__
class InvalidJsonError(Error):
"""The JSON returned could not be parsed."""
pass
class UnknownFileType(Error):
"""File type unknown or unexpected."""
pass
class UnknownLinkType(Error):
"""Link type unknown or unexpected."""
pass
class UnknownApiNameOrVersion(Error):
"""No API with that name and version exists."""
pass
class UnacceptableMimeTypeError(Error):
"""That is an unacceptable mimetype for this operation."""
pass
class MediaUploadSizeError(Error):
"""Media is larger than the method can accept."""
pass
class ResumableUploadError(HttpError):
"""Error occured during resumable upload."""
pass
class InvalidChunkSizeError(Error):
"""The given chunksize is not valid."""
pass
class InvalidNotificationError(Error):
"""The channel Notification is invalid."""
pass
class BatchError(HttpError):
"""Error occured during batch operations."""
@util.positional(2)
def __init__(self, reason, resp=None, content=None):
self.resp = resp
self.content = content
self.reason = reason
def __repr__(self):
return '<BatchError %s "%s">' % (self.resp.status, self.reason)
__str__ = __repr__
class UnexpectedMethodError(Error):
"""Exception raised by RequestMockBuilder on unexpected calls."""
@util.positional(1)
def __init__(self, methodId=None):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedMethodError, self).__init__(
'Received unexpected call %s' % methodId)
class UnexpectedBodyError(Error):
"""Exception raised by RequestMockBuilder on unexpected bodies."""
def __init__(self, expected, provided):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedBodyError, self).__init__(
'Expected: [%s] - Provided: [%s]' % (expected, provided))
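

# Illustrative sketch (not part of the library): _FakeResponse below is a
# hypothetical stand-in for an httplib2 response object, used only to show how
# HttpError pulls the human-readable reason out of a JSON error body.
if __name__ == '__main__':
  class _FakeResponse(object):
    status = 404
    reason = 'Not Found'

  _err = HttpError(_FakeResponse(), '{"error": {"message": "video not found"}}',
                   uri='https://www.googleapis.com/youtube/v3/videos')
  # Prints: <HttpError 404 when requesting https://www.googleapis.com/youtube/v3/videos returned "video not found">
  print _err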
|
mit
| -440,186,985,566,424,600
| 24.114286
| 75
| 0.685438
| false
| 3.959459
| false
| false
| false
|
Bartzi/stn-ocr
|
datasets/fsns/tfrecord_utils/tfrecord_to_image.py
|
1
|
2073
|
import argparse
import csv
import os
import re
import numpy as np
import tensorflow as tf
from PIL import Image
FILENAME_PATTERN = re.compile(r'.+-(\d+)-of-(\d+)')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='tool that takes tfrecord files and extracts all images + labels from it')
parser.add_argument('tfrecord_dir', help='path to directory containing tfrecord files')
parser.add_argument('destination_dir', help='path to dir where resulting images shall be saved')
parser.add_argument('stage', help='stage of training these files are for [e.g. train]')
args = parser.parse_args()
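    # Example invocation (paths are illustrative):
    #   python tfrecord_to_image.py /data/fsns/train-tfrecords /data/fsns-images train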
tfrecord_files = os.listdir(args.tfrecord_dir)
tfrecord_files = sorted(tfrecord_files, key=lambda x: int(FILENAME_PATTERN.match(x).group(1)))
with open(os.path.join(args.destination_dir, '{}.csv'.format(args.stage)), 'w') as label_file:
writer = csv.writer(label_file, delimiter='\t')
for tfrecord_file in tfrecord_files:
tfrecord_filename = os.path.join(args.tfrecord_dir, tfrecord_file)
file_id = FILENAME_PATTERN.match(tfrecord_file).group(1)
dest_dir = os.path.join(args.destination_dir, args.stage, file_id)
os.makedirs(dest_dir, exist_ok=True)
record_iterator = tf.python_io.tf_record_iterator(path=tfrecord_filename)
for idx, string_record in enumerate(record_iterator):
example = tf.train.Example()
example.ParseFromString(string_record)
labels = example.features.feature['image/class'].int64_list.value
img_string = example.features.feature['image/encoded'].bytes_list.value[0]
file_name = os.path.join(dest_dir, '{}.png'.format(idx))
with open(file_name, 'wb') as f:
f.write(img_string)
label_file_data = [file_name]
label_file_data.extend(labels)
writer.writerow(label_file_data)
print("recovered {:0>6} files".format(idx), end='\r')
|
gpl-3.0
| -3,318,274,954,810,785,300
| 38.113208
| 123
| 0.636276
| false
| 3.701786
| false
| false
| false
|
olysonek/tuned
|
tuned/plugins/plugin_bootloader.py
|
1
|
12902
|
from . import base
from .decorators import *
import tuned.logs
from . import exceptions
from tuned.utils.commands import commands
import tuned.consts as consts
import os
import re
import tempfile
log = tuned.logs.get()
class BootloaderPlugin(base.Plugin):
"""
Plugin for tuning bootloader options.
Currently only grub2 is supported and reboot is required to apply the tunings.
These tunings are unloaded only on profile change followed by reboot.
"""
def __init__(self, *args, **kwargs):
if not os.path.isfile(consts.GRUB2_TUNED_TEMPLATE_PATH):
raise exceptions.NotSupportedPluginException("Required GRUB2 template not found, disabling plugin.")
super(BootloaderPlugin, self).__init__(*args, **kwargs)
self._cmd = commands()
def _instance_init(self, instance):
instance._has_dynamic_tuning = False
instance._has_static_tuning = True
# controls grub2_cfg rewrites in _instance_post_static
self.update_grub2_cfg = False
self._initrd_remove_dir = False
self._initrd_dst_img_val = None
self._cmdline_val = ""
self._initrd_val = ""
self._grub2_cfg_file_names = self._get_grub2_cfg_files()
def _instance_cleanup(self, instance):
pass
@classmethod
def _get_config_options(cls):
return {
"grub2_cfg_file": None,
"initrd_dst_img": None,
"initrd_add_img": None,
"initrd_add_dir": None,
"initrd_remove_dir": None,
"cmdline": None,
}
def _get_effective_options(self, options):
"""Merge provided options with plugin default options and merge all cmdline.* options."""
effective = self._get_config_options().copy()
cmdline_keys = []
for key in options:
if str(key).startswith("cmdline"):
cmdline_keys.append(key)
elif key in effective:
effective[key] = options[key]
else:
log.warn("Unknown option '%s' for plugin '%s'." % (key, self.__class__.__name__))
cmdline_keys.sort()
cmdline = ""
for key in cmdline_keys:
val = options[key]
if val is None or val == "":
continue
op = val[0]
vals = val[1:].strip()
if op == "+" and vals != "":
cmdline += " " + vals
elif op == "-" and vals != "":
for p in vals.split():
regex = re.escape(p)
cmdline = re.sub(r"(\A|\s)" + regex + r"(?=\Z|\s)", r"", cmdline)
else:
cmdline += " " + val
cmdline = cmdline.strip()
if cmdline != "":
effective["cmdline"] = cmdline
return effective
def _get_grub2_cfg_files(self):
cfg_files = []
for f in consts.GRUB2_CFG_FILES:
if os.path.exists(f):
cfg_files.append(f)
return cfg_files
def _patch_bootcmdline(self, d):
return self._cmd.add_modify_option_in_file(consts.BOOT_CMDLINE_FILE, d)
def _remove_grub2_tuning(self):
if not self._grub2_cfg_file_names:
log.info("cannot find grub.cfg to patch")
return
self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : "", consts.BOOT_CMDLINE_INITRD_ADD_VAR : ""})
for f in self._grub2_cfg_file_names:
self._cmd.add_modify_option_in_file(f, {"set\s+" + consts.GRUB2_TUNED_VAR : "", "set\s+" + consts.GRUB2_TUNED_INITRD_VAR : ""}, add = False)
if self._initrd_dst_img_val is not None:
log.info("removing initrd image '%s'" % self._initrd_dst_img_val)
self._cmd.unlink(self._initrd_dst_img_val)
def _instance_unapply_static(self, instance, full_rollback = False):
if full_rollback:
log.info("removing grub2 tuning previously added by Tuned")
self._remove_grub2_tuning()
self._update_grubenv({"tuned_params" : "", "tuned_initrd" : ""})
def _grub2_cfg_unpatch(self, grub2_cfg):
log.debug("unpatching grub.cfg")
cfg = re.sub(r"^\s*set\s+" + consts.GRUB2_TUNED_VAR + "\s*=.*\n", "", grub2_cfg, flags = re.MULTILINE)
grub2_cfg = re.sub(r" *\$" + consts.GRUB2_TUNED_VAR, "", cfg, flags = re.MULTILINE)
cfg = re.sub(r"^\s*set\s+" + consts.GRUB2_TUNED_INITRD_VAR + "\s*=.*\n", "", grub2_cfg, flags = re.MULTILINE)
grub2_cfg = re.sub(r" *\$" + consts.GRUB2_TUNED_INITRD_VAR, "", cfg, flags = re.MULTILINE)
cfg = re.sub(consts.GRUB2_TEMPLATE_HEADER_BEGIN + r"\n", "", grub2_cfg, flags = re.MULTILINE)
return re.sub(consts.GRUB2_TEMPLATE_HEADER_END + r"\n+", "", cfg, flags = re.MULTILINE)
def _grub2_cfg_patch_initial(self, grub2_cfg, d):
log.debug("initial patching of grub.cfg")
s = r"\1\n\n" + consts.GRUB2_TEMPLATE_HEADER_BEGIN + "\n"
for opt in d:
s += r"set " + self._cmd.escape(opt) + "=\"" + self._cmd.escape(d[opt]) + "\"\n"
s += consts.GRUB2_TEMPLATE_HEADER_END + r"\n"
grub2_cfg = re.sub(r"^(\s*###\s+END\s+[^#]+/00_header\s+### *)\n", s, grub2_cfg, flags = re.MULTILINE)
d2 = {"linux" : consts.GRUB2_TUNED_VAR, "initrd" : consts.GRUB2_TUNED_INITRD_VAR}
for i in d2:
# add tuned parameters to all kernels
grub2_cfg = re.sub(r"^(\s*" + i + r"(16|efi)?\s+.*)$", r"\1 $" + d2[i], grub2_cfg, flags = re.MULTILINE)
# remove tuned parameters from rescue kernels
grub2_cfg = re.sub(r"^(\s*" + i + r"(?:16|efi)?\s+\S+rescue.*)\$" + d2[i] + r" *(.*)$", r"\1\2", grub2_cfg, flags = re.MULTILINE)
# fix whitespaces in rescue kernels
grub2_cfg = re.sub(r"^(\s*" + i + r"(?:16|efi)?\s+\S+rescue.*) +$", r"\1", grub2_cfg, flags = re.MULTILINE)
return grub2_cfg
def _grub2_default_env_patch(self):
grub2_default_env = self._cmd.read_file(consts.GRUB2_DEFAULT_ENV_FILE)
if len(grub2_default_env) <= 0:
log.info("cannot read '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
return False
d = {"GRUB_CMDLINE_LINUX_DEFAULT" : consts.GRUB2_TUNED_VAR, "GRUB_INITRD_OVERLAY" : consts.GRUB2_TUNED_INITRD_VAR}
write = False
for i in d:
if re.search(r"^[^#]*\b" + i + r"\s*=.*\\\$" + d[i] + r"\b.*$", grub2_default_env, flags = re.MULTILINE) is None:
write = True
if grub2_default_env[-1] != "\n":
grub2_default_env += "\n"
grub2_default_env += i + "=\"${" + i + ":+$" + i + r" }\$" + d[i] + "\"\n"
if write:
log.debug("patching '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
self._cmd.write_to_file(consts.GRUB2_DEFAULT_ENV_FILE, grub2_default_env)
return True
def _grub2_cfg_patch(self, d):
log.debug("patching grub.cfg")
if not self._grub2_cfg_file_names:
log.info("cannot find grub.cfg to patch")
return False
for f in self._grub2_cfg_file_names:
grub2_cfg = self._cmd.read_file(f)
if len(grub2_cfg) <= 0:
log.info("cannot patch %s" % f)
continue
log.debug("adding boot command line parameters to '%s'" % f)
grub2_cfg_new = grub2_cfg
patch_initial = False
for opt in d:
(grub2_cfg_new, nsubs) = re.subn(r"\b(set\s+" + opt + "\s*=).*$", r"\1" + "\"" + d[opt] + "\"", grub2_cfg_new, flags = re.MULTILINE)
if nsubs < 1 or re.search(r"\$" + opt, grub2_cfg, flags = re.MULTILINE) is None:
patch_initial = True
# workaround for rhbz#1442117
if len(re.findall(r"\$" + consts.GRUB2_TUNED_VAR, grub2_cfg, flags = re.MULTILINE)) != \
len(re.findall(r"\$" + consts.GRUB2_TUNED_INITRD_VAR, grub2_cfg, flags = re.MULTILINE)):
patch_initial = True
if patch_initial:
grub2_cfg_new = self._grub2_cfg_patch_initial(self._grub2_cfg_unpatch(grub2_cfg), d)
self._cmd.write_to_file(f, grub2_cfg_new)
self._grub2_default_env_patch()
return True
def _grub2_update(self):
self._grub2_cfg_patch({consts.GRUB2_TUNED_VAR : self._cmdline_val, consts.GRUB2_TUNED_INITRD_VAR : self._initrd_val})
self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : self._cmdline_val, consts.BOOT_CMDLINE_INITRD_ADD_VAR : self._initrd_val})
def _has_bls(self):
return os.path.exists(consts.BLS_ENTRIES_PATH)
def _update_grubenv(self, d):
log.debug("updating grubenv, setting %s" % str(d));
l = ["%s=%s" % (str(option), str(value)) for option, value in d.items()]
(rc, out) = self._cmd.execute(["grub2-editenv", "-", "set"] + l)
if rc != 0:
log.warn("cannot update grubenv: '%s'" % out)
return False;
return True
def _bls_entries_patch_initial(self):
machine_id = self._cmd.get_machine_id()
if machine_id == "":
return False
log.debug("running kernel update hook '%s' to patch BLS entries" % consts.KERNEL_UPDATE_HOOK_FILE)
(rc, out) = self._cmd.execute([consts.KERNEL_UPDATE_HOOK_FILE, "add"], env = {"KERNEL_INSTALL_MACHINE_ID" : machine_id})
if rc != 0:
log.warn("cannot patch BLS entries: '%s'" % out)
return False
return True
def _bls_update(self):
log.debug("updating BLS")
if self._has_bls() and \
self._update_grubenv({"tuned_params" : self._cmdline_val, "tuned_initrd" : self._initrd_val}) and \
self._bls_entries_patch_initial():
return True
return False
def _init_initrd_dst_img(self, name):
if self._initrd_dst_img_val is None:
self._initrd_dst_img_val = os.path.join(consts.BOOT_DIR, os.path.basename(name))
def _check_petitboot(self):
return os.path.isdir(consts.PETITBOOT_DETECT_DIR)
def _install_initrd(self, img):
if self._check_petitboot():
log.warn("Detected Petitboot which doesn't support initrd overlays. The initrd overlay will be ignored by bootloader.")
log.info("installing initrd image as '%s'" % self._initrd_dst_img_val)
img_name = os.path.basename(self._initrd_dst_img_val)
if not self._cmd.copy(img, self._initrd_dst_img_val):
return False
self.update_grub2_cfg = True
curr_cmdline = self._cmd.read_file("/proc/cmdline").rstrip()
initrd_grubpath = "/"
lc = len(curr_cmdline)
if lc:
path = re.sub(r"^\s*BOOT_IMAGE=\s*(\S*/).*$", "\\1", curr_cmdline)
if len(path) < lc:
initrd_grubpath = path
self._initrd_val = os.path.join(initrd_grubpath, img_name)
return True
@command_custom("grub2_cfg_file")
def _grub2_cfg_file(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
self._grub2_cfg_file_names = [str(value)]
@command_custom("initrd_dst_img")
def _initrd_dst_img(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
self._initrd_dst_img_val = str(value)
if self._initrd_dst_img_val == "":
return False
if self._initrd_dst_img_val[0] != "/":
self._initrd_dst_img_val = os.path.join(consts.BOOT_DIR, self._initrd_dst_img_val)
@command_custom("initrd_remove_dir")
def _initrd_remove_dir(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
self._initrd_remove_dir = self._cmd.get_bool(value) == "1"
@command_custom("initrd_add_img", per_device = False, priority = 10)
def _initrd_add_img(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
src_img = str(value)
self._init_initrd_dst_img(src_img)
if src_img == "":
return False
if not self._install_initrd(src_img):
return False
@command_custom("initrd_add_dir", per_device = False, priority = 10)
def _initrd_add_dir(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
src_dir = str(value)
self._init_initrd_dst_img(src_dir)
if src_dir == "":
return False
if not os.path.isdir(src_dir):
log.error("error: cannot create initrd image, source directory '%s' doesn't exist" % src_dir)
return False
log.info("generating initrd image from directory '%s'" % src_dir)
(fd, tmpfile) = tempfile.mkstemp(prefix = "tuned-bootloader-", suffix = ".tmp")
log.debug("writing initrd image to temporary file '%s'" % tmpfile)
os.close(fd)
(rc, out) = self._cmd.execute("find . | cpio -co > %s" % tmpfile, cwd = src_dir, shell = True)
log.debug("cpio log: %s" % out)
if rc != 0:
log.error("error generating initrd image")
self._cmd.unlink(tmpfile, no_error = True)
return False
self._install_initrd(tmpfile)
self._cmd.unlink(tmpfile)
if self._initrd_remove_dir:
log.info("removing directory '%s'" % src_dir)
self._cmd.rmtree(src_dir)
@command_custom("cmdline", per_device = False, priority = 10)
def _cmdline(self, enabling, value, verify, ignore_missing):
v = self._variables.expand(self._cmd.unquote(value))
if verify:
cmdline = self._cmd.read_file("/proc/cmdline")
if len(cmdline) == 0:
return None
cmdline_set = set(cmdline.split())
value_set = set(v.split())
cmdline_intersect = cmdline_set.intersection(value_set)
if cmdline_intersect == value_set:
log.info(consts.STR_VERIFY_PROFILE_VALUE_OK % ("cmdline", str(value_set)))
return True
else:
log.error(consts.STR_VERIFY_PROFILE_VALUE_FAIL % ("cmdline", str(cmdline_intersect), str(value_set)))
return False
if enabling and value is not None:
log.info("installing additional boot command line parameters to grub2")
self.update_grub2_cfg = True
self._cmdline_val = v
def _instance_post_static(self, instance, enabling):
if enabling and self.update_grub2_cfg:
self._grub2_update()
self._bls_update()
self.update_grub2_cfg = False
|
gpl-2.0
| -48,616,862,515,739,710
| 36.61516
| 143
| 0.654782
| false
| 2.690158
| false
| false
| false
|
ajhager/copycat
|
copycat/coderack/codelets/bond.py
|
1
|
10023
|
# Copyright (c) 2007-2017 Joseph Hager.
#
# Copycat is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License,
# as published by the Free Software Foundation.
#
# Copycat is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Copycat; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Bond Codelets"""
import copycat.toolbox as toolbox
from copycat.coderack import Codelet
class BondBottomUpScout(Codelet):
"""Choose an object and a neighbor of that object probabilistically by
intra string salience. Choose a bond facet probabilistically by
relevance in the string. Check if there is a bond between the two
descriptors of this facet. Post a bond strength tester codelet with
urgency a function of the degree of association of bonds of the bond
category."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
from_object = workspace.choose_object('intra_string_salience')
to_object = from_object.choose_neighbor()
if not to_object:
return # Fizzle
bond_facet = workspace.choose_bond_facet(from_object, to_object)
if not bond_facet:
return # Fizzle
from_descriptor = from_object.get_descriptor(bond_facet)
to_descriptor = to_object.get_descriptor(bond_facet)
if not from_descriptor or not to_descriptor:
return # Fizzle
bond_category = slipnet.get_bond_category(from_descriptor, to_descriptor)
if not bond_category:
return # Fizzle
return workspace.propose_bond(from_object, to_object, bond_category,
bond_facet, from_descriptor, to_descriptor)
class BondBuilder(Codelet):
"""Attempt to build the proposed bond, fighting with any competitiors."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
bond = self.arguments[0]
string = bond.string
from_object = bond.from_object
to_object = bond.to_object
objects = workspace.objects()
if (from_object not in objects) or (to_object not in objects):
return # Fizzle
existing_bond = string.get_existing_bond(bond)
if existing_bond:
existing_bond.bond_category.activation_buffer += workspace.activation
direction_category = existing_bond.direction_category
if direction_category:
direction_category.activation_buffer += workspace.activation
string.remove_proposed_bond(bond)
return # Fizzle
string.remove_proposed_bond(bond)
incompatible_bonds = bond.incompatible_bonds()
if not workspace.fight_it_out(bond, 1, incompatible_bonds, 1):
return # Fizzle
incompatible_groups = workspace.get_common_groups(from_object, to_object)
spans = [group.letter_span() for group in incompatible_groups]
strength = 0 if len(spans) == 0 else max(spans)
if not workspace.fight_it_out(bond, 1, incompatible_groups, strength):
return # Fizzle
incompatible_corrs = []
at_edge = bond.is_leftmost_in_string() or bond.is_rightmost_in_string()
if bond.direction_category and at_edge:
incompatible_corrs = bond.incompatible_correspondences()
if not workspace.fight_it_out(bond, 2, incompatible_corrs, 3):
return # Fizzle
for ibond in incompatible_bonds:
workspace.break_bond(ibond)
for igroup in incompatible_groups:
workspace.break_group(igroup)
for icorrespondence in incompatible_corrs:
workspace.break_correspondence(icorrespondence)
return workspace.build_bond(bond)
class BondStrengthTester(Codelet):
"""Calculate the proposed bond's strength and decide probabilistically
whether to post a bond builder codelet with urgency a function of the
strength."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
bond = self.arguments[0]
bond.update_strengths()
strength = bond.total_strength
probability = strength / 100.0
probability = workspace.temperature_adjusted_probability(probability)
if not toolbox.flip_coin(probability):
bond.string.remove_proposed_bond(bond)
return # Fizzle
bond.proposal_level = 2
bond.from_object_descriptor.activation_buffer += workspace.activation
bond.to_object_descriptor.activation_buffer += workspace.activation
bond.bond_facet.activation_buffer += workspace.activation
return [(BondBuilder([bond]), strength)]
class BondTopDownCategoryScout(Codelet):
"""Choose a string probabilistically by the relevance of the category in
the string and the string's unhappiness. Chooses an object and a neighbor
    of the object in the string probabilistically by intra string salience.
    Choose a bond facet probabilistically by relevance in the string.
Checks if there is a bond of the category between the two descriptors of
the facet, posting a bond strength tester codelet with urgency a function
of the degree of association of bonds of the category."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
category = self.arguments[0]
initial_string = workspace.initial_string
target_string = workspace.target_string
i_relevance = initial_string.local_bond_category_relevance(category)
t_relevance = target_string.local_bond_category_relevance(category)
i_unhappiness = initial_string.intra_string_unhappiness
t_unhappiness = target_string.intra_string_unhappiness
values = [round(toolbox.average(i_relevance, i_unhappiness)),
round(toolbox.average(t_relevance, t_unhappiness))]
string = toolbox.weighted_select(values, [initial_string, target_string])
obj = string.get_random_object('intra_string_salience')
neighbor = obj.choose_neighbor()
if neighbor is None:
return # Fizzle
facet = workspace.choose_bond_facet(obj, neighbor)
if facet is None:
return # Fizzle
object_descriptor = obj.get_descriptor(facet)
neighbor_descriptor = neighbor.get_descriptor(facet)
if object_descriptor is None or neighbor_descriptor is None:
return # Fizzle
if slipnet.get_bond_category(object_descriptor,
neighbor_descriptor) == category:
from_object = obj
to_object = neighbor
from_descriptor = object_descriptor
to_descriptor = neighbor_descriptor
elif slipnet.get_bond_category(neighbor_descriptor,
object_descriptor) == category:
from_object = neighbor
to_object = obj
from_descriptor = neighbor_descriptor
to_descriptor = object_descriptor
else:
return # Fizzle
return workspace.propose_bond(from_object, to_object, category, facet,
from_descriptor, to_descriptor)
class BondTopDownDirectionScout(Codelet):
"""Choose a string probabilistically by the relevance of the direction
category in the string and the string's unhappiness. Chooses an object
    in the string probabilistically by intra string salience. Chooses a
neighbor of the object in the given direction. Chooses a bond facet
probabilistically by relevance in the string. Checks if there is a
bond of the given direction between the two descriptors of the facet,
posting a bond strength tester codelet with urgency a function of the
degree of association of bonds of the bond category."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
category = self.arguments[0]
initial_string = workspace.initial_string
target_string = workspace.target_string
i_relevance = initial_string.local_direction_category_relevance(category)
t_relevance = target_string.local_direction_category_relevance(category)
i_unhappiness = initial_string.intra_string_unhappiness
t_unhappiness = target_string.intra_string_unhappiness
values = [round(toolbox.average(i_relevance, i_unhappiness)),
round(toolbox.average(t_relevance, t_unhappiness))]
string = toolbox.weighted_select(values, [initial_string, target_string])
obj = string.get_random_object('intra_string_salience')
if category == slipnet.plato_left:
neighbor = obj.choose_left_neighbor()
elif category == slipnet.plato_right:
neighbor = obj.choose_right_neighbor()
if neighbor is None:
return # Fizzle
facet = workspace.choose_bond_facet(obj, neighbor)
if facet is None:
return # Fizzle
object_descriptor = obj.get_descriptor(facet)
neighbor_descriptor = neighbor.get_descriptor(facet)
if object_descriptor is None or neighbor_descriptor is None:
return # Fizzle
bond_category = slipnet.get_bond_category(object_descriptor,
neighbor_descriptor)
if bond_category is None or not bond_category.directed:
return # Fizzle
return workspace.propose_bond(obj, neighbor,
bond_category, facet,
object_descriptor, neighbor_descriptor)
|
gpl-2.0
| 1,317,708,017,138,761,200
| 40.589212
| 81
| 0.663474
| false
| 4.171036
| false
| false
| false
|
jwilliamn/handwritten
|
modeling/svm/mnist_helpers.py
|
1
|
2813
|
# Standard scientific Python imports
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import numpy as np
def show_some_digits(images, targets, sample_size=24, title_text='Digit {}' ):
'''
Visualize random digits in a grid plot
    images - array of flattened digit images, one image per row [:, 1024]
targets - final labels
'''
nsamples=sample_size
rand_idx = np.random.choice(images.shape[0],nsamples)
images_and_labels = list(zip(images[rand_idx], targets[rand_idx]))
img = plt.figure(1, figsize=(15, 12), dpi=160)
for index, (image, label) in enumerate(images_and_labels):
plt.subplot(np.ceil(nsamples/6.0), 6, index + 1)
plt.axis('off')
        # each image is flat, so we reshape it to a 2D 32x32 array (1024 values)
plt.imshow(image.reshape(32,32), cmap=plt.cm.gray_r, interpolation='nearest')
plt.title(title_text.format(label))
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
"""
Plots confusion matrix,
cm - confusion matrix
"""
plt.figure(1, figsize=(15, 12), dpi=160)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def plot_param_space_scores(scores, C_range, gamma_range):
"""
Draw heatmap of the validation accuracy as a function of gamma and C
Parameters
----------
scores - 2D numpy array with accuracies
"""
#
    # The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.jet,
norm=MidpointNormalize(vmin=0.5, midpoint=0.9))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
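

# Minimal self-check (illustrative only): exercises show_some_digits() on random
# noise; fake_images/fake_labels are made-up stand-ins for real flattened 32x32
# digit images and their labels.
if __name__ == '__main__':
    fake_images = np.random.rand(60, 1024)            # 60 flattened 32x32 "images"
    fake_labels = np.random.randint(0, 10, size=60)   # fake digit labels 0-9
    show_some_digits(fake_images, fake_labels, sample_size=12)
    plt.show()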
|
gpl-3.0
| 1,047,688,784,952,043,300
| 32.105882
| 85
| 0.648774
| false
| 3.477132
| false
| false
| false
|
bw57899/aws-command-line-cli
|
ec2.export.csv.py
|
1
|
1212
|
#!/usr/bin/env python
# Based on the script found here: http://cloudbuzz.wordpress.com/2011/02/15/336/
import boto.ec2
csv_file = open('instances.csv','w+')
def process_instance_list(connection):
map(build_instance_list,connection.get_all_instances())
def build_instance_list(reservation):
map(write_instances,reservation.instances)
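
# Each row written to instances.csv contains, in order:
#   instance id, Name tag, environment tag, private IP, state, availability zone,
#   architecture, VPC id, kernel id, instance type, AMI id, launch time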
def write_instances(instance):
environment = '-'
if 'environment' in instance.tags:
environment = instance.tags['environment']
# For more parameters to the boto.ec2.instance.Instance object, see here: http://boto.readthedocs.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# In our case, we use the "environment" tag to distinguish between dev/staging/prod instances.
csv_file.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"%(instance.id,instance.tags['Name'],environment,instance.private_ip_address,
instance.state,instance.placement,instance.architecture, instance.vpc_id, instance.kernel, instance.instance_type, instance.image_id, instance.launch_time))
csv_file.flush()
# You need to change the region name below.
if __name__=="__main__":
connection = boto.ec2.connect_to_region('eu-west-1')
process_instance_list(connection)
csv_file.close()
|
gpl-2.0
| 4,554,851,283,583,734,000
| 36.875
| 160
| 0.735974
| false
| 3.284553
| false
| false
| false
|
yuvipanda/edit-stats
|
dmz/store.py
|
1
|
3209
|
"""Implements a db backed storage area for intermediate results"""
import sqlite3
class Store(object):
"""
    Represents an sqlite3-backed storage area, loosely key-value modeled,
    for intermediate storage of metadata and metric data about multiple
    wikis that have some underlying country-related basis
"""
_initial_sql_ = [
'CREATE TABLE IF NOT EXISTS meta (key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS meta_key ON meta(key);',
'CREATE TABLE IF NOT EXISTS wiki_meta (wiki, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS wiki_meta_key ON wiki_meta(wiki, key);',
'CREATE TABLE IF NOT EXISTS country_info (wiki, country, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS country_info_key ON country_info(wiki, country, key);'
]
def __init__(self, path):
"""Initialize a store at the given path.
Creates the tables required if they do not exist"""
self.db = sqlite3.connect(path)
for sql in Store._initial_sql_:
self.db.execute(sql)
def set_meta(self, key, value):
"""Set generic metadata key value, global to the store"""
self.db.execute("INSERT OR REPLACE INTO meta VALUES (?, ?)", (key, value))
self.db.commit()
def get_meta(self, key):
"""Get generic metadata key value, global to the store"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from meta WHERE key = ?", (key, ))
            row = cur.fetchone()
            return row[0] if row else None
finally:
cur.close()
def set_wiki_meta(self, wiki, key, value):
"""Set wiki specific meta key value"""
self.db.execute("INSERT OR REPLACE INTO wiki_meta VALUES (?, ?, ?)", (wiki, key, value))
self.db.commit()
    def get_wiki_meta(self, wiki, key):
        """Get wiki specific meta key value"""
        try:
            cur = self.db.cursor()
            cur.execute("SELECT value from wiki_meta WHERE wiki = ? AND key = ?", (wiki, key, ))
            row = cur.fetchone()
            return row[0] if row else None
        finally:
            cur.close()
def set_country_info(self, wiki, country, key, value):
"""Set a country and wiki specific key and value"""
self.db.execute("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", (wiki, country, key, value))
self.db.commit()
def set_country_info_bulk(self, wiki, key, country_dict):
"""Bulk insert a dictionary of country specific key and value.
The dictionary should be of form {'country': 'value'}
"""
insert_data = [(wiki, k, key, v) for (k, v) in country_dict.iteritems()]
self.db.executemany("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", insert_data)
self.db.commit()
def get_country_info(self, wiki, country, key):
"""Get a country and wiki specific value for a given key"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from country_info WHERE wiki = ? AND country = ?AND key = ?",
(wiki, country, key, ))
cur.fetchone()
return cur[0]
finally:
cur.close()
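

# Example usage (illustrative sketch; the database path and keys below are made up):
if __name__ == '__main__':
    store = Store('/tmp/editstats.db')
    store.set_meta('last_run', '2014-01-01')
    store.set_wiki_meta('enwiki', 'dump_date', '20140101')
    store.set_country_info_bulk('enwiki', 'edits', {'IN': 1200, 'US': 3400})
    print store.get_country_info('enwiki', 'IN', 'edits')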
|
mit
| 910,942,427,203,685,800
| 38.134146
| 111
| 0.587722
| false
| 4.051768
| false
| false
| false
|
jskye/car-classifier-research
|
src/hyp.verification.tools/py/test/test.py
|
1
|
3773
|
__author__ = 'juliusskye'
import os, sys
sys.path.append('..')
from py.Rectangle import Rectangle
from py.CompareRectangles import CompareRectangles
# things = [0,1,2,3,4,5]
# for thing in things:
#
# if thing>2:
# print(str(thing) + ' is greater than two')
# break
# else: 'no things greater than two'
# det_jaccard_index = 50
# sw_jaccard_index = 100
# print("hypothesis_JI: {0}, slidingwindow_JI: {1}".format(det_jaccard_index, sw_jaccard_index))
# x=[0,0,0,1]
# print(not any(x))
# imageDir = "this.noisy"
# if imageDir[-5:] == "noisy":
# noisytest = True
# print("noisytest: "+str(noisytest))
import numpy as np
import cv2
import copy
JI_THRESH = 0.35
debugging = True  # verbose-output toggle referenced in the containment loop below
# r1 = cv2.rectangle((0,0),(100,100))
# r2 = cv2.rectangle((20,20),(40,40))
r1 = (0,0,100,100)
r2 = (20,20,40,40)
r3 = (40,40,80,80)
r4 = (10,10,10,10)
r5 = (20,20,10,10)
detected_objects = []
# print(detected_objects)
detected_objects = [r1,r2,r3,r4,r5]
# detected_objects.append(r1)
# detected_objects.append(r2)
# detected_objects.append(r3)
# detected_objects.append(r4)
# detected_objects.append(r5)
detected_numpy = np.array(detected_objects)
detected_objects_clone = detected_numpy
print(detected_objects_clone)
# get rid of hypotheses that are contained inside others
# because ... there shouldn't be a car within a car...
# detected_objects_clone = copy.copy(detected_objects)
iterations = int(len(detected_objects_clone))-1
for this_index, this_detected_object in enumerate(detected_objects_clone[:-1]):
# use the opencv returned rectangle and create our own.
this_detected_rect = Rectangle(this_detected_object[0], this_detected_object[1], this_detected_object[2], this_detected_object[3])
print("this index (before second loop) is: {0}".format(this_index))
# compare with those in front of this index.
for that_index in range((this_index+1), len(detected_objects_clone)):
# print(detected_objects_clone)
# print("that index (before we get object) is: {0}".format(that_index))
if that_index >= len(detected_objects_clone):
break
that_detected_object = detected_objects_clone[that_index]
that_detected_rect = Rectangle(that_detected_object[0], that_detected_object[1], that_detected_object[2], that_detected_object[3])
# get comparison of this and that rectangle.
comparison_hypotheses = CompareRectangles(this_detected_rect, that_detected_rect, JI_THRESH)
# print("this index is: {0}".format(this_index))
# print("this rect is: {0}".format(this_detected_rect))
# print("that index is: {0}".format(that_index))
# print("that rect is: {0}".format(that_detected_rect))
# if one of them is contained.
if comparison_hypotheses.is_full_containment():
# keep the container and remove the contained.
contained = comparison_hypotheses.rect_fully_contained()
print("contained is: {0}".format(contained))
print("this detected rect is: {0}".format(this_detected_rect))
print("that detected rect is: {0}".format(that_detected_rect))
# determine which is the contained.
print(contained == this_detected_rect)
print(contained == that_detected_rect)
if contained == this_detected_rect:
# detected_objects_clone.pop(this_index)
detected_objects_clone = np.delete(detected_objects_clone, this_index, 0)
print("this rect is contained. removed this rectangle.")
elif contained == that_detected_rect:
# detected_objects_clone.delete(that_index)
detected_objects_clone = np.delete(detected_objects_clone, that_index, 0)
print("that rect is contained. removed that rectangle")
else:
pass
if debugging:
print("hypothese not contained")
# set resultant clone as the new list with contained hypotheses removed.
detected_objects = detected_objects_clone
print(detected_objects_clone)
|
mit
| 3,068,737,828,676,781,600
| 33.614679
| 132
| 0.708985
| false
| 3.032958
| false
| false
| false
|
DayGitH/Family-Tree
|
node.py
|
1
|
3177
|
from PySide import QtCore, QtGui
import sys
class Actor(QtGui.QGraphicsWidget):
nick_name = ''
real_name = ''
gender = ''
bday = ''
age = ''
marital = ''
children = ''
death = ''
important = False
notes = ''
def __init__(self, nick_name, real_name, gender, bday, age, marital, children, death, important, notes, parent=None):
super(Actor, self).__init__(parent)
self.nick_name = nick_name
self.real_name = real_name
self.gender = gender
self.bday = bday
self.age = age
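        # 'marital' is a list of status codes whose first letter encodes the state:
        # S=Single, M=Married, W=Widowed, D=Divorced, E=Engaged. Only the most
        # recent entry (marital[-1]) is inspected; the rest of each code is ignored here.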
if marital == ['S000']:
self.marital = 'Single'
elif marital[-1][0] == 'M':
self.marital = 'Married'
elif marital[-1][0] == 'W':
self.marital = 'Widower' if self.gender == 'M' else ('Widow' if gender == 'F' else '')
elif marital[-1][0] == 'D':
self.marital = 'Divorced'
elif marital[-1][0] == 'E':
self.marital = 'Engaged'
if children == ['']:
self.children = 0
else:
self.children = len(children)
self.death = death
self.important = important
self.notes = notes
def headerRect(self):
return QtCore.QRectF(-55,-60,110,35)
def boundingRect(self):
return QtCore.QRectF(-60, -60, 120, 120)
def shape(self):
path = QtGui.QPainterPath()
path.addEllipse(self.boundingRect())
return path
def paint(self, painter, option, widget):
r = self.boundingRect()
h = self.headerRect()
painter.setBrush(QtGui.QColor.fromHsv(255,0,255,160))
painter.drawEllipse(r)
if self.gender == 'M':
painter.setBrush(QtGui.QColor.fromHsv(240,255,255,255))
elif self.gender == 'F':
painter.setBrush(QtGui.QColor.fromHsv(0,255,255,255))
painter.drawRoundedRect(h,5,5)
text = self.nick_name
painter.setPen(QtCore.Qt.white)
painter.drawText(h,QtCore.Qt.AlignCenter, text)
text = '\n'.join((self.real_name, str(self.age) + ' - ' + self.marital,
self.bday, 'Children: ' + str(self.children)))
painter.setPen(QtCore.Qt.black)
painter.drawText(r,QtCore.Qt.AlignCenter, text)
class View(QtGui.QGraphicsView):
def resizeEvent(self, event):
super(View, self).resizeEvent(event)
self.fitInView(self.sceneRect(), QtCore.Qt.KeepAspectRatio)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
scene = QtGui.QGraphicsScene(-350,-350,700,700)
actor = Actor('Akber','Akber Ali','M','1991-Jan-28', 23,'Single',0,'2051-Jan-28',True, '')
actor.setPos(0,0)
scene.addItem(actor)
view = View(scene)
view.setWindowTitle("Animated Tiles")
view.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
view.setCacheMode(QtGui.QGraphicsView.CacheBackground)
view.setRenderHints(
QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
view.show()
sys.exit(app.exec_())
|
cc0-1.0
| 5,410,718,640,994,726,000
| 30.78
| 121
| 0.56972
| false
| 3.491209
| false
| false
| false
|
9468305/script
|
geetest_offline/geetest_offline_gd.py
|
1
|
12177
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''
geetest offline 6.0.0 spider for gd.gsxt.org.cn
'''
import os
import time
import random
import logging
from logging import NullHandler
import json
import requests
import execjs
from bs4 import BeautifulSoup
import constants
import util
logging.getLogger(__name__).addHandler(NullHandler())
logging.basicConfig(level=logging.DEBUG)
HOST = 'http://gd.gsxt.gov.cn'
INDEX = HOST
JSRUNTIME = execjs.get(execjs.runtime_names.Node)
USERRESPONSE_JSCONTEXT = JSRUNTIME.compile(util.USERRESPONSE_JS)
TIMEOUT = 15
GD_LIST_FILE = 'gd_list.json'
GD_RESULT_FILE = 'gd_result.json'
GD_NOTFOUND_FILE = 'gd_notfound.json'
def load_json(json_file):
'''load json file'''
if not os.path.isfile(json_file):
logging.info("Json File Not Exist")
return None
with open(json_file, 'r', encoding='utf8') as _f:
json_data = json.load(_f)
logging.info(len(json_data))
return json_data
def save_json(json_file, json_data):
'''save json file'''
with open(json_file, 'w', encoding='utf8') as _f:
json.dump(json_data, _f, indent=2, sort_keys=True, ensure_ascii=False)
logging.info(len(json_data))
def calc_userresponse(distance, challenge):
    '''Compute the userresponse value from the slide distance and the challenge string.'''
return USERRESPONSE_JSCONTEXT.call('userresponse', distance, challenge)
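# For example (values illustrative): calc_userresponse(110, captcha['challenge'])
# returns the short encoded string geetest expects for that distance;
# calc_validate() below joins three such strings with '_' to form geetest_validate.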
def calc_validate(challenge):
'''calculate validate'''
_r = random.randint(0, len(util.OFFLINE_SAMPLE)-1)
distance, rand0, rand1 = util.OFFLINE_SAMPLE[_r]
distance_r = calc_userresponse(distance, challenge)
rand0_r = calc_userresponse(rand0, challenge)
rand1_r = calc_userresponse(rand1, challenge)
validate = distance_r + '_' + rand0_r + '_' + rand1_r
logging.debug(validate)
return validate
def parse_name_url(html_doc):
    '''Parse the HTML page with BeautifulSoup and collect the company detail links.'''
_soup = BeautifulSoup(html_doc, 'html.parser')
_findall = _soup.find_all('div',
class_="clickStyle",
style='margin-left: 160px;padding-left: 10px;')
name_url_array = []
if _findall:
for _a in _findall:
_company = _a.find('a')
_name = ''.join(_company.get_text().split())
_url = _company['href']
if _url.startswith('../'):
_url = INDEX + '/aiccips/CheckEntContext/' + _url
name_url_array.append((_name, _url))
logging.info(name_url_array)
else:
logging.error('Company Link Not Found')
return name_url_array
def get_mainpage(session):
'''
Get http://gd.gsxt.gov.cn
Response Code 200
'''
logging.debug('GET ' + INDEX)
_headers = {'Accept': constants.ACCEPT_HTML,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT}
_response = session.get(INDEX, headers=_headers, timeout=TIMEOUT)
logging.debug('response code:' + str(_response.status_code))
return _response.status_code == 200
def get_captcha(session):
'''
GET /aiccips//verify/start.html
Response JSON
{
"success": 0,
"gt": "c02ee51ee0afe88899efe6dc729627fc",
"challenge": "ed3d2c21991e3bef5e069713af9fa6caed"
}
'''
_url = INDEX + '/aiccips//verify/start.html'
logging.debug('GET ' + _url)
_headers = {'Accept': constants.ACCEPT_JSON,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT,
'Referer': INDEX,
'X-Requested-With': 'XMLHttpRequest'}
_params = {'t': str(int(time.time() * 1000))}
_response = session.get(_url, headers=_headers, params=_params, timeout=TIMEOUT)
logging.debug('response code: ' + str(_response.status_code))
logging.debug('response text: ' + _response.text)
if _response.status_code != 200:
return False
return _response.json()
def post_validate(session, challenge, validate, keyword):
'''
POST /aiccips/verify/sec.html
Response JSON
{
"status": "success",
"textfield": "waY5F5lZyxvKw9bMM4nBs7HUgWS1SRpagFutRKqs/+DkRqCIS9N4PUCqM9fmrbg1",
"version": "3.3.0"
}
'''
_url = INDEX + '/aiccips/verify/sec.html'
logging.debug('POST ' + _url)
_headers = {'Accept': constants.ACCEPT_JSON,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT,
'Referer': INDEX,
'X-Requested-With': 'XMLHttpRequest',
'Origin': HOST}
_params = [('textfield', keyword),
('geetest_challenge', challenge),
('geetest_validate', validate),
('geetest_seccode', validate + '|jordan')]
_response = session.post(_url, headers=_headers, data=_params, timeout=TIMEOUT)
logging.debug('response code: ' + str(_response.status_code))
logging.debug('response text: ' + _response.text)
if _response.status_code != 200:
return False
_json_obj = _response.json()
logging.debug(_json_obj)
return _json_obj['textfield'] if _json_obj['status'] == 'success' else None
def post_search(session, textfield):
'''
POST /aiccips/CheckEntContext/showCheck.html
Response HTML WebPage
'''
_url = INDEX + '/aiccips/CheckEntContext/showCheck.html'
logging.debug('POST ' + _url)
_headers = {'Accept': constants.ACCEPT_HTML,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT,
'Referer': INDEX,
'X-Requested-With': 'XMLHttpRequest',
'Origin': HOST}
_params = [('textfield', textfield),
('type', 'nomal')]
_response = session.post(_url, headers=_headers, data=_params, timeout=TIMEOUT)
logging.debug('response code: ' + str(_response.status_code))
logging.debug('response text: ' + _response.text)
if _response.status_code != 200:
return None
return parse_name_url(_response.text)
def get_validate(session, keyword):
'''safe loop post validate'''
for _ in range(10):
captcha = get_captcha(session)
if not captcha:
return None
validate = calc_validate(captcha['challenge'])
textfield = post_validate(session, captcha['challenge'], validate, keyword)
if textfield:
return textfield
return None
def parse_detail_sz(html_doc):
'''parse company detail for shenzhen'''
_soup = BeautifulSoup(html_doc, 'html.parser')
_yyzz = _soup.find('div', class_='item_box', id='yyzz')
if not _yyzz:
logging.error('Detail yyzz Not Found')
return None
_li_all = _yyzz.find_all('li')
if not _li_all:
logging.error("Detail li Not Found")
return None
_info = {}
for _li in _li_all:
_text = ''.join(_li.get_text().split())
_k, _v = _text.split(sep=':', maxsplit=1)
_info[_k] = _v
logging.info(_info)
if not _info['企业名称']:
_info = None # for safe
return _info
def parse_detail(html_doc):
'''parse company detail for guangzhou and other'''
_soup = BeautifulSoup(html_doc, 'html.parser')
_table = _soup.find('table', cellspacing='6')
if not _table:
logging.error('Detail table Not Found')
return None
_tr_all = _table.find_all('td')
if not _tr_all:
logging.error("Detail td Not Found")
return None
_info = {}
for _td in _tr_all:
_text = ''.join(_td.get_text().split())
if _text == '营业执照信息':
continue
_k, _v = _text.split(sep=':', maxsplit=1)
_temp = {}
_temp[_k] = _v
for _k2, _v2 in _temp.items():
if _k2 == '.企业名称' or _k2 == '.名称':
_info['企业名称'] = _v2
elif _k2 == '.统一社会信用代码/注册号' or _k2 == '.注册号':
_info['注册号/统一社会信用代码'] = _v2
elif _k2 == '.类型':
_info['类型'] = _v2
elif _k2 == '.负责人' or _k2 == '.经营者':
_info['法定代表人'] = _v2
elif _k2 == '.成立日期' or _k2 == '.注册日期':
_info['成立日期'] = _v2
elif _k2 == '.营业期限自':
_info['营业期限自'] = _v2
elif _k2 == '.营业期限至':
_info['营业期限至'] = _v2
elif _k2 == '.登记机关':
_info['登记机关'] = _v2
elif _k2 == '.核准日期':
_info['核准日期'] = _v2
elif _k2 == '.登记状态':
_info['登记状态'] = _v2
elif _k2 == '.营业场所' or _k2 == '.经营场所':
_info['住所'] = _v2
elif _k2 == '.经营范围':
_info['经营范围'] = _v2
_info['注册资本'] = '0'
logging.info(_info)
if not _info['企业名称']:
_info = None # for safe
return _info
def query_keyword(session, keyword):
'''query keyword'''
#if not get_mainpage(session):
# return None
logging.info(keyword)
textfield = get_validate(session, keyword)
if textfield:
return post_search(session, textfield)
return None
def safe_query_keyword(keyword):
'''Safe query keyword, handle network timeout and retry'''
for _ in range(5):
try:
with requests.Session() as session:
return query_keyword(session, keyword)
except requests.RequestException as _e:
logging.error(_e)
time.sleep(5)
return None
def query_detail(session, url):
'''query company detail url'''
logging.debug('GET ' + url)
_headers = {'Accept': constants.ACCEPT_HTML,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT}
_response = session.get(url, headers=_headers, timeout=TIMEOUT)
logging.debug('response code:' + str(_response.status_code))
if _response.status_code == 200:
        if url.find('www.szcredit.org.cn') != -1:
return parse_detail_sz(_response.text)
        elif url.find('GSpublicityList.html') != -1:
return parse_detail(_response.text)
else:
logging.error('URL Type Not Support')
return None
def safe_query_detail(url):
'''Safe query url, handle network timeout and retry multi times.'''
for _ in range(5):
try:
with requests.Session() as session:
return query_detail(session, url)
except requests.RequestException as _e:
logging.error(_e)
time.sleep(5)
return None
def query_entry():
'''main entry'''
lists = load_json(GD_LIST_FILE)
if not lists:
lists = []
results = load_json(GD_RESULT_FILE)
if not results:
results = {}
notfound = load_json(GD_NOTFOUND_FILE)
if not notfound:
notfound = []
for keyword in lists:
if keyword in results:
continue
if keyword in notfound:
continue
name_url_array = safe_query_keyword(keyword)
if not name_url_array:
notfound.append(keyword)
continue
for name, url in name_url_array:
if name in results:
continue
detail_dict = safe_query_detail(url)
if detail_dict:
results.update({name : detail_dict})
save_json('result.json', results)
save_json('notfound.json', notfound)
logging.info('done')
if __name__ == "__main__":
query_entry()
|
mit
| -263,166,151,455,258,660
| 30.441096
| 85
| 0.556119
| false
| 3.440151
| false
| false
| false
|
HugoMMRabson/fonsa
|
src/my/installer/copyintomaple/atxraspisettings.py
|
1
|
6164
|
#!/usr/bin/python3
'''
Created on Aug 18, 2019
@author: johnrabsonjr
'''
import os
from queue import Queue
import sys
import time
import RPi.GPIO as GPIO # @UnresolvedImport
REBOOTPULSEMINIMUM = None
REBOOTPULSEMAXIMUM = None
SHUT_DOWN = None
BOOTOK = None
ATXRASPI_SOFTBTN = None
FACTORYRESET = None
GREEN22 = None
BLUE23 = None
RED15 = None
PB1000C_LBO = None
MAPLEDRIVE = None
PB1000C_SHOULD_I_PULL_UP_OR_DOWN = None
WHITE, PERIWINKLE, YELLOW, GREEN, VIOLET, BLUE, RED, BLACK = ('white', 'periwinkle', 'yellow', 'green', 'violet', 'blue', 'red', 'black')
ALL_POTENTIAL_COLORS = (WHITE, PERIWINKLE, YELLOW, GREEN, VIOLET, BLUE, RED, BLACK)
def singleton(cls):
"""
See http://stackoverflow.com/questions/674304/pythons-use-of-new-and-init?ref=mythemeco&t=pack for explanation
"""
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
def is_battery_low():
    return GPIO.input(PB1000C_LBO) == 0
def shut_it_down(reboot=False):
# MyLEDController.show_led_color(RED)
# GPIO.output(BOOTOK, 0)
if reboot:
logit("shut_it_down is rebooting me")
if MAPLEDRIVE:
logit("... but we're a mapledrive. So, in all likelihood, reboot=>shutdown.")
os.system('sudo reboot')
else:
logit("shut_it_down is calling poweroff")
GPIO.output(ATXRASPI_SOFTBTN, 1)
os.system('sudo poweroff')
def poweroff_now(reboot=False):
magic_key = 'b' if reboot else 'o'
    for (val, fname) in (
            ('3', '/proc/sys/kernel/printk'),
            ('3', '/proc/sys/vm/drop_caches'),
            ('256', '/proc/sys/vm/min_free_kbytes'),
            ('1', '/proc/sys/vm/overcommit_memory'),
            ('1', '/proc/sys/vm/oom_kill_allocating_task'),
            ('0', '/proc/sys/vm/oom_dump_tasks'),
            ('1', '/proc/sys/kernel/sysrq'),
            (magic_key, '/proc/sysrq-trigger')):
with open(fname, 'w') as f:
f.write(
val
) # See http://major.io/2009/01/29/linux-emergency-reboot-or-shutdown-with-magic-commands/
def logit(s):
print(s)
os.system('echo "$(date) %s" >> /var/log/atxraspi.log' % s)
def setup_gpio():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(BOOTOK, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(ATXRASPI_SOFTBTN, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(SHUT_DOWN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
if MAPLEDRIVE:
GPIO.setup(GREEN22, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(BLUE23, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(RED15, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(PB1000C_LBO, GPIO.IN, pull_up_down=PB1000C_SHOULD_I_PULL_UP_OR_DOWN)
GPIO.setup(FACTORYRESET, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class ThreadToServiceGPIOLED(object):
""" Threading example class
The run() method will be started and it will run in the background
until the application exits.
"""
def __init__(self, q):
""" Constructor
        :type q: Queue
        :param q: queue of (color_chain, repetitions) requests serviced by run()
"""
import threading
self.q = q
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
self.coldct = { WHITE:(0, 0, 0), PERIWINKLE:(0, 0, 1), YELLOW:(0, 1, 0), GREEN:(0, 1, 1), VIOLET:(1, 0, 0), BLUE:(1, 0, 1), RED:(1, 1, 0), BLACK:(1, 1, 1) }
thread.start() # Start the execution
def run(self):
""" Method that runs forever """
old_color_chain = None
while True:
            if self.q.empty():
if old_color_chain is None:
time.sleep(.1) # There's no flashing for us to do.
else:
self.flash_the_led_in_this_sequence(old_color_chain)
else:
try:
color_chain, repetitions = self.q.get(timeout=5)
except:
logit("That's odd. Queue wasn't empty a moment ago, but it is now.")
else:
assert(repetitions >= 0)
if repetitions > 0:
for _ in range(repetitions):
self.flash_the_led_in_this_sequence(color_chain)
else:
old_color_chain = color_chain
def flash_the_led_in_this_sequence(self, color_chain):
for (col, dur) in color_chain:
self.illuminate_led_appropriately(col)
time.sleep(dur)
def illuminate_led_appropriately(self, pulse_color):
a, b, c = self.coldct[pulse_color]
try:
GPIO.output(GREEN22, a)
GPIO.output(BLUE23, b)
GPIO.output(RED15, c)
except RuntimeError:
print("Warning --- you haven't run setup_gpio() yet. Therefore, I can't set the colors of the LEDs yet.")
def get_cpu_temp(): # get_cpu_temp_raw()
try:
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as f:
t = float(f.read().strip('\n')) / 1000.
return t
except Exception as ex: # (FileNotFoundError, TypeError, ValueError) as ex:
logit('''get_cpu_temp failed --- %s --- returning 0''' % str(ex))
return 0.
@singleton
class _MyLEDController:
def __init__(self):
self.q = Queue(maxsize=0)
self.thr = ThreadToServiceGPIOLED(self.q)
def set_this_group_as_default(self, color_chain):
self.q.put([color_chain, 0])
def flash_this_group_only_once(self, color_chain):
self.q.put([color_chain, 1])
def flash_this_group_repeatedly(self, color_chain, count):
self.q.put([color_chain, count])
def set_default_led_color(self, a_color):
self.q.put([[(a_color, .2)], 0])
def flash_this_led_color_once(self, a_color, dur):
self.q.put([[(a_color, dur)], 1])
MyLEDController = _MyLEDController() if MAPLEDRIVE else None
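# Illustrative usage sketch (not part of the original module). It is only
# meaningful on a MapleDrive build where MAPLEDRIVE is set and setup_gpio()
# has been called; otherwise MyLEDController is None and the calls are skipped.
#
#   if MyLEDController is not None:
#       MyLEDController.set_default_led_color(GREEN)          # steady color
#       MyLEDController.flash_this_led_color_once(RED, 0.5)   # brief warning flash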
|
gpl-3.0
| 1,488,395,012,073,988,600
| 31.442105
| 164
| 0.571544
| false
| 3.310419
| false
| false
| false
|
scieloorg/journals-catalog
|
jcatalog/reports/fapesp_evaluation_line.py
|
1
|
43533
|
# coding: utf-8
import pyexcel
import xlsxwriter
import models
import re
import datetime
from accent_remover import *
def formatindicator(indicator):
data = indicator
if type(indicator) == str:
if '.' in indicator and '>' not in indicator:
data = float(indicator)
return data
def formatjcr(indicator):
data = indicator
if type(indicator) == str:
if '.' in indicator and '>' not in indicator:
data = float(indicator)
elif '>10' in indicator:
data = 10
else:
data = None
return data
def formatman(indicator):
data = indicator
if type(indicator) == str:
data = None
return data
def timesfmt(data):
if isinstance(data, float):
num = round(data, 2)
elif isinstance(data, int):
num = data
else:
if isinstance(data, str):
num = None
return num
def journal(query, filename, sheetname, issn, atfile):
    # Create the Excel workbook and add a worksheet
if issn:
workbook = xlsxwriter.Workbook('output/journals/' + filename)
worksheet = workbook.add_worksheet(sheetname)
else:
workbook = xlsxwriter.Workbook('output/' + filename)
worksheet = workbook.add_worksheet(sheetname)
worksheet.freeze_panes(1, 0)
worksheet.set_row(0, 70)
# HEADER
col = 0
wrap_header = workbook.add_format({'text_wrap': True, 'size': 9})
format_date_iso = workbook.add_format({'num_format': 'yyyymmdd'})
sheet_header = pyexcel.get_sheet(
file_name='data/scielo/rotulos_avaliacao_fapesp_abel.xlsx',
sheet_name='rotulos_dados_periodicos',
name_columns_by_row=0)
headers = sheet_header.to_records()
for h in headers:
worksheet.write(0, col, h['rotulo_portugues'], wrap_header)
col += 1
extraction_date = models.Scielofapesp.objects.first().extraction_date
# SciELO
scielo = query
row = 1
for doc in scielo:
for h in [
'anterior',
'2008',
'2009',
'2010',
'2011',
'2012',
'2013',
'2014',
'2015',
'2016',
'2017',
'2018'
]:
print(doc.issn_scielo + '_' + str(h))
col = 0
worksheet.write(row, col, extraction_date, format_date_iso)
col += 1
            # active in 2018
active = 0
if doc.title_current_status == 'current':
active = 1
worksheet.write(row, col, active)
col += 1
            # active in the given year
ativo_y = 0
if 'docs' in doc:
if 'docs_' + h in doc['docs']:
# print(doc['docs']['docs_'+h])
if doc['docs']['docs_' + h] == '':
ativo_y = 0
elif int(doc['docs']['docs_' + h]) > 0:
ativo_y = 1
worksheet.write(row, col, ativo_y)
col += 1
# ISSN SciELO
worksheet.write(row, col, doc.issn_scielo)
col += 1
worksheet.write(row, col, '; '.join(doc.issn_list))
col += 1
worksheet.write(row, col, doc.title)
col += 1
if doc['is_scopus'] == 1:
scopus = models.Scopus.objects.filter(id=str(doc.scopus_id))[0]
worksheet.write(row, col, scopus.title)
col += 1
if doc['is_wos'] == 1:
# wos = models.Wos.objects.filter(id=str(doc.wos_id))[0]
worksheet.write(row, col, doc['wos_indexes'][0]['title'])
col += 1
            # DOI prefix and publisher
worksheet.write(row, col, doc.crossref['doi_provider']['prefix'])
col += 1
worksheet.write(row, col, doc.crossref[
'doi_provider']['publisher'])
col += 1
if 'url' in doc['api']:
worksheet.write(row, col, doc.api['url'])
col += 1
# URL
doajapi = models.Doajapi.objects.filter(issn_list=doc.issn_scielo)
if doajapi:
if 'editorial_review' in doajapi[0]['results'][0]['bibjson']:
url_journal = doajapi[0]['results'][0][
'bibjson']['editorial_review']['url']
worksheet.write(row, col, url_journal)
col += 1
# Publisher Name
worksheet.write(row, col, doc.publisher_name)
col += 1
# Country
worksheet.write(row, col, doc.country)
col += 1
if doc['is_scopus'] == 1:
scopus = models.Scopus.objects.filter(id=str(doc.scopus_id))[0]
worksheet.write(row, col, scopus.country)
col += 1
if doc['is_wos'] == 1:
for i in doc['issn_list']:
wos = models.Wos.objects.filter(issn_list=i)
if len(wos) > 0:
worksheet.write(row, col, wos[0].country)
else:
worksheet.write(row, col, doc.country)
col += 1
# Submissions - Manager System
col = 16
submiss = models.Submissions.objects.filter(
issn_list=doc.issn_scielo)
if submiss:
                # submission management system description
sist = 'ND'
if submiss[0]['scholarone'] == 1:
sist = 'ScholarOne'
elif submiss[0]['ojs_scielo'] == 1:
sist = 'OJS-SciELO'
elif submiss[0]['ojs_outro'] == 1:
sist = 'OJS-Outro'
elif submiss[0]['outro'] == 1:
sist = 'Outro'
worksheet.write(row, col, sist)
col += 1
if 'scholarone' in submiss[0]:
worksheet.write(row, col, submiss[0]['scholarone'] or 0)
col += 1
if 'ojs_scielo' in submiss[0]:
worksheet.write(row, col, submiss[0]['ojs_scielo'] or 0)
col += 1
if 'ojs_outro' in submiss[0]:
worksheet.write(row, col, submiss[0]['ojs_outro'] or 0)
col += 1
# Para outro ou ND == 1
if 'outro' in submiss[0]:
worksheet.write(row, col, submiss[0]['outro'] or 0)
col += 1
else:
# "Outro" para periódicos sem este dado
worksheet.write(row, col, "Outro")
col += 1
# 0 para periodicos sem este dado
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
# marcar 1 para coluna outro - periodico sem este dado
worksheet.write(row, col, 1)
col += 1
# Adocao de ORCID
col = 21
if 'orcid' in doc:
worksheet.write(row, col, 1)
col += 1
worksheet.write(row, col, 0)
col += 1
else:
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
            # SciELO evaluation - institution type
col = 23
if 'avaliacao' in doc:
if 'tipo_inst' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_inst'])
col += 1
if 'tipo_1' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_1'])
else:
if doc['avaliacao']['tipo_inst'] == 1:
worksheet.write(row, col, 1)
col += 1
if 'tipo_2' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_2'])
else:
if doc['avaliacao']['tipo_inst'] == 2:
worksheet.write(row, col, 1)
col += 1
if 'tipo_3' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_3'])
else:
if doc['avaliacao']['tipo_inst'] == 3:
worksheet.write(row, col, 1)
col += 1
if 'tipo_4' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_4'])
else:
if doc['avaliacao']['tipo_inst'] == 4:
worksheet.write(row, col, 1)
col += 1
if 'inst_n1' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['inst_n1'])
col += 1
if 'inst_n2' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['inst_n2'])
col += 1
if 'inst_n3' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['inst_n3'])
col += 1
if 'contatos' in doc['avaliacao']:
count = 0
for c in doc['avaliacao']['contatos']:
name = None
lattes = None
orcid = None
if c['cargo'] == 'Editor-chefe' or c['cargo'] == 'Editor':
count += 1
name = c['first_name'] + ' ' + c['last_name']
lattes = c['cv_lattes_editor_chefe']
orcid = c['orcid_editor_chefe']
if name:
worksheet.write(row, col, name)
col += 1
if lattes:
worksheet.write(row, col, lattes)
col += 1
if orcid:
worksheet.write(row, col, orcid)
col += 1
if count == 3:
break
else:
col += 17
# Thematic Areas
col = 40
for k in [
'title_thematic_areas',
'title_is_agricultural_sciences',
'title_is_applied_social_sciences',
'title_is_biological_sciences',
'title_is_engineering',
'title_is_exact_and_earth_sciences',
'title_is_health_sciences',
'title_is_human_sciences',
'title_is_linguistics_letters_and_arts',
'title_is_multidisciplinary'
]:
if k in doc:
worksheet.write(row, col, doc[k])
col += 1
# Wos Categories
col = 50
if 'wos_subject_areas' in doc['api']:
worksheet.write(row, col, '; '.join(
doc['api']['wos_subject_areas']))
col += 1
            # History
worksheet.write(row, col, doc.title_current_status)
col += 1
if 'first_year' in doc['api']:
worksheet.write(row, col, int(doc['api']['first_year']))
col += 1
worksheet.write(row, col, doc.inclusion_year_at_scielo)
col += 1
if 'stopping_year_at_scielo' in doc:
worksheet.write(row, col, doc.stopping_year_at_scielo)
col += 1
worksheet.write(
row, col, doc.date_of_the_first_document, format_date_iso)
col += 1
worksheet.write(
row, col, doc.date_of_the_last_document, format_date_iso)
col += 1
# APC
col = 57
if 'apc' in doc:
if doc['apc']['apc'] == 'Sim':
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
# if doc['apc']['value']:
# worksheet.write(row, col, doc['apc']['value'])
# col += 1
if doc['apc']['comments']:
worksheet.write(row, col, doc['apc']['comments'])
col += 1
apc_list = []
for f in range(1, 9):
coin = None
value = None
concept = None
                    if 'apc' + str(f) + '_value_coin' in doc['apc']:  # check the key exists
coin = doc['apc']['apc' + str(f) + '_value_coin']
value = doc['apc']['apc' + str(f) + '_value']
concept = doc['apc']['apc' + str(f) + '_concept']
if coin or value or concept:
apc_list.append('[%s) value: %s %s - concept: %s]' %
(str(f), coin, value, concept))
if apc_list:
worksheet.write(row, col, '; '.join(apc_list))
col += 1
else:
worksheet.write(row, col, 0)
col += 4
            # Indexing
col = 60
worksheet.write(row, col, doc.is_scopus)
col += 1
worksheet.write(row, col, doc.is_jcr)
col += 1
# WOS
worksheet.write(row, col, doc.is_wos)
col += 1
# SCIE
scie = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'scie' in i['index']:
scie = 1
break
worksheet.write(row, col, scie)
col += 1
# SSCI
ssci = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'ssci' in i['index']:
ssci = 1
break
worksheet.write(row, col, ssci)
col += 1
# A&HCI
ahci = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'ahci' in i['index']:
ahci = 1
break
worksheet.write(row, col, ahci)
col += 1
# ESCI
esci = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'esci' in i['index']:
esci = 1
break
worksheet.write(row, col, esci)
col += 1
# Pubmed, PMC
col = 67
pubmed = models.Pubmedapi.objects.filter(issn_list=doc.issn_scielo)
if pubmed:
                if 'pubmed' in pubmed[0]['db_name']:
                    worksheet.write(row, col, 1)
                    col += 1
                if 'pmc' in pubmed[0]['db_name']:
                    worksheet.write(row, col, 1)
                    col += 1
else:
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
            # PUBLICATION YEAR
col = 69
if h == 'anterior':
year = '2007'
else:
year = h
worksheet.write(row, col, int(year))
col += 1
            # Documents
if 'docs' in doc:
if 'docs_' + h in doc['docs']:
worksheet.write(row, col, doc['docs']['docs_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_en_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_en_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_pt_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_pt_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_es_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_es_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'doc_2_more_lang_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'doc_2_more_lang_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_other_languages_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_other_languages_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
# CITABLES
if 'is_citable_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'is_citable_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'tipo_review_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'tipo_review_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_en_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_en_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_pt_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_pt_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_es_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_es_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_doc_2_more_lang_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_doc_2_more_lang_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_other_lang_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_other_lang_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
else:
col += 13
            # Accesses
col = 83
if 'access' in doc:
if h == 'anterior':
pass
elif h == '2011':
hy = 'anterior'
for yacc in [
'anterior',
'2012',
'2013',
'2014',
'2015',
'2016',
'2017',
'2018'
]:
if 'pub_' + hy + '_acc_anterior' in doc['access']:
worksheet.write(row, col, doc['access'][
'pub_' + hy + '_acc_' + yacc])
else:
worksheet.write(row, col, 0)
col += 1
elif int(h) > 2011:
for yacc in [
'anterior',
'2012',
'2013',
'2014',
'2015',
'2016',
'2017',
'2018'
]:
if 'pub_' + h + '_acc_' + yacc in doc['access']:
worksheet.write(row, col, doc['access'][
'pub_' + h + '_acc_' + yacc] or 0)
else:
worksheet.write(row, col, 0)
col += 1
else:
col += 8
# SciELO CI WOS cited
col = 91
if 'scieloci' in doc:
if h == 'anterior':
pass
else:
year = str(h)
if 'docs_' + year in doc['scieloci']:
worksheet.write(
row, col, doc['scieloci']['docs_' + year])
col += 1
if 'is_citable_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'is_citable_' + year])
col += 1
if 'scieloci_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'scieloci_cited_' + year])
col += 1
if 'scieloci_wos_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'scieloci_wos_cited_' + year])
col += 1
if 'one_o_more_scielo_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'one_o_more_scielo_cited_' + year])
col += 1
if 'one_o_more_wos_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'one_o_more_wos_cited_' + year])
col += 1
else:
col += 6
# Google
col = 97
if h == 'anterior':
pass
else:
year = str(h)
if 'google_scholar_h5_' + year in doc:
worksheet.write(row, col, doc['google_scholar_h5_' + year])
col += 1
if 'google_scholar_m5_' + year in doc:
worksheet.write(row, col, doc['google_scholar_m5_' + year])
col += 1
# SCOPUS - CiteScore
col = 99
if doc['is_scopus'] == 1:
if h in scopus and 'citescore' in scopus[h]:
worksheet.write(row, col, formatindicator(
scopus[h]['citescore']))
col += 1
            # Scopus - SNIP - apply to 2007 (without accumulating)
col = 100
h2 = None
if h == 'anterior':
h2 = '2007'
else:
h2 = h
snip = 0
if doc['is_scopus'] == 1:
if h2 in scopus and 'snip' in scopus[h2]:
worksheet.write(
row, col, formatindicator(scopus[h2]['snip']))
snip = 1
else:
snip = 0
if snip == 0:
if doc['is_cwts'] == 1:
cwts = models.Cwts.objects.filter(id=str(doc.cwts_id))[0]
if h2 in cwts and 'snip' in cwts[h2]:
worksheet.write(
row, col, formatindicator(cwts[h2]['snip']))
snip = 1
col += 1
# SCIMAGO - SJR, tt_docs, tt_cites, cites_by_docs, h_index
col = 101
h2 = None
if h == 'anterior':
h2 = '2007'
else:
h2 = h
if doc['is_scimago'] == 1:
scimago = models.Scimago.objects.filter(
id=str(doc.scimago_id))[0]
for i in [
'sjr',
'total_docs_3years',
'total_cites_3years',
'cites_by_doc_2years',
'h_index'
]:
if h2 in scimago and i in scimago[h2]:
worksheet.write(
row, col, formatindicator(scimago[h2][i]))
col += 1
# JCR
col = 106
if doc['is_jcr'] == 1:
if h == 'anterior':
h2 = '2007'
else:
h2 = h
jcr = models.Jcr.objects.filter(id=str(doc.jcr_id))[0]
for i in [
'total_cites',
'journal_impact_factor',
'impact_factor_without_journal_self_cites',
'five_year_impact_factor',
'immediacy_index',
'citable_items',
'cited_half_life',
'citing_half_life',
'eigenfactor_score',
'article_influence_score',
'percentage_articles_in_citable_items',
'average_journal_impact_factor_percentile',
'normalized_eigenfactor'
]:
if h2 in jcr and i in jcr[h2]:
worksheet.write(row, col, formatjcr(jcr[h2][i]))
col += 1
else:
col += 13
# Affiliations_documents
col = 119
if 'aff' in doc:
if h == 'anterior':
if 'br_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'br_ate_2007'] or 0)
col += 1
if 'estrang_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'estrang_ate_2007'] or 0)
col += 1
if 'nao_ident_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_ate_2007'] or 0)
col += 1
if 'br_estrang_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'br_estrang_ate_2007'] or 0)
col += 1
if 'nao_ident_todos_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_todos_ate_2007'] or 0)
col += 1
if 'br_' + h in doc['aff']:
worksheet.write(row, col, doc['aff']['br_' + h] or 0)
col += 1
if 'estrang_' + h in doc['aff']:
worksheet.write(row, col, doc['aff']['estrang_' + h] or 0)
col += 1
if 'nao_ident_' + h in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_' + h] or 0)
col += 1
if 'br_estrang_' + h in doc['aff']:
worksheet.write(row, col, doc['aff'][
'br_estrang_' + h] or 0)
col += 1
if 'nao_ident_todos_' + h in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_todos_' + h] or 0)
col += 1
else:
col += 5
            # Manuscripts
col = 124
if 'manuscritos' in doc:
if h == '2014':
col += 4
if 'recebidos_2014' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['recebidos_2014']))
col += 1
if 'aprovados_2014' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['aprovados_2014']))
col += 1
else:
if 'recebidos_' + h + '_1sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['recebidos_' + h + '_1sem']))
col += 1
if 'aprovados_' + h + '_1sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['aprovados_' + h + '_1sem']))
col += 1
if 'recebidos_' + h + '_2sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['recebidos_' + h + '_2sem']))
col += 1
if 'aprovados_' + h + '_2sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['aprovados_' + h + '_2sem']))
col += 1
            # Times between submission, approval and publication
col = 130
if 'times' in doc:
if h == 'anterior':
# sub_aprov
if 'media_meses_sub_aprov_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_aprov_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_aprov_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_aprov_ate_2007'])
worksheet.write(row, col, times)
col += 1
# sub_pub
if 'media_meses_sub_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
# sub_pub_scielo
if 'media_meses_sub_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
# aprov_pub
if 'media_meses_aprov_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
# aprov_pub_scielo
if 'media_meses_aprov_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
else:
# sub_aprov
if 'media_meses_sub_aprov_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_aprov_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_aprov_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_aprov_' + h])
worksheet.write(row, col, times)
col += 1
# sub_pub
if 'media_meses_sub_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_' + h])
worksheet.write(row, col, times)
col += 1
# sub_pub_scielo
if 'media_meses_sub_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
# aprov_pub
if 'media_meses_aprov_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_' + h])
worksheet.write(row, col, times)
col += 1
# aprov_pub_scielo
if 'media_meses_aprov_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
            # SciELO - citations given (citações concedidas)
col = 140
if 'citations' in doc:
for cit in doc['citations']:
if h in cit:
# print(cit[h])
for label in [
'total_citgrant',
'total_citgrant_journals',
'total_citgrant_autocit',
'citgrant_journal_scielo',
'citgrant_journal_scielo_wos',
'citgrant_journal_wos',
'citgrant_journal_other',
'citgrant_other_docs',
'citgrant_books',
'cit_pub_year',
'cit_pubyear_minus1',
'cit_pubyear_minus2',
'cit_pubyear_minus3',
'cit_pubyear_minus4',
'cit_pubyear_minus5',
'cit_pubyear_minus6',
'cit_pubyear_minus7',
'cit_pubyear_minus8',
'cit_pubyear_minus9',
'cit_pubyear_minus10'
]:
citation = cit[h][label]
worksheet.write(row, col, citation or '')
col += 1
else:
col += 20
# Access - Google Analytics
col = 160
if 'ga_access' in doc:
if h == '2017':
for label in [
'total_access',
'porcent_americas',
'porcent_brazil',
'porcent_united_states',
'porcent_asia',
'porcent_china',
'porcent_india',
'porcent_europe',
'porcent_spain',
'porcent_portugal',
'porcent_africa',
'porcent_south_africa',
'porcent_palop',
'porcent_oceania',
'porcent_others'
]:
if label in doc['ga_access']:
ga_access = doc['ga_access'][label]
worksheet.write(row, col, ga_access)
col += 1
else:
col += 15
            # advance to the next year row
            row += 1
        # advance to the next journal
row += 1
    # Creates the 'thematic areas' (areas tematicas) worksheet
formatline = workbook.add_format({'text_wrap': False, 'size': 9})
worksheet3 = workbook.add_worksheet('dados agregados - AT')
worksheet3.freeze_panes(1, 0)
worksheet3.set_row(0, 60)
sheet3 = pyexcel.get_sheet(
file_name=atfile,
sheet_name='import',
name_columns_by_row=0)
sheet3_json = sheet3.to_records()
row = 0
col = 0
for h in sheet3.colnames:
worksheet3.write(row, col, h, wrap_header)
col += 1
row = 1
for line in sheet3_json:
col = 0
for label in sheet3.colnames:
if col == 0:
worksheet3.write(row, col, line[label], format_date_iso)
else:
worksheet3.write(row, col, line[label], formatline)
col += 1
row += 1
    # Creates the label-description (descricao rotulos) worksheet
worksheet2 = workbook.add_worksheet('rótulos-dados-periódicos')
worksheet2.set_column(0, 0, 30)
worksheet2.set_column(1, 1, 70)
sheet2 = pyexcel.get_sheet(
file_name='data/scielo/rotulos_avaliacao_fapesp_abel.xlsx',
sheet_name='rotulos_dados_periodicos',
name_columns_by_row=0)
sheet2_json = sheet2.to_records()
worksheet2.write(0, 0, 'Rótulo', formatline)
worksheet2.write(0, 1, 'Descrição', formatline)
row = 1
for line in sheet2_json:
col = 0
worksheet2.write(row, col, line['rotulo_portugues'], formatline)
col += 1
worksheet2.write(row, col, line['descricao'], formatline)
row += 1
    # Write the Excel workbook to disk
workbook.close()
def alljournals():
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__fullset=1)
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliação-SciELO-todos-' + today + '.xlsx'
sheetname = 'SciELO-todos'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-todos-AT.xlsx'
journal(
query=scielo,
filename=filename,
sheetname=sheetname,
issn=None,
atfile=atfile)
def activethisyear():
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__activethisyear=1)
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliação-SciELO-ativos2018-' + today + '.xlsx'
sheetname = 'SciELO-ativos2018'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-AT.xlsx'
journal(
query=scielo,
filename=filename,
sheetname=sheetname,
issn=None,
atfile=atfile)
# Active this year and included before 2016
def activethisyear_inclusion_before():
    # the query below already filters on:
# title_current_status='current'
# collection='scl'
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__evaluated=1)
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliação-SciELO-ativos2018-até2015-' + today + '.xlsx'
sheetname = 'SciELO-ativos2018-ate2015'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-até2015-AT.xlsx'
journal(
query=scielo,
filename=filename,
sheetname=sheetname,
issn=None,
atfile=atfile)
def onejournal():
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__evaluated=1)
counter = 0
for j in scielo:
counter += 1
issn = j['issn_scielo']
queryj = models.Scielofapesp.objects.filter(issn_list=issn)
short_title = accent_remover(j['short_title_scielo'])
title = re.sub(r'[\[\]:*?/\\]', "", short_title)
# acronym = j['api']['acronym']
print(title.lower())
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliacao-SciELO-' + issn + '-' + today + '.xlsx'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-até2015-AT-import.xlsx'
journal(
query=queryj,
filename=filename,
sheetname=title[0:30],
issn=issn,
atfile=atfile)
print(counter)
    # MIOC test (Mem. Inst. Oswaldo Cruz)
# queryj = models.Scielofapesp.objects.filter(issn_list='0074-0276')
# issn = '0074-0276'
# filename = 'avaliacao_scielo_'+issn+'.xlsx'
# sheetname = 'Mem. Inst. Oswaldo Cruz'
# atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-até2015-AT.xlsx'
# journal(query=queryj, filename=filename, sheetname=sheetname, issn=issn, atfile=atfile)
def main():
    # All journals in the SciELO collection (fullset)
    # alljournals()
    # Active in 2018 (activethisyear)
    # activethisyear()
    # Included before 2016 (evaluated)
    # activethisyear_inclusion_before()
    # Included before 2016 (evaluated) - one spreadsheet per journal
onejournal()
if __name__ == "__main__":
main()
|
bsd-2-clause
| -1,299,251,792,692,191,500
| 35.709705
| 93
| 0.407071
| false
| 4.093056
| false
| false
| false
|
spaus/pysandbox
|
sqltests.py
|
1
|
2443
|
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Person(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class Address(Base):
__tablename__ = 'address'
# Here we define columns for the table address.
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
street_name = Column(String(250))
street_number = Column(String(250))
post_code = Column(String(250), nullable=False)
person_id = Column(Integer, ForeignKey('person.id'))
person = relationship(Person)
def createExampleDB(BaseClass):
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:///sqlalchemy_example.db')
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
BaseClass.metadata.create_all(engine)
def addRecords(BaseClass):
engine = create_engine('sqlite:///sqlalchemy_example.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
BaseClass.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert a Person in the person table
new_person = Person(name='new person')
session.add(new_person)
session.commit()
# Insert an Address in the address table
new_address = Address(post_code='00000', person=new_person)
session.add(new_address)
session.commit()
# createExampleDB(Base)
addRecords(Base)
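# Illustrative read-back sketch (not part of the original script; assumes the
# database file created above and the same SQLAlchemy 1.x-style API used here).
def listRecords():
    engine = create_engine('sqlite:///sqlalchemy_example.db')
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    # Walk every address and follow the relationship back to its person.
    for address in session.query(Address).all():
        print(address.person.name, address.post_code)
# listRecords()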
|
mit
| -8,484,875,581,494,622,000
| 34.42029
| 76
| 0.722882
| false
| 4.147708
| false
| false
| false
|
diacritica/bokzuyXMPPbot
|
bokzuy_bot.py
|
1
|
5842
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
bokzuyXMPPbot: Your dummy XMPP bot for Bokzuy.com
Copyright (C) 2012 Pablo Ruiz Múzquiz
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
import json
import sleekxmpp
import requests
from optparse import OptionParser
# Make sure we use UTF-8 by default even with python < 3.0.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
else:
raw_input = input
class EchoBot(sleekxmpp.ClientXMPP):
"""
A simple SleekXMPP bot for Bokzuy that will follow orders
such as listing friends, badges and sending bokies.
Based on the SleekXMPP bot.
"""
def __init__(self, jid, password, bokzuy_auth):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.add_event_handler("session_start", self.start)
self.add_event_handler("message", self.message)
self.bokzuy_auth = bokzuy_auth
def start(self, event):
self.send_presence()
self.get_roster()
def message(self, msg):
"""
Process incoming message stanzas. Be aware that this also
includes MUC messages and error messages. It is usually
a good idea to check the messages's type before processing
or sending replies.
Arguments:
msg -- The received message stanza. See the documentation
for stanza objects and the Message stanza to see
how it may be used.
"""
if msg['type'] in ('chat', 'normal'):
msgstr = "%(body)s" % msg
if msgstr == "b":
result = self.get_badges()
resultdict = json.loads(result)
resultlist = ["%i - %s" % (badge["id"], badge["name"]) for \
badge in resultdict["badges"]]
resultlist.sort()
resultstr = "\n".join(resultlist)
elif msgstr == "f":
result = self.get_friends()
resultdict = json.loads(result)
resultlist = ["%i - %s" % (friend[u"id"], friend[u"name"]) for\
friend in resultdict[u"friends"]]
resultlist.sort()
resultstr = "\n".join(resultlist)
else:
try:
if msgstr.count("@") == 3:
badgeid, userid, comment, group = msgstr.split("@")
else:
group = ""
badgeid, userid, comment = msgstr.split("@")
result = self.send_boky(int(badgeid), int(userid), \
comment, group)
resultstr = json.loads(result)["msg"]
except:
resultstr = "This bot is away or you made a mistake"
msg.reply(resultstr).send()
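    # Illustrative message formats handled above (not part of the original
    # file; the numeric IDs are hypothetical examples):
    #   "b"                        -> list badges
    #   "f"                        -> list friends
    #   "3@10@Nice work"           -> send badge 3 to user 10 with a comment
    #   "3@10@Nice work@kaleidos"  -> same, scoped to the "kaleidos" group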
def send_boky(self, badgeid=1, userid=10, \
comment="API TEST THROUGH XMPP BOT :)", group="kaleidos"):
params = {
'badgeId': badgeid,
'comment': comment,
'group': group,
}
response = requests.post("https://api.bokzuy.com/%s/bokies" % \
(userid), data=params, auth=self.bokzuy_auth, verify=False)
return response.content
def get_badges(self):
response = requests.get("https://api.bokzuy.com/badges",\
auth=self.bokzuy_auth, verify=False)
return response.content
def get_friends(self):
response = requests.get("https://api.bokzuy.com/me/friends",\
auth=self.bokzuy_auth, verify=False)
return response.content
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
# Bokzuy user and password options.
optp.add_option("-b", "--bokid", dest="bokzuy_username",
help="Bokzuy user to use")
optp.add_option("-w", "--bokpass", dest="bokzuy_password",
help="Bokzuy password to use")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
if opts.bokzuy_username is None:
opts.bokzuy_username = raw_input("Bokzuy username: ")
if opts.bokzuy_password is None:
opts.bokzuy_password = getpass.getpass("Bokzuy password: ")
bokzuy_auth = (opts.bokzuy_username, opts.bokzuy_password)
xmpp = EchoBot(opts.jid, opts.password, bokzuy_auth)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0060') # PubSub
xmpp.register_plugin('xep_0199') # XMPP Ping
if xmpp.connect(('talk.google.com', 5222)):
#xmpp.process(block=True)
xmpp.process(threaded=False)
print("Done!")
else:
print("Unable to connect.")
|
gpl-2.0
| -1,316,866,664,008,892,200
| 31.270718
| 79
| 0.564287
| false
| 3.744231
| false
| false
| false
|
alexey-grom/django-userflow
|
userflow/views/verify/request.py
|
1
|
1109
|
# encoding: utf-8
from django.http.response import HttpResponseRedirect, Http404
from django.views.generic.detail import DetailView
from userflow.models import UserEmail
class RequestConfirmEmailView(DetailView):
model = UserEmail
def get_queryset(self):
return super(RequestConfirmEmailView, self).get_queryset().inactive()
def get_object(self, queryset=None):
object = super(RequestConfirmEmailView, self).get_object(queryset)
if object.user != self.request.user:
if object.user.is_active:
raise Http404
confirmation = object.confirmations.\
unfinished().\
first()
if not confirmation:
from userflow.models import EmailConfirmation
confirmation = EmailConfirmation.objects.create(email=object)
return confirmation
def render_to_response(self, context, **response_kwargs):
self.object.send('verify',
self.object.get_owner(),
self.request)
return HttpResponseRedirect(self.object.get_wait_url())
|
mit
| 1,387,404,666,578,132,700
| 33.65625
| 77
| 0.655546
| false
| 4.60166
| false
| false
| false
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/flags.py
|
1
|
6431
|
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common command-line flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool("registry_help", False,
"If True, logs the contents of the registry and exits.")
flags.DEFINE_bool("tfdbg", False,
"If True, use the TF debugger CLI on train/eval.")
flags.DEFINE_bool("export_saved_model", False,
"DEPRECATED - see serving/export.py.")
flags.DEFINE_bool("dbgprofile", False,
"If True, record the timeline for chrome://tracing/.")
flags.DEFINE_string("model", None, "Which model to use.")
flags.DEFINE_string("hparams_set", None, "Which parameters to use.")
flags.DEFINE_string("hparams_range", None, "Parameters range.")
flags.DEFINE_string("hparams", "",
"A comma-separated list of `name=value` hyperparameter "
"values. This flag is used to override hyperparameter "
"settings either when manually selecting hyperparameters "
"or when using Vizier. If a hyperparameter setting is "
"specified by this flag then it must be a valid "
"hyperparameter name for the model.")
flags.DEFINE_string("problem", None, "Problem name.")
# data_dir is a common flag name - catch conflicts and define it once.
try:
flags.DEFINE_string("data_dir", None, "Directory with training data.")
except: # pylint: disable=bare-except
pass
flags.DEFINE_integer("train_steps", 250000,
"The number of steps to run training for.")
flags.DEFINE_string("eval_early_stopping_metric", "loss",
"If --eval_early_stopping_steps is not None, then stop "
"when --eval_early_stopping_metric has not decreased for "
"--eval_early_stopping_steps")
flags.DEFINE_float("eval_early_stopping_metric_delta", 0.1,
"Delta determining whether metric has plateaued.")
flags.DEFINE_integer("eval_early_stopping_steps", None,
"If --eval_early_stopping_steps is not None, then stop "
"when --eval_early_stopping_metric has not decreased for "
"--eval_early_stopping_steps")
flags.DEFINE_bool("eval_early_stopping_metric_minimize", True,
"Whether to check for the early stopping metric going down "
"or up.")
flags.DEFINE_integer("eval_timeout_mins", 240,
"The maximum amount of time to wait to wait between "
"checkpoints. Set -1 to wait indefinitely.")
flags.DEFINE_bool("eval_run_autoregressive", False,
"Run eval autoregressively where we condition on previous"
"generated output instead of the actual target.")
flags.DEFINE_bool("eval_use_test_set", False,
"Whether to use the '-test' data for EVAL (and PREDICT).")
flags.DEFINE_integer("keep_checkpoint_max", 20,
"How many recent checkpoints to keep.")
flags.DEFINE_bool("enable_graph_rewriter", False,
"Enable graph optimizations that are not on by default.")
flags.DEFINE_integer("keep_checkpoint_every_n_hours", 10000,
"Number of hours between each checkpoint to be saved. "
"The default value 10,000 hours effectively disables it.")
flags.DEFINE_integer("save_checkpoints_secs", 0,
"Save checkpoints every this many seconds. "
"Default=0 means save checkpoints each x steps where x "
"is max(iterations_per_loop, local_eval_frequency).")
flags.DEFINE_bool("log_device_placement", False,
"Whether to log device placement.")
flags.DEFINE_string("warm_start_from", None, "Warm start from checkpoint.")
# Distributed training flags
flags.DEFINE_integer("local_eval_frequency", 1000,
"Save checkpoints and run evaluation every N steps during "
"local training.")
flags.DEFINE_integer("eval_throttle_seconds", 600,
"Do not re-evaluate unless the last evaluation was started"
" at least this many seconds ago.")
flags.DEFINE_bool("sync", False, "Sync compute on PS.")
flags.DEFINE_string("worker_job", "/job:localhost", "name of worker job")
flags.DEFINE_integer("worker_gpu", 1, "How many GPUs to use.")
flags.DEFINE_integer("worker_replicas", 1, "How many workers to use.")
flags.DEFINE_integer("worker_id", 0, "Which worker task are we.")
flags.DEFINE_float("worker_gpu_memory_fraction", 0.95,
"Fraction of GPU memory to allocate.")
flags.DEFINE_integer("ps_gpu", 0, "How many GPUs to use per ps.")
flags.DEFINE_string("gpu_order", "", "Optional order for daisy-chaining GPUs."
" e.g. \"1 3 2 4\"")
flags.DEFINE_string("ps_job", "/job:ps", "name of ps job")
flags.DEFINE_integer("ps_replicas", 0, "How many ps replicas.")
# Decoding flags
flags.DEFINE_string("decode_hparams", "",
"Comma-separated list of name=value pairs to control "
"decode behavior. See decoding.decode_hparams for "
"defaults.")
flags.DEFINE_string("decode_from_file", "",
"Path to the source file for decoding, used by "
"continuous_decode_from_file.")
flags.DEFINE_string("decode_to_file", "",
"Path to the decoded file generated by decoding, used by "
"continuous_decode_from_file.")
flags.DEFINE_string("decode_reference", "",
"Path to the reference file for decoding, used by "
"continuous_decode_from_file to compute BLEU score.")
|
apache-2.0
| -5,781,691,177,619,446,000
| 50.862903
| 80
| 0.636759
| false
| 4.149032
| false
| false
| false
|
mkuron/espresso
|
src/config/check_myconfig.py
|
1
|
3768
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from subprocess import CalledProcessError
from defines import Defines
import featuredefs
def damerau_levenshtein_distance(s1, s2):
d = {}
lenstr1 = len(s1)
lenstr2 = len(s2)
for i in range(-1, lenstr1 + 1):
d[(i, -1)] = i + 1
for j in range(-1, lenstr2 + 1):
d[(-1, j)] = j + 1
for i in range(lenstr1):
for j in range(lenstr2):
if s1[i] == s2[j]:
cost = 0
else:
cost = 1
d[(i, j)] = min(
d[(i - 1, j)] + 1, # deletion
d[(i, j - 1)] + 1, # insertion
d[(i - 1, j - 1)] + cost, # substitution
)
if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost)
# transposition
return d[lenstr1 - 1, lenstr2 - 1]
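# Illustrative sanity checks (not part of the original script; the feature
# names are just examples): one insertion, identical strings, and one
# adjacent transposition respectively.
assert damerau_levenshtein_distance("LENARD_JONES", "LENNARD_JONES") == 1
assert damerau_levenshtein_distance("ELECTROSTATICS", "ELECTROSTATICS") == 0
assert damerau_levenshtein_distance("DPD", "DDP") == 1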
def handle_unknown(f, all_features):
match = None
max_dist = max(2, len(f) // 2)
for d in all_features:
dist = damerau_levenshtein_distance(f, d)
if dist < max_dist:
            max_dist = dist  # tighten the threshold so the closest feature wins
match = d
if match:
print("Unknown feature '{}', did you mean '{}'?".format(f, match))
else:
print("Unknown feature '{}'".format(f))
class FeatureError(Exception):
pass
def print_exception(ex):
print("""Skipped external header because {} returned non-zero exit code {},
output: {}.""".format(' '.join(ex.cmd), ex.returncode, ex.output.strip()))
def check_myconfig(compiler, feature_file, myconfig, pre_header=None):
# This does not work on all compilers, so if the parsing fails
# we just bail out.
external_defs = []
if pre_header:
try:
external_features = Defines(compiler).defines(pre_header)
except CalledProcessError as ex:
print_exception(ex)
return
external_defs = ['-D' + s for s in external_features]
try:
my_features = Defines(compiler, flags=external_defs).defines(myconfig)
except CalledProcessError as ex:
print_exception(ex)
return
# Parse feature file
defs = featuredefs.defs(feature_file)
error_state = False
for e in (my_features & defs.externals):
error_state = True
my_features.remove(e)
print(
"External feature '{}' can not be defined in myconfig.".format(e))
for u in (my_features - defs.features):
if u.startswith('__'):
continue
error_state = True
handle_unknown(u, defs.features)
if error_state:
raise FeatureError("There were errors in '{}'".format(sys.argv[3]))
else:
return
if __name__ == "__main__":
if len(sys.argv) > 4:
pre_header = sys.argv[4]
else:
pre_header = None
try:
check_myconfig(sys.argv[1], sys.argv[2], sys.argv[3], pre_header)
sys.exit()
except FeatureError:
sys.exit("There were errors in '{}'".format(sys.argv[3]))
|
gpl-3.0
| 4,629,193,591,994,059,000
| 28.904762
| 87
| 0.57776
| false
| 3.619597
| false
| false
| false
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/lettuce/django/apps.py
|
1
|
3009
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import join, dirname
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from django.apps import apps as django_apps
from django.conf import settings
def _filter_bultins(module):
"returns only those apps that are not builtin django.contrib"
name = module.__name__
return not name.startswith("django.contrib") and name != 'lettuce.django'
def _filter_configured_apps(module):
"returns only those apps that are in django.conf.settings.LETTUCE_APPS"
app_found = True
if hasattr(settings, 'LETTUCE_APPS') and isinstance(settings.LETTUCE_APPS, tuple):
app_found = False
for appname in settings.LETTUCE_APPS:
if module.__name__.startswith(appname):
app_found = True
return app_found
def _filter_configured_avoids(module):
"returns apps that are not within django.conf.settings.LETTUCE_AVOID_APPS"
run_app = False
if hasattr(settings, 'LETTUCE_AVOID_APPS') and isinstance(settings.LETTUCE_AVOID_APPS, tuple):
for appname in settings.LETTUCE_AVOID_APPS:
if module.__name__.startswith(appname):
run_app = True
return not run_app
def get_apps():
return [app_cfg.module for app_cfg in django_apps.get_app_configs()]
def harvest_lettuces(only_the_apps=None, avoid_apps=None, path="features"):
"""gets all installed apps that are not from django.contrib
returns a list of tuples with (path_to_app, app_module)
"""
apps = get_apps()
if isinstance(only_the_apps, tuple) and any(only_the_apps):
def _filter_only_specified(module):
return module.__name__ in only_the_apps
apps = filter(_filter_only_specified, apps)
else:
apps = filter(_filter_bultins, apps)
apps = filter(_filter_configured_apps, apps)
apps = filter(_filter_configured_avoids, apps)
if isinstance(avoid_apps, tuple) and any(avoid_apps):
def _filter_avoid(module):
return module.__name__ not in avoid_apps
apps = filter(_filter_avoid, apps)
joinpath = lambda app: (join(dirname(app.__file__), path), app)
return map(joinpath, apps)
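# Minimal usage sketch (illustrative, not part of the original module; the app
# label below is hypothetical and Django settings must be configured first):
#   for feature_dir, app_module in harvest_lettuces(avoid_apps=('myproject.legacy',)):
#       print("features for %s expected in %s" % (app_module.__name__, feature_dir))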
|
agpl-3.0
| -8,298,065,788,740,383,000
| 34.388235
| 98
| 0.691822
| false
| 3.686275
| false
| false
| false
|
rjdp/cement
|
cement/ext/ext_dummy.py
|
1
|
6538
|
"""Dummy Framework Extension"""
from ..core import backend, output, handler, mail
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class DummyOutputHandler(output.CementOutputHandler):
"""
This class is an internal implementation of the
:ref:`IOutput <cement.core.output>` interface. It does not take any
parameters on initialization, and does not actually output anything.
"""
class Meta:
"""Handler meta-data"""
interface = output.IOutput
"""The interface this class implements."""
label = 'dummy'
"""The string identifier of this handler."""
display_override_option = False
def render(self, data_dict, template=None):
"""
This implementation does not actually render anything to output, but
rather logs it to the debug facility.
:param data_dict: The data dictionary to render.
:param template: The template parameter is not used by this
implementation at all.
:returns: None
"""
LOG.debug("not rendering any output to console")
LOG.debug("DATA: %s" % data_dict)
return None
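# Illustrative configuration sketch (not part of the original module): the
# handler above is selected the same way as the mail handler shown below,
# e.g. in an application's Meta:
#
#   class MyApp(CementApp):
#       class Meta:
#           label = 'myapp'
#           output_handler = 'dummy'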
class DummyMailHandler(mail.CementMailHandler):
"""
This class implements the :ref:`IMail <cement.core.mail>`
interface, but is intended for use in development as no email is actually
sent.
**Usage**
.. code-block:: python
class MyApp(CementApp):
class Meta:
label = 'myapp'
mail_handler = 'dummy'
# create, setup, and run the app
app = MyApp()
app.setup()
app.run()
# fake sending an email message
app.mail.send('This is my fake message',
subject='This is my subject',
to=['john@example.com', 'rita@example.com'],
from_addr='me@example.com',
)
The above will print the following to console:
.. code-block:: text
======================================================================
DUMMY MAIL MESSAGE
----------------------------------------------------------------------
To: john@example.com, rita@example.com
From: me@example.com
CC:
BCC:
Subject: This is my subject
---
This is my fake message
----------------------------------------------------------------------
**Configuration**
This handler supports the following configuration settings:
* **to** - Default ``to`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **from_addr** - Default ``from_addr`` address
* **cc** - Default ``cc`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **bcc** - Default ``bcc`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **subject** - Default ``subject``
* **subject_prefix** - Additional string to prepend to the ``subject``
You can add these to any application configuration file under a
``[mail.dummy]`` section, for example:
**~/.myapp.conf**
.. code-block:: text
[myapp]
# set the mail handler to use
mail_handler = dummy
[mail.dummy]
# default to addresses (comma separated list)
to = me@example.com
# default from address
from = someone_else@example.com
# default cc addresses (comma separated list)
cc = jane@example.com, rita@example.com
# default bcc addresses (comma separated list)
bcc = blackhole@example.com, someone_else@example.com
# default subject
subject = This is The Default Subject
# additional prefix to prepend to the subject
subject_prefix = MY PREFIX >
"""
class Meta:
#: Unique identifier for this handler
label = 'dummy'
def _get_params(self, **kw):
params = dict()
for item in ['to', 'from_addr', 'cc', 'bcc', 'subject']:
config_item = self.app.config.get(self._meta.config_section, item)
params[item] = kw.get(item, config_item)
# also grab the subject_prefix
params['subject_prefix'] = self.app.config.get(
self._meta.config_section,
'subject_prefix'
)
return params
def send(self, body, **kw):
"""
Mimic sending an email message, but really just print what would be
sent to console. Keyword arguments override configuration
defaults (cc, bcc, etc).
:param body: The message body to send
:type body: multiline string
:keyword to: List of recipients (generally email addresses)
:type to: list
:keyword from_addr: Address (generally email) of the sender
:type from_addr: string
:keyword cc: List of CC Recipients
:type cc: list
:keyword bcc: List of BCC Recipients
:type bcc: list
:keyword subject: Message subject line
:type subject: string
:returns: Boolean (``True`` if message is sent successfully, ``False``
otherwise)
**Usage**
.. code-block:: python
# Using all configuration defaults
app.mail.send('This is my message body')
# Overriding configuration defaults
app.mail.send('My message body'
to=['john@example.com'],
from_addr='me@example.com',
cc=['jane@example.com', 'rita@example.com'],
subject='This is my subject',
)
"""
        # collect config values, applying any keyword overrides
params = self._get_params(**kw)
msg = "\n" + "=" * 77 + "\n"
msg += "DUMMY MAIL MESSAGE\n"
msg += "-" * 77 + "\n\n"
msg += "To: %s\n" % ', '.join(params['to'])
msg += "From: %s\n" % params['from_addr']
msg += "CC: %s\n" % ', '.join(params['cc'])
msg += "BCC: %s\n" % ', '.join(params['bcc'])
if params['subject_prefix'] not in [None, '']:
msg += "Subject: %s %s\n\n---\n\n" % (params['subject_prefix'],
params['subject'])
else:
msg += "Subject: %s\n\n---\n\n" % params['subject']
msg += body + "\n"
msg += "\n" + "-" * 77 + "\n"
print(msg)
return True
def load(app):
handler.register(DummyOutputHandler)
handler.register(DummyMailHandler)
|
bsd-3-clause
| 3,786,560,060,303,839,700
| 28.318386
| 78
| 0.547568
| false
| 4.349967
| true
| false
| false
|
nloyolag/music-albums
|
music_albums/migrations/0001_initial.py
|
1
|
2519
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 20:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=500, verbose_name='Title')),
('release_date', models.DateTimeField(blank=True, verbose_name='Release date')),
('rating', models.IntegerField(blank=True, choices=[(1, '★'), (2, '★★'), (3, '★★★'), (4, '★★★★'), (5, '★★★★★')], verbose_name='Rating')),
('cover', models.ImageField(default='images/albums/default.jpg', upload_to='images/albums', verbose_name='Cover')),
],
options={
'verbose_name': 'Album',
'verbose_name_plural': 'Albums',
},
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500, verbose_name='Name')),
('albums', models.ManyToManyField(related_name='artists', to='music_albums.Album', verbose_name='Albums')),
],
options={
'verbose_name': 'Artist',
'verbose_name_plural': 'Artists',
},
),
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500, verbose_name='Name')),
('is_operating', models.BooleanField(default=True, verbose_name='Is operating')),
],
options={
'verbose_name': 'Record Label',
'verbose_name_plural': 'Record Labels',
},
),
migrations.AddField(
model_name='album',
name='label',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='album', to='music_albums.Label', verbose_name='Record Label'),
),
]
|
mit
| -5,822,098,756,944,028,000
| 40.483333
| 180
| 0.540779
| false
| 3.950794
| false
| false
| false
|
smileboywtu/Code-Interview
|
combination.py
|
1
|
1331
|
# list all combinations of n items chosen from m using a recursive method
# use python 3.5 as default
"""
c(4, 2):
{1,2,3,4}
/ | \\
/ | \\
1{2,3,4} 2{3,4} 3{4}
/ | \ / \ |
1, 2 1,3 1,4 2,3 2,4 3,4
"""
def combinationiterator(set, start, end, current, choose):
"iterate the elements in set"
    if current == choose:
for index in range(choose):
print(set[index], end=' ')
print()
else:
for index in range(start, end):
# get enough elements to choose
if end - index >= choose - current:
set.append(index + 1)
                # recurse from index + 1 (not start + 1) so earlier elements are never reused
combinationiterator(set.copy(), index+1, end, current+1, choose)
set.pop()
def combination(m, n):
"interface to create the combination list"
set = []
combinationiterator(set, 0, m, 0, n)
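# Illustrative expected output (not part of the original script): combination(4, 2)
# prints the six pairs from the recursion tree sketched in the module docstring:
#   1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4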
print("""
combination using recursive method
C(3, 2):
1, 2
1, 3
2, 3
""")
m = 3
n = 2
print("choose n=", n, "in group of m=", m, "members")
combination(m, n)
input("\n\nPress Enter to exit.")
|
gpl-2.0
| 6,553,644,880,663,066,000
| 25.098039
| 80
| 0.449286
| false
| 3.770538
| false
| false
| false
|