code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData
from sqlalchemy import PrimaryKeyConstraint, String, Table, Text
from sqlalchemy import UniqueConstraint
def upgrade(migrate_engine):
    """Create the initial Juno schema: the ``items`` and ``tags`` tables.

    On MySQL backends this also forces the ``migrate_version`` table and the
    database default character set to UTF8, matching the charset used by the
    newly created tables.
    """
    metadata = MetaData()
    metadata.bind = migrate_engine

    # Main resource table: one row per EC2-visible item, keyed by EC2 id.
    items_table = Table(
        'items', metadata,
        Column("id", String(length=30)),
        Column("project_id", String(length=64)),
        Column("vpc_id", String(length=12)),
        Column("os_id", String(length=36)),
        Column("data", Text()),
        PrimaryKeyConstraint('id'),
        UniqueConstraint('os_id', name='items_os_id_idx'),
        mysql_engine="InnoDB",
        mysql_charset="utf8")
    items_table.create()

    # Free-form key/value tags attached to items, scoped per project.
    tags_table = Table(
        'tags', metadata,
        Column("project_id", String(length=64)),
        Column("item_id", String(length=30)),
        Column("key", String(length=127)),
        Column("value", String(length=255)),
        PrimaryKeyConstraint('project_id', 'item_id', 'key'),
        mysql_engine="InnoDB",
        mysql_charset="utf8")
    tags_table.create()

    if migrate_engine.name == "mysql":
        # In Folsom we explicitly converted migrate_version to UTF8.
        migrate_engine.execute(
            "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;")
        # Set default DB charset to UTF8.
        migrate_engine.execute(
            " ALTER DATABASE %s DEFAULT CHARACTER SET utf8;"
            % migrate_engine.url.database)
def downgrade(migrate_engine):
    """Reject downgrade attempts: this initial Juno migration is one-way."""
    raise NotImplementedError("Downgrade from Juno is unsupported.")
| openstack/ec2-api | ec2api/db/sqlalchemy/migrate_repo/versions/001_juno.py | Python | apache-2.0 | 2,144 |
### HiForest Configuration
# Collisions: pp
# Type: MC
# Input: AOD
#
# CMSSW python configuration that builds the "HiForest" ntuple (jets, tracks,
# event info, electron ID) from pp MC AOD input.  Statement order matters:
# modules are attached to `process` and later referenced by name in the paths.
import FWCore.ParameterSet.Config as cms
process = cms.Process('HiForest')
process.options = cms.untracked.PSet()
#####################################################################################
# HiForest labelling info
#####################################################################################
process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
# Record the git tag of the local CMSSW checkout in the forest metadata.
# NOTE(review): `.stdout.read()` returns bytes under Python 3, so the
# `== ''` comparison below only works as intended on Python 2 — which is
# what CMSSW 7_5_X configs ran on.
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
if version == '':
    version = 'no git info'
process.HiForest.HiForestVersion = cms.string(version)
#####################################################################################
# Input source
#####################################################################################
# noDuplicateCheck: skip the run/lumi/event duplicate scan (faster startup;
# safe because each MC file here is produced once).
process.source = cms.Source("PoolSource",
    duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
    fileNames = cms.untracked.vstring(
        "/store/user/gsfs/Neutrinos5Mfor5TeVpp/pp5TeV_Pileup_2_RECO_2016_01_24/160125_211443/0000/step3_pp_Pileup_RAW2DIGI_L1Reco_RECO_1.root"
    )
)
# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
# 75X MC conditions for asymptotic pp running at 5 TeV.
process.GlobalTag = GlobalTag(process.GlobalTag, '75X_mcRun2_asymptotic_ppAt5TeV_v3', '')
process.HiForest.GlobalTagLabel = process.GlobalTag.globaltag
# Customization
# Override the jet energy corrections with the pp 5.02 TeV set.
from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import overrideJEC_pp5020
process = overrideJEC_pp5020(process)
#####################################################################################
# Define tree output
#####################################################################################
process.TFileService = cms.Service("TFileService",
    fileName=cms.string("MiniForestAODMC.root"))
#####################################################################################
# Additional Reconstruction and Analysis: Main Body
#####################################################################################
####################################################################################
#############################
# Jets
#############################
process.load("HeavyIonsAnalysis.JetAnalysis.FullJetSequence_nominalPP")
# Use this version for JEC
# process.load("HeavyIonsAnalysis.JetAnalysis.FullJetSequence_JECPP")
#####################################################################################
############################
# Event Analysis
############################
process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_data_cfi') #use data version to avoid PbPb MC
process.hiEvtAnalyzer.Vertex = cms.InputTag("offlinePrimaryVertices")
# Centrality / event-plane are heavy-ion quantities; disabled for pp.
process.hiEvtAnalyzer.doCentrality = cms.bool(False)
process.hiEvtAnalyzer.doEvtPlane = cms.bool(False)
process.hiEvtAnalyzer.doMC = cms.bool(True) #general MC info
process.hiEvtAnalyzer.doHiMC = cms.bool(False) #HI specific MC info
process.load('HeavyIonsAnalysis.JetAnalysis.HiGenAnalyzer_cfi')
process.HiGenParticleAna.genParticleSrc = cms.untracked.InputTag("genParticles")
process.HiGenParticleAna.doHI = False
# Only store generator particles above 10 GeV to keep the tree small.
process.HiGenParticleAna.ptMin = cms.untracked.double(10.0)
#process.HiGenParticleAna.chargedOnly = cms.untracked.bool(True)
#process.HiGenParticleAna.stableOnly = cms.untracked.bool(True)
process.load('HeavyIonsAnalysis.EventAnalysis.runanalyzer_cff')
#process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_pp_cfi")
#process.pfcandAnalyzer.skipCharged = False
#process.pfcandAnalyzer.pfPtMin = 0
#process.pfcandAnalyzer.pfCandidateLabel = cms.InputTag("particleFlow")
#process.pfcandAnalyzer.doVS = cms.untracked.bool(False)
#process.pfcandAnalyzer.doUEraw_ = cms.untracked.bool(False)
#process.pfcandAnalyzer.genLabel = cms.InputTag("genParticles")
#####################################################################################
#########################
# Track Analyzer
#########################
process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_cff')
# Use this instead for track corrections
## process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_Corr_cff')
#####################################################################################
#####################
# photons
######################
#process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
#process.ggHiNtuplizer.gsfElectronLabel = cms.InputTag("gedGsfElectrons")
#process.ggHiNtuplizer.recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerpp')
#process.ggHiNtuplizer.VtxLabel = cms.InputTag("offlinePrimaryVertices")
#process.ggHiNtuplizer.particleFlowCollection = cms.InputTag("particleFlow")
#process.ggHiNtuplizer.doVsIso = cms.bool(False)
#process.ggHiNtuplizer.doElectronVID = cms.bool(True)
#process.ggHiNtuplizerGED = process.ggHiNtuplizer.clone(recoPhotonSrc = cms.InputTag('gedPhotons'),
#                                                       recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerppGED'))
####################################################################################
#####################
# Electron ID
#####################
from PhysicsTools.SelectorUtils.tools.vid_id_tools import *
# turn on VID producer, indicate data format to be processed
# DataFormat.AOD or DataFormat.MiniAOD
dataFormat = DataFormat.AOD
switchOnVIDElectronIdProducer(process, dataFormat)
# define which IDs we want to produce. Check here https://twiki.cern.ch/twiki/bin/viewauth/CMS/CutBasedElectronIdentificationRun2#Recipe_for_regular_users_for_7_4
my_id_modules = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Spring15_25ns_V1_cff']
#add them to the VID producer
for idmod in my_id_modules:
    setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)
####################################################################################
#####################
# tupel and necessary PAT sequences
#####################
#process.load("HeavyIonsAnalysis.VectorBosonAnalysis.tupelSequence_pp_mc_cff")
#####################################################################################
#########################
# Main analysis list
#########################
# NOTE(review): the mix of '*' (ordered) and '+' (unordered) below follows the
# original; CMSSW treats both as sequencing operators within a Path.
process.ana_step = cms.Path(process.hltanalysis *
                            process.hiEvtAnalyzer *
                            process.HiGenParticleAna*
                            process.jetSequences +
                            process.egmGsfElectronIDSequence + #Should be added in the path for VID module
                            # process.ggHiNtuplizer +
                            # process.ggHiNtuplizerGED +
                            # process.pfcandAnalyzer +
                            process.HiForest +
                            process.trackSequencesPP +
                            process.runAnalyzer
                            # process.tupelPatSequence
                            )
#####################################################################################
#########################
# Event Selection
#########################
# Each filter below runs in its own Path so the skim analyzer can record the
# per-event decision bits without actually rejecting events.
process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.HBHENoiseFilterResult = cms.Path(process.fHBHENoiseFilterResult)
process.HBHENoiseFilterResultRun1 = cms.Path(process.fHBHENoiseFilterResultRun1)
process.HBHENoiseFilterResultRun2Loose = cms.Path(process.fHBHENoiseFilterResultRun2Loose)
process.HBHENoiseFilterResultRun2Tight = cms.Path(process.fHBHENoiseFilterResultRun2Tight)
process.HBHEIsoNoiseFilterResult = cms.Path(process.fHBHEIsoNoiseFilterResult)
# Require a good primary vertex: real, within 25 cm in z and 2 cm radially,
# built from at least two tracks.
process.PAprimaryVertexFilter = cms.EDFilter("VertexSelector",
    src = cms.InputTag("offlinePrimaryVertices"),
    cut = cms.string("!isFake && abs(z) <= 25 && position.Rho <= 2 && tracksSize >= 2"),
    filter = cms.bool(True), # otherwise it won't filter the events
)
process.NoScraping = cms.EDFilter("FilterOutScraping",
    applyfilter = cms.untracked.bool(True),
    debugOn = cms.untracked.bool(False),
    numtrack = cms.untracked.uint32(10),
    thresh = cms.untracked.double(0.25)
)
process.pPAprimaryVertexFilter = cms.Path(process.PAprimaryVertexFilter)
process.pBeamScrapingFilter=cms.Path(process.NoScraping)
process.load("HeavyIonsAnalysis.VertexAnalysis.PAPileUpVertexFilter_cff")
process.pVertexFilterCutG = cms.Path(process.pileupVertexFilterCutG)
process.pVertexFilterCutGloose = cms.Path(process.pileupVertexFilterCutGloose)
process.pVertexFilterCutGtight = cms.Path(process.pileupVertexFilterCutGtight)
process.pVertexFilterCutGplus = cms.Path(process.pileupVertexFilterCutGplus)
process.pVertexFilterCutE = cms.Path(process.pileupVertexFilterCutE)
process.pVertexFilterCutEandG = cms.Path(process.pileupVertexFilterCutEandG)
# EndPath: the skim analyzer records all filter decisions into the forest.
process.pAna = cms.EndPath(process.skimanalysis)
# Customization
| tuos/RpPb2015Analysis | pileup/mc/pu1/v01/runForestAOD_pp_MC_75X.py | Python | mit | 9,788 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxList classes and functions."""
import numpy as np
class BoxList(object):
  """Box collection.

  BoxList represents a list of bounding boxes as numpy array, where each
  bounding box is represented as a row of 4 numbers,
  [y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a
  given list correspond to a single image.

  Optionally, users can add additional related fields (such as
  objectness/classification scores).
  """

  def __init__(self, data):
    """Constructs box collection.

    Args:
      data: a numpy array of shape [N, 4] representing box coordinates

    Raises:
      ValueError: if bbox data is not a numpy array
      ValueError: if invalid dimensions for bbox data
    """
    if not isinstance(data, np.ndarray):
      raise ValueError('data must be a numpy array.')
    if len(data.shape) != 2 or data.shape[1] != 4:
      raise ValueError('Invalid dimensions for box data.')
    if data.dtype != np.float32 and data.dtype != np.float64:
      raise ValueError('Invalid data type for box data: float is required.')
    if not self._is_valid_boxes(data):
      raise ValueError('Invalid box data. data must be a numpy array of '
                       'N*[y_min, x_min, y_max, x_max]')
    self.data = {'boxes': data}

  def num_boxes(self):
    """Return number of boxes held in collections."""
    return self.data['boxes'].shape[0]

  def get_extra_fields(self):
    """Return all non-box fields."""
    return [k for k in self.data.keys() if k != 'boxes']

  def has_field(self, field):
    """Return True if the named field exists in this collection."""
    return field in self.data

  def add_field(self, field, field_data):
    """Add data to a specified field.

    Args:
      field: a string parameter used to specify a related field to be accessed.
      field_data: a numpy array of [N, ...] representing the data associated
          with the field.

    Raises:
      ValueError: if the field already exists or the dimension of the field
          data does not match the number of boxes.
    """
    if self.has_field(field):
      # Bug fix: the original message was missing the space before 'already'.
      raise ValueError('Field ' + field + ' already exists')
    if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
      raise ValueError('Invalid dimensions for field data')
    self.data[field] = field_data

  def get(self):
    """Convenience function for accessing box coordinates.

    Returns:
      a numpy array of shape [N, 4] representing box corners
    """
    return self.get_field('boxes')

  def get_field(self, field):
    """Accesses data associated with the specified field in the box collection.

    Args:
      field: a string parameter used to specify a related field to be accessed.

    Returns:
      a numpy 1-d array representing data of an associated field

    Raises:
      ValueError: if invalid field
    """
    if not self.has_field(field):
      raise ValueError('field {} does not exist'.format(field))
    return self.data[field]

  def get_coordinates(self):
    """Get corner coordinates of boxes.

    Returns:
      a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
    """
    box_coordinates = self.get()
    y_min = box_coordinates[:, 0]
    x_min = box_coordinates[:, 1]
    y_max = box_coordinates[:, 2]
    x_max = box_coordinates[:, 3]
    return [y_min, x_min, y_max, x_max]

  def _is_valid_boxes(self, data):
    """Check whether data fulfills the format of N*[ymin, xmin, ymax, xmax].

    Args:
      data: a numpy array of shape [N, 4] representing box coordinates

    Returns:
      a boolean indicating whether all ymax of boxes are equal or greater than
      ymin, and all xmax of boxes are equal or greater than xmin.
    """
    # Bug fix: the original used xrange(), which does not exist on Python 3.
    # A vectorized comparison is both portable and runs in C instead of a
    # Python-level loop.  Empty input is trivially valid, as before.
    if data.shape[0] > 0:
      return bool(np.all((data[:, 0] <= data[:, 2]) &
                         (data[:, 1] <= data[:, 3])))
    return True
| Laucoonte/cpmx | object_detection/utils/np_box_list.py | Python | mit | 4,520 |
"""Authentication operations."""
import logging
import requests
from objectrocket import bases
from objectrocket import errors
logger = logging.getLogger(__name__)
class Auth(bases.BaseAuthLayer):
    """Authentication operations.

    :param objectrocket.client.Client base_client: An objectrocket.client.Client instance.
    """

    def __init__(self, base_client):
        # Credentials and token are held privately and exposed through the
        # _username/_password/_token properties so re-authentication can
        # update them in one place.
        self.__username = None
        self.__password = None
        self.__token = None
        super(Auth, self).__init__(base_client=base_client)

    #####################
    # Public interface. #
    #####################
    def authenticate(self, username, password):
        """Authenticate against the ObjectRocket API.

        :param str username: The username to perform basic authentication against the API with.
        :param str password: The password to perform basic authentication against the API with.
        :returns: A token used for authentication against token protected resources.
        :rtype: str
        :raises objectrocket.errors.AuthFailure: if authentication fails for any reason.
        """
        # Update the username and password bound to this instance for re-authentication needs.
        self._username = username
        self._password = password

        # Attempt to authenticate.
        resp = requests.get(
            self._url,
            auth=(username, password),
            **self._default_request_kwargs
        )

        # Attempt to extract authentication data.
        try:
            if resp.status_code == 200:
                json_data = resp.json()
                token = json_data['data']['token']
            elif resp.status_code == 401:
                raise errors.AuthFailure(resp.json().get('message', 'Authentication Failure.'))
            else:
                raise errors.AuthFailure(
                    "Unknown exception while authenticating: '{}'".format(resp.text)
                )
        except errors.AuthFailure:
            raise
        except Exception as ex:
            # Consistency fix: use the module-level logger (was logging.exception,
            # which logs through the root logger and ignores this module's config).
            logger.exception(ex)
            raise errors.AuthFailure('{}: {}'.format(ex.__class__.__name__, ex))

        # Update the token bound to this instance for use by other client operations layers.
        self._token = token
        logger.info('New API token received: "{}".'.format(token))
        return token

    ######################
    # Private interface. #
    ######################
    @property
    def _default_request_kwargs(self):
        """The default request keyword arguments to be passed to the requests library."""
        return super(Auth, self)._default_request_kwargs

    @property
    def _password(self):
        """The password currently being used for authentication."""
        return self.__password

    @_password.setter
    def _password(self, new_password):
        """Update the password to be used for authentication."""
        self.__password = new_password

    def _refresh(self):
        """Refresh the API token using the currently bound credentials.

        This is simply a convenience method to be invoked automatically if authentication fails
        during normal client use.
        """
        # Request and set a new API token.
        new_token = self.authenticate(self._username, self._password)
        self._token = new_token
        logger.info('New API token received: "{}".'.format(new_token))
        return self._token

    @property
    def _token(self):
        """The API token this instance is currently using."""
        return self.__token

    @_token.setter
    def _token(self, new_token):
        """Update the API token which this instance is to use."""
        # Fix: property setters return None by protocol; the original's
        # trailing `return` was dead code and has been removed.
        self.__token = new_token

    @property
    def _url(self):
        """The base URL for authentication operations."""
        return self._client._url + 'tokens/'

    @property
    def _username(self):
        """The username currently being used for authentication."""
        return self.__username

    @_username.setter
    def _username(self, new_username):
        """Update the username to be used for authentication."""
        self.__username = new_username

    def _verify(self, token):
        """Verify that the given token is valid.

        :param str token: The API token to verify.
        :returns: The token's corresponding user model as a dict, or None if invalid.
        :rtype: dict
        """
        # Attempt to authenticate.
        url = '{}{}/'.format(self._url, 'verify')
        resp = requests.post(
            url,
            json={'token': token},
            **self._default_request_kwargs
        )

        if resp.status_code == 200:
            return resp.json().get('data', None)
        return None
| objectrocket/python-client | objectrocket/auth.py | Python | mit | 4,660 |
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extract UserMetrics "actions" strings from the Chrome source.
This program generates the list of known actions we expect to see in the
user behavior logs. It walks the Chrome source, looking for calls to
UserMetrics functions, extracting actions and warning on improper calls,
as well as generating the lists of possible actions in situations where
there are many possible actions.
See also:
base/metrics/user_metrics.h
http://wiki.corp.google.com/twiki/bin/view/Main/ChromeUserExperienceMetrics
After extracting all actions, the content will go through a pretty print
function to make sure it's well formatted. If the file content needs to be
changed, a window will be prompted asking for user's consent. The old version
will also be saved in a backup file.
"""
__author__ = 'evanm (Evan Martin)'
from HTMLParser import HTMLParser
import logging
import os
import re
import shutil
import sys
from xml.dom import minidom
import print_style
sys.path.insert(1, os.path.join(sys.path[0], '..', '..', 'python'))
from google import path_utils
# Import the metrics/common module for pretty print xml.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import diff_util
import pretty_print_xml
# Files that are known to use content::RecordComputedAction(), which means
# they require special handling code in this script.
# To add a new file, add it to this list and add the appropriate logic to
# generate the known actions to AddComputedActions() below.
KNOWN_COMPUTED_USERS = (
  'back_forward_menu_model.cc',
  'options_page_view.cc',
  'render_view_host.cc',  # called using webkit identifiers
  'user_metrics.cc',  # method definition
  'new_tab_ui.cc',  # most visited clicks 1-9
  'extension_metrics_module.cc',  # extensions hook for user metrics
  'safe_browsing_blocking_page.cc',  # various interstitial types and actions
  'language_options_handler_common.cc',  # languages and input methods in CrOS
  'cros_language_options_handler.cc',  # languages and input methods in CrOS
  'about_flags.cc',  # do not generate a warning; see AddAboutFlagsActions()
  'external_metrics.cc',  # see AddChromeOSActions()
  'core_options_handler.cc',  # see AddWebUIActions()
  'browser_render_process_host.cc',  # see AddRendererActions()
  'render_thread_impl.cc',  # impl of RenderThread::RecordComputedAction()
  'render_process_host_impl.cc',  # browser side impl for
                                  # RenderThread::RecordComputedAction()
  'mock_render_thread.cc',  # mock of RenderThread::RecordComputedAction()
  'ppb_pdf_impl.cc',  # see AddClosedSourceActions()
  'pepper_pdf_host.cc',  # see AddClosedSourceActions()
  'key_systems_support_uma.cc',  # See AddKeySystemSupportActions()
)

# Language codes used in Chrome. The list should be updated when a new
# language is added to app/l10n_util.cc, as follows:
#
# % (cat app/l10n_util.cc | \
#    perl -n0e 'print $1 if /kAcceptLanguageList.*?\{(.*?)\}/s' | \
#    perl -nle 'print $1, if /"(.*)"/'; echo 'es-419') | \
#   sort | perl -pe "s/(.*)\n/'\$1', /" | \
#   fold -w75 -s | perl -pe 's/^/  /;s/ $//'; echo
#
# The script extracts language codes from kAcceptLanguageList, but es-419
# (Spanish in Latin America) is an exception.
LANGUAGE_CODES = (
  'af', 'am', 'ar', 'az', 'be', 'bg', 'bh', 'bn', 'br', 'bs', 'ca', 'co',
  'cs', 'cy', 'da', 'de', 'de-AT', 'de-CH', 'de-DE', 'el', 'en', 'en-AU',
  'en-CA', 'en-GB', 'en-NZ', 'en-US', 'en-ZA', 'eo', 'es', 'es-419', 'et',
  'eu', 'fa', 'fi', 'fil', 'fo', 'fr', 'fr-CA', 'fr-CH', 'fr-FR', 'fy',
  'ga', 'gd', 'gl', 'gn', 'gu', 'ha', 'haw', 'he', 'hi', 'hr', 'hu', 'hy',
  'ia', 'id', 'is', 'it', 'it-CH', 'it-IT', 'ja', 'jw', 'ka', 'kk', 'km',
  'kn', 'ko', 'ku', 'ky', 'la', 'ln', 'lo', 'lt', 'lv', 'mk', 'ml', 'mn',
  'mo', 'mr', 'ms', 'mt', 'nb', 'ne', 'nl', 'nn', 'no', 'oc', 'om', 'or',
  'pa', 'pl', 'ps', 'pt', 'pt-BR', 'pt-PT', 'qu', 'rm', 'ro', 'ru', 'sd',
  'sh', 'si', 'sk', 'sl', 'sn', 'so', 'sq', 'sr', 'st', 'su', 'sv', 'sw',
  'ta', 'te', 'tg', 'th', 'ti', 'tk', 'to', 'tr', 'tt', 'tw', 'ug', 'uk',
  'ur', 'uz', 'vi', 'xh', 'yi', 'yo', 'zh', 'zh-CN', 'zh-TW', 'zu',
)

# Input method IDs used in Chrome OS. The list should be updated when a
# new input method is added to
# chromeos/ime/input_methods.txt in the Chrome tree, as
# follows:
#
# % sort chromeos/ime/input_methods.txt | \
#   perl -ne "print \"'\$1', \" if /^([^#]+?)\s/" | \
#   fold -w75 -s | perl -pe 's/^/  /;s/ $//'; echo
#
# The script extracts input method IDs from input_methods.txt.
INPUT_METHOD_IDS = (
  'xkb:am:phonetic:arm', 'xkb:be::fra', 'xkb:be::ger', 'xkb:be::nld',
  'xkb:bg::bul', 'xkb:bg:phonetic:bul', 'xkb:br::por', 'xkb:by::bel',
  'xkb:ca::fra', 'xkb:ca:eng:eng', 'xkb:ca:multix:fra', 'xkb:ch::ger',
  'xkb:ch:fr:fra', 'xkb:cz::cze', 'xkb:cz:qwerty:cze', 'xkb:de::ger',
  'xkb:de:neo:ger', 'xkb:dk::dan', 'xkb:ee::est', 'xkb:es::spa',
  'xkb:es:cat:cat', 'xkb:fi::fin', 'xkb:fr::fra', 'xkb:gb:dvorak:eng',
  'xkb:gb:extd:eng', 'xkb:ge::geo', 'xkb:gr::gre', 'xkb:hr::scr',
  'xkb:hu::hun', 'xkb:il::heb', 'xkb:is::ice', 'xkb:it::ita', 'xkb:jp::jpn',
  'xkb:latam::spa', 'xkb:lt::lit', 'xkb:lv:apostrophe:lav', 'xkb:mn::mon',
  'xkb:no::nob', 'xkb:pl::pol', 'xkb:pt::por', 'xkb:ro::rum', 'xkb:rs::srp',
  'xkb:ru::rus', 'xkb:ru:phonetic:rus', 'xkb:se::swe', 'xkb:si::slv',
  'xkb:sk::slo', 'xkb:tr::tur', 'xkb:ua::ukr', 'xkb:us::eng',
  'xkb:us:altgr-intl:eng', 'xkb:us:colemak:eng', 'xkb:us:dvorak:eng',
  'xkb:us:intl:eng',
)

# The path to the root of the repository.
REPOSITORY_ROOT = os.path.join(path_utils.ScriptDir(), '..', '..', '..')

# Global counter of files visited while walking the source tree.
# NOTE(review): it is not updated anywhere in this span — presumably mutated
# by code later in this file; verify before removing.
number_of_files_total = 0

# Tags that need to be inserted to each 'action' tag and their default content.
TAGS = {'description': 'Please enter the description of the metric.',
        'owner': ('Please list the metric\'s owners. Add more owner tags as '
                  'needed.')}
def AddComputedActions(actions):
  """Add computed actions to the actions list.

  These are actions whose names are assembled at runtime by
  content::RecordComputedAction(), so they cannot be found by scanning
  source files for string literals.

  Arguments:
    actions: set of actions to add to.
  """
  # Actions for back_forward_menu_model.cc.
  # Fix: the original loop variable was named `dir`, shadowing the builtin.
  for menu_prefix in ('BackMenu_', 'ForwardMenu_'):
    actions.add(menu_prefix + 'ShowFullHistory')
    actions.add(menu_prefix + 'Popup')
    for i in range(1, 20):
      actions.add(menu_prefix + 'HistoryClick' + str(i))
      actions.add(menu_prefix + 'ChapterClick' + str(i))

  # Actions for new_tab_ui.cc.
  for i in range(1, 10):
    actions.add('MostVisited%d' % i)

  # Actions for safe_browsing_blocking_page.cc.
  for interstitial in ('Phishing', 'Malware', 'Multiple'):
    for action in ('Show', 'Proceed', 'DontProceed', 'ForcedDontProceed'):
      actions.add('SBInterstitial%s%s' % (interstitial, action))

  # Actions for language_options_handler.cc (Chrome OS specific).
  for input_method_id in INPUT_METHOD_IDS:
    actions.add('LanguageOptions_DisableInputMethod_%s' % input_method_id)
    actions.add('LanguageOptions_EnableInputMethod_%s' % input_method_id)
  for language_code in LANGUAGE_CODES:
    actions.add('LanguageOptions_UiLanguageChange_%s' % language_code)
    actions.add('LanguageOptions_SpellCheckLanguageChange_%s' % language_code)
def AddWebKitEditorActions(actions):
  """Add editor actions from editor_client_impl.cc.

  Arguments:
    actions: set of actions to add to.
  """
  action_re = re.compile(r'''\{ [\w']+, +\w+, +"(.*)" +\},''')

  editor_file = os.path.join(REPOSITORY_ROOT, 'webkit', 'api', 'src',
                             'EditorClientImpl.cc')
  # Fix: use a context manager so the file handle is closed deterministically
  # (the original relied on garbage collection to close it).
  with open(editor_file) as f:
    for line in f:
      match = action_re.search(line)
      if match:  # Plain call to RecordAction.
        actions.add(match.group(1))
def AddClosedSourceActions(actions):
  """Add actions recorded by code that is not checked out by default.

  Arguments:
    actions: set of actions to add to.
  """
  # These names come from the closed-source PDF plugin; they are listed
  # here verbatim since the code cannot be scanned.
  actions.update((
      'PDF.FitToHeightButton',
      'PDF.FitToWidthButton',
      'PDF.LoadFailure',
      'PDF.LoadSuccess',
      'PDF.PreviewDocumentLoadFailure',
      'PDF.PrintButton',
      'PDF.PrintPage',
      'PDF.SaveButton',
      'PDF.ZoomFromBrowser',
      'PDF.ZoomInButton',
      'PDF.ZoomOutButton',
      'PDF_Unsupported_3D',
      'PDF_Unsupported_Attachment',
      'PDF_Unsupported_Bookmarks',
      'PDF_Unsupported_Digital_Signature',
      'PDF_Unsupported_Movie',
      'PDF_Unsupported_Portfolios_Packages',
      'PDF_Unsupported_Rights_Management',
      'PDF_Unsupported_Screen',
      'PDF_Unsupported_Shared_Form',
      'PDF_Unsupported_Shared_Review',
      'PDF_Unsupported_Sound',
      'PDF_Unsupported_XFA',
  ))
def AddAndroidActions(actions):
  """Add actions that are used by Chrome on Android.

  Arguments:
    actions: set of actions to add to.
  """
  # Recorded from Java via the JNI bridge, so they are listed here verbatim.
  actions.update((
      'Cast_Sender_CastDeviceSelected',
      'Cast_Sender_CastEnterFullscreen',
      'Cast_Sender_CastMediaType',
      'Cast_Sender_CastPlayRequested',
      'Cast_Sender_YouTubeDeviceSelected',
      'DataReductionProxy_PromoDisplayed',
      'DataReductionProxy_PromoLearnMore',
      'DataReductionProxy_TurnedOn',
      'DataReductionProxy_TurnedOnFromPromo',
      'DataReductionProxy_TurnedOff',
      'MobileActionBarShown',
      'MobileBeamCallbackSuccess',
      'MobileBeamInvalidAppState',
      'MobileBreakpadUploadAttempt',
      'MobileBreakpadUploadFailure',
      'MobileBreakpadUploadSuccess',
      'MobileContextMenuCopyImageLinkAddress',
      'MobileContextMenuCopyLinkAddress',
      'MobileContextMenuCopyLinkText',
      'MobileContextMenuDownloadImage',
      'MobileContextMenuDownloadLink',
      'MobileContextMenuDownloadVideo',
      'MobileContextMenuImage',
      'MobileContextMenuLink',
      'MobileContextMenuOpenImageInNewTab',
      'MobileContextMenuOpenLink',
      'MobileContextMenuOpenLinkInIncognito',
      'MobileContextMenuOpenLinkInNewTab',
      'MobileContextMenuSaveImage',
      'MobileContextMenuSearchByImage',
      'MobileContextMenuShareLink',
      'MobileContextMenuText',
      'MobileContextMenuVideo',
      'MobileContextMenuViewImage',
      'MobileFirstEditInOmnibox',
      'MobileFocusedFakeboxOnNtp',
      'MobileFocusedOmniboxNotOnNtp',
      'MobileFocusedOmniboxOnNtp',
      'MobileFreAttemptSignIn',
      'MobileFreSignInSuccessful',
      'MobileFreSkipSignIn',
      'MobileMenuAddToBookmarks',
      'MobileMenuAllBookmarks',
      'MobileMenuBack',
      'MobileMenuCloseAllTabs',
      'MobileMenuCloseTab',
      'MobileMenuFeedback',
      'MobileMenuFindInPage',
      'MobileMenuForward',
      'MobileMenuFullscreen',
      'MobileMenuNewIncognitoTab',
      'MobileMenuNewTab',
      'MobileMenuOpenTabs',
      'MobileMenuQuit',
      'MobileMenuReload',
      'MobileMenuSettings',
      'MobileMenuShare',
      'MobileMenuShow',
      'MobileNTPBookmark',
      'MobileNTPForeignSession',
      'MobileNTPMostVisited',
      'MobileNTPRecentlyClosed',
      'MobileNTPSwitchToBookmarks',
      'MobileNTPSwitchToIncognito',
      'MobileNTPSwitchToMostVisited',
      'MobileNTPSwitchToOpenTabs',
      'MobileNewTabOpened',
      'MobileOmniboxSearch',
      'MobileOmniboxVoiceSearch',
      'MobileOmniboxRefineSuggestion',
      'MobilePageLoaded',
      'MobilePageLoadedDesktopUserAgent',
      'MobilePageLoadedWithKeyboard',
      'MobileReceivedExternalIntent',
      'MobileRendererCrashed',
      'MobileShortcutAllBookmarks',
      'MobileShortcutFindInPage',
      'MobileShortcutNewIncognitoTab',
      'MobileShortcutNewTab',
      'MobileSideSwipeFinished',
      'MobileStackViewCloseTab',
      'MobileStackViewSwipeCloseTab',
      'MobileTabClobbered',
      'MobileTabClosed',
      'MobileTabStripCloseTab',
      'MobileTabStripNewTab',
      'MobileTabSwitched',
      'MobileToolbarBack',
      'MobileToolbarForward',
      'MobileToolbarNewTab',
      'MobileToolbarReload',
      'MobileToolbarShowMenu',
      'MobileToolbarShowStackView',
      'MobileToolbarStackViewNewTab',
      'MobileToolbarToggleBookmark',
      'MobileUsingMenuByHwButtonDragging',
      'MobileUsingMenuByHwButtonTap',
      'MobileUsingMenuBySwButtonDragging',
      'MobileUsingMenuBySwButtonTap',
      'SystemBack',
      'SystemBackForNavigation',
  ))
def AddAboutFlagsActions(actions):
  """Parse the experimental feature flags for UMA actions.

  Arguments:
    actions: set of actions to add to.
  """
  about_flags = os.path.join(REPOSITORY_ROOT, 'chrome', 'browser',
                             'about_flags.cc')
  flag_name_re = re.compile(r'\s*"([0-9a-zA-Z\-_]+)",\s*// FLAGS:RECORD_UMA')
  for line in open(about_flags):
    match = flag_name_re.search(line)
    if match:
      actions.add("AboutFlags_" + match.group(1))
    # If the line contains the marker but was not matched by the regex, put up
    # an error if the line is not a comment.
    elif 'FLAGS:RECORD_UMA' in line and line[0:2] != '//':
      # sys.stderr.write works under both Python 2 and 3, unlike the
      # 'print >>sys.stderr' statement it replaces.  The trailing '\n'
      # preserves the newline that the print statement appended.
      sys.stderr.write('WARNING: This line is marked for recording '
                       'about:flags metrics, but is not in the proper '
                       'format:\n' + line + '\n')
def AddBookmarkManagerActions(actions):
  """Register the user actions emitted by the Bookmark Manager.

  Arguments
    actions: set of actions to add to.
  """
  actions.update((
      'BookmarkManager_Command_AddPage',
      'BookmarkManager_Command_Copy',
      'BookmarkManager_Command_Cut',
      'BookmarkManager_Command_Delete',
      'BookmarkManager_Command_Edit',
      'BookmarkManager_Command_Export',
      'BookmarkManager_Command_Import',
      'BookmarkManager_Command_NewFolder',
      'BookmarkManager_Command_OpenIncognito',
      'BookmarkManager_Command_OpenInNewTab',
      'BookmarkManager_Command_OpenInNewWindow',
      'BookmarkManager_Command_OpenInSame',
      'BookmarkManager_Command_Paste',
      'BookmarkManager_Command_ShowInFolder',
      'BookmarkManager_Command_Sort',
      'BookmarkManager_Command_UndoDelete',
      'BookmarkManager_Command_UndoGlobal',
      'BookmarkManager_Command_UndoNone',
      'BookmarkManager_NavigateTo_BookmarkBar',
      'BookmarkManager_NavigateTo_Mobile',
      'BookmarkManager_NavigateTo_Other',
      'BookmarkManager_NavigateTo_Recent',
      'BookmarkManager_NavigateTo_Search',
      'BookmarkManager_NavigateTo_SubFolder',
  ))
def AddChromeOSActions(actions):
  """Register actions reported by non-Chrome processes in Chrome OS.

  Arguments:
    actions: set of actions to add to.
  """
  actions.update((
      # Sent by the Chrome OS update engine.
      'Updater.ServerCertificateChanged',
      'Updater.ServerCertificateFailed',
      # Sent by Chrome OS cryptohome.
      'Cryptohome.PKCS11InitFail',
  ))
def AddExtensionActions(actions):
  """Register actions reported by extensions via chrome.metricsPrivate API.

  Arguments:
    actions: set of actions to add to.
  """
  actions.update((
      # Sent by the Chrome OS File Browser.
      'FileBrowser.CreateNewFolder',
      'FileBrowser.PhotoEditor.Edit',
      'FileBrowser.PhotoEditor.View',
      'FileBrowser.SuggestApps.ShowDialog',
      # Sent by the Google Now client.
      'GoogleNow.MessageClicked',
      'GoogleNow.ButtonClicked0',
      'GoogleNow.ButtonClicked1',
      'GoogleNow.Dismissed',
      # Sent by Chrome Connectivity Diagnostics.
      'ConnectivityDiagnostics.LaunchSource.OfflineChromeOS',
      'ConnectivityDiagnostics.LaunchSource.WebStore',
      'ConnectivityDiagnostics.UA.LogsShown',
      'ConnectivityDiagnostics.UA.PassingTestsShown',
      'ConnectivityDiagnostics.UA.SettingsShown',
      'ConnectivityDiagnostics.UA.TestResultExpanded',
      'ConnectivityDiagnostics.UA.TestSuiteRun',
  ))
def GrepForActions(path, actions):
  """Grep a source file for calls to UserMetrics functions.

  Arguments:
    path: path to the file
    actions: set of actions to add to
  """
  global number_of_files_total
  number_of_files_total = number_of_files_total + 1
  # We look for the UserMetricsAction structure constructor; this should be
  # on one line.
  action_re = re.compile(r'[^a-zA-Z]UserMetricsAction\("([^"]*)')
  malformed_action_re = re.compile(r'[^a-zA-Z]UserMetricsAction\([^"]')
  computed_action_re = re.compile(r'RecordComputedAction')
  # enumerate(start=1) replaces the hand-maintained counter; the warnings
  # use sys.stderr.write so they work under both Python 2 and 3.
  for line_number, line in enumerate(open(path), start=1):
    match = action_re.search(line)
    if match:  # Plain call to RecordAction.
      actions.add(match.group(1))
    elif malformed_action_re.search(line):
      # Warn if this line is using RecordAction incorrectly.
      sys.stderr.write('WARNING: %s has malformed call to RecordAction'
                       ' at %d\n' % (path, line_number))
    elif computed_action_re.search(line):
      # Warn if this file shouldn't be calling RecordComputedAction.
      if os.path.basename(path) not in KNOWN_COMPUTED_USERS:
        sys.stderr.write('WARNING: %s has RecordComputedAction at %d\n' %
                         (path, line_number))
class WebUIActionsParser(HTMLParser):
"""Parses an HTML file, looking for all tags with a 'metric' attribute.
Adds user actions corresponding to any metrics found.
Arguments:
actions: set of actions to add to
"""
def __init__(self, actions):
HTMLParser.__init__(self)
self.actions = actions
def handle_starttag(self, tag, attrs):
# We only care to examine tags that have a 'metric' attribute.
attrs = dict(attrs)
if not 'metric' in attrs:
return
# Boolean metrics have two corresponding actions. All other metrics have
# just one corresponding action. By default, we check the 'dataType'
# attribute.
is_boolean = ('dataType' in attrs and attrs['dataType'] == 'boolean')
if 'type' in attrs and attrs['type'] in ('checkbox', 'radio'):
if attrs['type'] == 'checkbox':
is_boolean = True
else:
# Radio buttons are boolean if and only if their values are 'true' or
# 'false'.
assert(attrs['type'] == 'radio')
if 'value' in attrs and attrs['value'] in ['true', 'false']:
is_boolean = True
if is_boolean:
self.actions.add(attrs['metric'] + '_Enable')
self.actions.add(attrs['metric'] + '_Disable')
else:
self.actions.add(attrs['metric'])
def GrepForWebUIActions(path, actions):
  """Grep a WebUI source file for elements with associated metrics.

  Arguments:
    path: path to the file
    actions: set of actions to add to
  """
  parser = None
  close_called = False
  try:
    parser = WebUIActionsParser(actions)
    parser.feed(open(path).read())
    # An exception can be thrown by parser.close(), so do it in the try to
    # ensure the path of the file being parsed gets printed if that happens.
    close_called = True
    parser.close()
  except Exception:
    # 'except ... as' and print() work under both Python 2 and 3; a bare
    # 'raise' re-raises with the original traceback, which 'raise e' lost.
    print('Error encountered for path %s' % path)
    raise
  finally:
    # Guard against 'parser' being unbound if the constructor itself raised.
    if parser is not None and not close_called:
      parser.close()
def WalkDirectory(root_path, actions, extensions, callback):
  """Walk root_path, invoking callback(file_path, actions) on matching files.

  Version-control metadata directories ('.svn', '.git') are pruned from the
  walk.  A file matches when its extension tests true with
  "ext in extensions".
  """
  for dirpath, dirnames, filenames in os.walk(root_path):
    # Prune VCS metadata directories in place so os.walk skips them.
    for vcs_dir in ('.svn', '.git'):
      if vcs_dir in dirnames:
        dirnames.remove(vcs_dir)
    for filename in filenames:
      if os.path.splitext(filename)[1] in extensions:
        callback(os.path.join(dirpath, filename), actions)
def AddLiteralActions(actions):
  """Add literal actions specified via calls to UserMetrics functions.

  Arguments:
    actions: set of actions to add to.
  """
  EXTENSIONS = ('.cc', '.mm', '.c', '.m')

  webkit_root = os.path.normpath(os.path.join(REPOSITORY_ROOT, 'webkit'))
  # Walk each source root in turn, grepping every C/C++/ObjC file for
  # UserMetricsAction uses.
  source_roots = (
      os.path.normpath(os.path.join(REPOSITORY_ROOT, 'ash')),
      os.path.normpath(os.path.join(REPOSITORY_ROOT, 'chrome')),
      os.path.normpath(os.path.join(REPOSITORY_ROOT, 'content')),
      os.path.normpath(os.path.join(REPOSITORY_ROOT, 'net')),
      os.path.join(webkit_root, 'glue'),
      os.path.join(webkit_root, 'port'),
  )
  for root in source_roots:
    WalkDirectory(root, actions, EXTENSIONS, GrepForActions)
def AddWebUIActions(actions):
  """Add user actions defined in WebUI files.

  Arguments:
    actions: set of actions to add to.
  """
  resources_root = os.path.join(REPOSITORY_ROOT, 'chrome', 'browser',
                                'resources')
  # ('.html',) must be a real one-element tuple.  The previous ('.html') was
  # just a parenthesized string, so WalkDirectory's "ext in extensions" check
  # did substring matching and extensions like '.h' or '.htm' also matched.
  WalkDirectory(resources_root, actions, ('.html',), GrepForWebUIActions)
def AddHistoryPageActions(actions):
  """Register the user actions emitted by the History page.

  Arguments
    actions: set of actions to add to.
  """
  actions.update((
      'HistoryPage_BookmarkStarClicked',
      'HistoryPage_EntryMenuRemoveFromHistory',
      'HistoryPage_EntryLinkClick',
      'HistoryPage_EntryLinkRightClick',
      'HistoryPage_SearchResultClick',
      'HistoryPage_EntryMenuShowMoreFromSite',
      'HistoryPage_NewestHistoryClick',
      'HistoryPage_NewerHistoryClick',
      'HistoryPage_OlderHistoryClick',
      'HistoryPage_Search',
      'HistoryPage_InitClearBrowsingData',
      'HistoryPage_RemoveSelected',
      'HistoryPage_SearchResultRemove',
      'HistoryPage_ConfirmRemoveSelected',
      'HistoryPage_CancelRemoveSelected',
  ))
def AddKeySystemSupportActions(actions):
  """Register actions used for key system support metrics.

  Arguments
    actions: set of actions to add to.
  """
  # Widevine queries/support, with and without a MIME type.
  actions.update((
      'KeySystemSupport.Widevine.Queried',
      'KeySystemSupport.WidevineWithType.Queried',
      'KeySystemSupport.Widevine.Supported',
      'KeySystemSupport.WidevineWithType.Supported',
  ))
def AddAutomaticResetBannerActions(actions):
  """Register actions used for the automatic profile settings reset banners
  shown in chrome://settings.

  Arguments
    actions: set of actions to add to.
  """
  actions.update((
      # Banner shown as a result of the reset prompt.
      'AutomaticReset_WebUIBanner_BannerShown',
      'AutomaticReset_WebUIBanner_ManuallyClosed',
      'AutomaticReset_WebUIBanner_ResetClicked',
      # Banner shown as a result of settings hardening.
      'AutomaticSettingsReset_WebUIBanner_BannerShown',
      'AutomaticSettingsReset_WebUIBanner_ManuallyClosed',
      'AutomaticSettingsReset_WebUIBanner_LearnMoreClicked',
  ))
class Error(Exception):
  """Error raised while extracting or validating action XML data."""
def _ExtractText(parent_dom, tag_name):
"""Extract the text enclosed by |tag_name| under |parent_dom|
Args:
parent_dom: The parent Element under which text node is searched for.
tag_name: The name of the tag which contains a text node.
Returns:
A (list of) string enclosed by |tag_name| under |parent_dom|.
"""
texts = []
for child_dom in parent_dom.getElementsByTagName(tag_name):
text_dom = child_dom.childNodes
if text_dom.length != 1:
raise Error('More than 1 child node exists under %s' % tag_name)
if text_dom[0].nodeType != minidom.Node.TEXT_NODE:
raise Error('%s\'s child node is not a text node.' % tag_name)
texts.append(text_dom[0].data)
return texts
class Action(object):
  """Data for a single user action parsed from actions.xml.

  Attributes mirror the XML: name, description (str or None), owners
  (list of strings), and optional obsolete text.
  """
  def __init__(self, name, description, owners, obsolete=None):
    self.name = name
    self.description = description
    self.owners = owners
    self.obsolete = obsolete
def ParseActionFile(file_content):
  """Parse the XML data currently stored in the file.

  Args:
    file_content: a string containing the action XML file content.

  Returns:
    (actions, actions_dict, comment_nodes): actions is a set with all user
    actions' names.  actions_dict is a dict from user action name to Action
    object.  comment_nodes is the list of top-level comment nodes found
    before the <actions> tag, preserved so they can be re-attached when the
    file is pretty-printed.

  Exits the process (sys.exit(1)) if an action has more than one
  <description> or more than one <obsolete> tag.
  """
  dom = minidom.parseString(file_content)
  comment_nodes = []
  # Get top-level comments. It is assumed that all comments are placed before
  # <actions> tag. Therefore the loop will stop if it encounters a non-comment
  # node.
  for node in dom.childNodes:
    if node.nodeType == minidom.Node.COMMENT_NODE:
      comment_nodes.append(node)
    else:
      break
  actions = set()
  actions_dict = {}
  # Get each user action data.
  for action_dom in dom.getElementsByTagName('action'):
    action_name = action_dom.getAttribute('name')
    actions.add(action_name)
    owners = _ExtractText(action_dom, 'owner')
    # There is only one description for each user action. Get the first element
    # of the returned list.
    description_list = _ExtractText(action_dom, 'description')
    if len(description_list) > 1:
      logging.error('user actions "%s" has more than one descriptions. Exactly '
                    'one description is needed for each user action. Please '
                    'fix.', action_name)
      sys.exit(1)
    description = description_list[0] if description_list else None
    # There is at most one obsolete tag for each user action.
    obsolete_list = _ExtractText(action_dom, 'obsolete')
    if len(obsolete_list) > 1:
      logging.error('user actions "%s" has more than one obsolete tag. At most '
                    'one obsolete tag can be added for each user action. Please'
                    ' fix.', action_name)
      sys.exit(1)
    obsolete = obsolete_list[0] if obsolete_list else None
    actions_dict[action_name] = Action(action_name, description, owners,
                                       obsolete)
  return actions, actions_dict, comment_nodes
def _CreateActionTag(doc, action_name, action_object):
  """Create a new action tag.

  Format of an action tag:
  <action name="name">
    <owner>Owner</owner>
    <description>Description.</description>
    <obsolete>Deprecated.</obsolete>
  </action>

  <obsolete> is an optional tag. It's added to user actions that are no longer
  used any more.

  If action_object is provided (the Action for this name), the values to be
  inserted come from it; otherwise the default values from TAGS are used.

  Args:
    doc: The document under which the new action tag is created.
    action_name: The name of an action.
    action_object: An Action object representing the data to be inserted, or
        None to fall back to the TAGS defaults.

  Returns:
    An action tag Element with proper children elements.
  """
  action_dom = doc.createElement('action')
  action_dom.setAttribute('name', action_name)
  # Create owner tag.
  if action_object and action_object.owners:
    # If owners for this action is not None, use the stored value. Otherwise,
    # use the default value.
    for owner in action_object.owners:
      owner_dom = doc.createElement('owner')
      owner_dom.appendChild(doc.createTextNode(owner))
      action_dom.appendChild(owner_dom)
  else:
    # Use default value.
    owner_dom = doc.createElement('owner')
    owner_dom.appendChild(doc.createTextNode(TAGS.get('owner', '')))
    action_dom.appendChild(owner_dom)
  # Create description tag.
  description_dom = doc.createElement('description')
  action_dom.appendChild(description_dom)
  if action_object and action_object.description:
    # If description for this action is not None, use the stored value.
    # Otherwise, use the default value.
    description_dom.appendChild(doc.createTextNode(
        action_object.description))
  else:
    description_dom.appendChild(doc.createTextNode(
        TAGS.get('description', '')))
  # Create obsolete tag (optional; only when the action carries one).
  if action_object and action_object.obsolete:
    obsolete_dom = doc.createElement('obsolete')
    action_dom.appendChild(obsolete_dom)
    obsolete_dom.appendChild(doc.createTextNode(
        action_object.obsolete))
  return action_dom
def PrettyPrint(actions, actions_dict, comment_nodes=None):
  """Given a list of action data, create a well-printed minidom document.

  Args:
    actions: A list of action names.
    actions_dict: A mapping from action name to Action object.
    comment_nodes: Optional list of top-level comment nodes to re-attach
        ahead of the <actions> element.

  Returns:
    A well-printed minidom document that represents the input action data.
  """
  # A mutable default argument ([]) would be shared across calls; use None
  # as the sentinel instead.
  if comment_nodes is None:
    comment_nodes = []
  doc = minidom.Document()
  # Attach top-level comments.
  for node in comment_nodes:
    doc.appendChild(node)
  actions_element = doc.createElement('actions')
  doc.appendChild(actions_element)
  # Attach action node based on updated |actions|.
  for action in sorted(actions):
    actions_element.appendChild(
        _CreateActionTag(doc, action, actions_dict.get(action, None)))
  return print_style.GetPrintStyle().PrettyPrintNode(doc)
def main(argv):
  """Regenerate actions.xml from the current source tree.

  With --presubmit in argv, exits non-zero if actions.xml is not already
  pretty-printed instead of offering to rewrite it.  Otherwise prompts the
  user to accept the diff, backs up the old file, and writes the new one.
  """
  presubmit = ('--presubmit' in argv)
  actions_xml_path = os.path.join(path_utils.ScriptDir(), 'actions.xml')
  # Save the original file content.
  with open(actions_xml_path, 'rb') as f:
    original_xml = f.read()
  actions, actions_dict, comment_nodes = ParseActionFile(original_xml)
  # Gather actions from every known source, merging into the parsed set.
  AddComputedActions(actions)
  # TODO(fmantek): bring back webkit editor actions.
  # AddWebKitEditorActions(actions)
  AddAboutFlagsActions(actions)
  AddWebUIActions(actions)
  AddLiteralActions(actions)
  # print "Scanned {0} number of files".format(number_of_files_total)
  # print "Found {0} entries".format(len(actions))
  AddAndroidActions(actions)
  AddAutomaticResetBannerActions(actions)
  AddBookmarkManagerActions(actions)
  AddChromeOSActions(actions)
  AddClosedSourceActions(actions)
  AddExtensionActions(actions)
  AddHistoryPageActions(actions)
  AddKeySystemSupportActions(actions)
  pretty = PrettyPrint(actions, actions_dict, comment_nodes)
  if original_xml == pretty:
    print 'actions.xml is correctly pretty-printed.'
    sys.exit(0)
  if presubmit:
    logging.info('actions.xml is not formatted correctly; run '
                 'extract_actions.py to fix.')
    sys.exit(1)
  # Prompt user to consent on the change.
  if not diff_util.PromptUserToAcceptDiff(
      original_xml, pretty, 'Is the new version acceptable?'):
    logging.error('Aborting')
    sys.exit(1)
  print 'Creating backup file: actions.old.xml.'
  shutil.move(actions_xml_path, 'actions.old.xml')
  with open(actions_xml_path, 'wb') as f:
    f.write(pretty)
  print ('Updated %s. Don\'t forget to add it to your changelist' %
         actions_xml_path)
  return 0
# Script entry point; exits with main()'s return code (no-op on import).
if '__main__' == __name__:
  sys.exit(main(sys.argv))
| anirudhSK/chromium | tools/metrics/actions/extract_actions.py | Python | bsd-3-clause | 31,615 |
# -*- coding: utf-8 -*-
# Copyright 2017 Quartile Limited
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import models, fields, api, _
from odoo.exceptions import Warning
class ProjectActivityConfirm(models.TransientModel):
    _name = 'project.activity.confirm'
    _description = 'Project activity confirm'

    @api.multi
    def action_activity_confirm(self):
        """Mark every selected project activity as confirmed.

        Operates on the records referenced by ``active_ids`` in the
        context, then closes the wizard window.
        """
        selected_ids = self._context.get('active_ids', False)
        for record in self.env['project.activity'].browse(selected_ids):
            if not record.confirmed:
                record.confirmed = True
        return {'type': 'ir.actions.act_window_close'}
| rfhk/tks-custom | project_activity/wizard/project_activity_confirm.py | Python | agpl-3.0 | 699 |
import collections
#from goodreads import apikey
from goodreads.client import GoodreadsClient
from goodreads.book import GoodreadsBook
from goodreads.author import GoodreadsAuthor
from goodreads.shelf import GoodreadsShelf
class TestBook():
    """Integration tests for GoodreadsBook.

    These tests hit the live Goodreads API (book id 11870085,
    'The Fault in Our Stars') and therefore require network access.
    """
    @classmethod
    def setup_class(cls):
        # NOTE(review): API key/secret are hard-coded here; credentials
        # should not be committed to source control -- move them to a config
        # file or environment variables.
        client = GoodreadsClient('nTRaECtlyOjSmjJnLKRaiw', 'hCXp9GKlAe3sk1QIj0jXLF4UGLt9vfj54hDAfzHY')
        #client.authenticate(apikey.oauth_access_token,
        #                    apikey.oauth_access_token_secret)
        # All tests below share this single fetched book.
        cls.book = client.book('11870085')
    def test_get_book(self):
        assert isinstance(self.book, GoodreadsBook)
        assert self.book.gid == '11870085'
        assert repr(self.book) == 'The Fault in Our Stars'
    def test_title(self):
        assert self.book.title == 'The Fault in Our Stars'
    def test_authors(self):
        assert len(self.book.authors) == 1
        assert isinstance(self.book.authors[0], GoodreadsAuthor)
    def test_description(self):
        assert self.book.description.startswith(
            '"I fell in love the way you fall asleep: slowly, then all at once."')
    def test_average_rating(self):
        # Goodreads ratings are always within [1.0, 5.0].
        rating = float(self.book.average_rating)
        assert rating >= 1.0
        assert rating <= 5.0
    def test_rating_dist(self):
        assert self.book.rating_dist.startswith('5:')
    def test_ratings_count(self):
        assert self.book.ratings_count.isdigit()
    def test_text_reviews_count(self):
        assert self.book.text_reviews_count.isdigit()
    def test_num_pages(self):
        assert self.book.num_pages.isdigit()
    def test_popular_shelves(self):
        assert all(isinstance(shelf, GoodreadsShelf)
                   for shelf in self.book.popular_shelves)
    def test_work(self):
        assert type(self.book.work) == collections.OrderedDict
        assert self.book.work['id']['#text'] == '16827462'
    def test_series_works(self):
        assert self.book.series_works is None
    def test_publication_date(self):
        # (day, month, year) as strings.
        assert self.book.publication_date == ('1', '10', '2012')
    def test_publisher(self):
        assert self.book.publisher == 'Dutton Books'
    def test_language_code(self):
        assert self.book.language_code == 'eng'
    def test_edition_information(self):
        assert self.book.edition_information is None
    def test_image_url(self):
        assert self.book.image_url == 'https://d2arxad8u2l0g7.cloudfront.net/books/1360206420m/11870085.jpg'
    def test_small_image_url(self):
        assert self.book.small_image_url == 'https://d2arxad8u2l0g7.cloudfront.net/books/1360206420s/11870085.jpg'
    def test_is_ebook(self):
        assert self.book.is_ebook == 'false'
    def test_format(self):
        assert self.book.format == 'Hardcover'
    def test_isbn(self):
        assert self.book.isbn == '0525478817'
    def test_isbn13(self):
        assert self.book.isbn13 == '9780525478812'
    def test_link(self):
        assert self.book.link == 'https://www.goodreads.com/book/show/11870085-the-fault-in-our-stars'
    def test_reviews_widget(self):
        assert self.book.reviews_widget.startswith('<style>')
        assert self.book.reviews_widget.endswith('</div>')
    def test_similar_books(self):
        assert all(isinstance(b, GoodreadsBook)
                   for b in self.book.similar_books)
| TEJESH/gandhi | tests/book_test.py | Python | mit | 3,364 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
"""
Event Item
++++++++++
Event item manage creation of ``QListWidgetItem`` for events
"""
from PyQt5.Qt import QTimer, QListWidgetItem, QIcon, Qt
from alignak_app.utils.config import settings
from alignak_app.utils.time import get_current_time
class EventItem(QListWidgetItem):
    """
    Class who create an event QListWidgetItem
    """

    def __init__(self):
        super(EventItem, self).__init__()
        # QTimer used to auto-hide the event; set only when "timer" is True.
        self.timer = None
        # Backend "_id" of the related host, if any.
        self.host = None

    def initialize(self, event_type, msg, timer=False, host=None):
        """
        Initialize QListWidgetItem

        :param event_type: the type of event: OK, DOWN, ACK, ...
        :type event_type: str
        :param msg: message of event
        :type msg: str
        :param timer: timer to hide event at end of time
        :type timer: bool
        :param host: _id of host. Only necessary if "be_spied" is True
        :type host: None | str
        """
        self.host = host
        if host:
            # Stash the host id on the item so views can retrieve it.
            self.setData(Qt.UserRole, host)

        if timer:
            self.timer = QTimer()

        self.setData(Qt.DisplayRole, "%s" % msg)
        msg_to_send = '%s. (Send at %s)' % (msg, get_current_time())
        self.setToolTip(msg_to_send)

        self.setData(
            Qt.DecorationRole, QIcon(settings.get_image(self.get_icon(event_type)))
        )

    def close_item(self):  # pragma: no cover
        """
        Hide items when timer is finished
        """
        self.setHidden(True)

    @staticmethod
    def get_icon(event_type):
        """
        Return name of icon event

        :param event_type: type of event
        :type event_type: str
        :return: name of icon
        :rtype: str
        """
        available_icons = {
            'event_ok': ['OK', 'UP'],
            'event_info': ['UNKNOWN', 'INFO', 'TODO'],
            'event_warn': ['WARNING', 'UNREACHABLE', 'WARN'],
            'event_alert': ['DOWN', 'CRITICAL', 'ALERT'],
            'acknowledge': ['ACK'],
            'downtime': ['DOWNTIME', 'DOWNTIMESTART (DOWN)'],
            'spy': ['SPY']
        }
        # Iterate over the items directly instead of discarding the value and
        # re-indexing the dict by key.
        for icon_name, event_types in available_icons.items():
            if event_type in event_types:
                return icon_name

        return 'error'

    @staticmethod
    def get_event_type(data):
        """
        Return event type depending of data content

        :param data: data of backend item
        :type data: dict
        :return: event type for item
        :rtype: str
        """
        # Acknowledgement/downtime override the live state; downtime wins
        # over acknowledgement, and the raw state is the fallback.
        event_type = ''
        if data['ls_acknowledged']:
            event_type = 'ACK'
        if data['ls_downtimed']:
            event_type = 'DOWNTIME'
        if not event_type:
            event_type = data['ls_state']

        return event_type
| Alignak-monitoring-contrib/alignak-app | alignak_app/qobjects/events/item.py | Python | agpl-3.0 | 3,581 |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# Raised for errors surfaced by the oc/oadm wrappers below.
class OpenShiftCLIError(Exception):
    '''Exception class for openshiftcli'''
    pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools (oc / oadm) via subprocess.

    NOTE(review): this code uses Python 2 only constructs (print statements,
    dict.has_key, Exception.message) and will not run under Python 3 as-is.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI.

        namespace: default namespace passed with -n to most commands.
        kubeconfig: path exported as KUBECONFIG for the subprocess.
        verbose: echo commands and their output to stdout.
        all_namespaces: query with --all-namespaces instead of -n.
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' Fetch the named resource, apply the key/value pairs in |content|
        via Yedit, and `oc replace` it if anything actually changed. '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # Each change is a tuple whose first element indicates whether the
        # value actually changed -- presumably (Yedit.put); TODO confirm.
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''Run `oc replace -f fname` in the current namespace.'''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''Write |content| to a temp file and `oc create` it.'''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''Run `oc create -f fname` in the current namespace.'''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname, selector=None):
        '''Run `oc delete` on the named resource, optionally by selector.'''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):
        '''Run `oc process` on a template (by name, or from |template_data|
        on stdin); optionally `oc create` the processed result.'''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''Return a resource by name as parsed JSON; the 'results' key is
        normalized to a list.'''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])
        cmd.extend(['-o', 'json'])
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods (JSON output) '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    #pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import; |url| overrides the source registry '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    #pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc.

        Runs oc (or oadm when |oadm| is True) with KUBECONFIG set, feeding
        |input_data| on stdin.  Returns a dict with at least 'returncode',
        'results' and 'cmd'; on success with output=True, 'results' holds
        parsed JSON or raw stdout depending on |output_type|.
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # Non-JSON output is tolerated; the error message is
                        # surfaced under 'err' below.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
''' utilities for openshiftcli modules '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' Create a file under /tmp named |rname| and write |data| to it,
        serialized as YAML (round-trip style), JSON, or raw text depending
        on |ftype|.  Returns the path.  The file is removed at exit. '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                # RoundTripDumper (ruamel.yaml) preserves formatting/comments.
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn a dict (or list of dicts) with 'path' and 'data' keys into a
        list of {'name', 'path'} entries backed by temp files.'''
        # Accept a single dict as well as a list of dicts.
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Remove each existing path in |files| (file or directory tree);
        registered via atexit to clean up temp files on exit.'''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include an item whose metadata name
        is |_name|. '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the first result whose metadata name is |_name|; returns
        None when no match is found. '''
        rval = None
        for result in results:
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' Read a resource definition file and parse it as YAML (default)
        or JSON; any other |sfile_type| returns the raw text. '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
class OpenShiftCLIConfig(object):
    '''Generic holder for a resource's name, namespace, kubeconfig and options.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        '''Accessor for the underlying options hash.'''
        return self._options

    def to_option_list(self):
        '''Alias for stringify(): the options rendered as CLI parameters.'''
        return self.stringify()

    def stringify(self):
        '''Render the options hash as a list of "--key=value" CLI parameters.

        An option is emitted when it is flagged for inclusion and its value
        is truthy or an int (so integer zero is still emitted).
        '''
        return ['--%s=%s' % (name.replace('_', '-'), opt['value'])
                for name, opt in self.config_options.items()
                if opt['include']
                and (opt['value'] or isinstance(opt['value'], int))]
class YeditException(Exception):
    '''Exception raised by Yedit for file-write and YAML/JSON parse errors.'''
    pass
class Yedit(object):
    ''' Class to modify yaml files

    Supports dotted-key access (configurable separator) into nested
    dict/list structures, e.g. "a.b[0].c".

    Fixes applied:
    - the `separator` setter previously took no value argument and never
      assigned, so ``yedit.separator = x`` raised a TypeError;
    - ``dict.has_key()`` replaced with ``in`` (removed in Python 3);
    - ``err.message`` replaced with ``str(err)`` (removed in Python 3).
    '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key into (array_index, dict_key) tuples, allowing the
        appropriate separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key against re_valid_key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False
        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key; True on success, None on bad key '''
        # An empty key empties the whole structure in place.
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk down to the parent of the final component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set an item in a nested structure using key notation a.b.c,
        creating intermediate dicts as needed.
            d = {'a': {'b': 'c'}}
            key = a#b
            return c
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    # cannot descend through a non-dict intermediate
                    return None

                data[dict_key] = {}
                data = data[dict_key]

            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a nested structure using key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        return data

    def write(self):
        ''' write the current yaml_dict to self.filename atomically '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # Write to a temp file first so a failure never clobbers the target.
        tmp_filename = self.filename + '.yedit'
        try:
            with open(tmp_filename, 'w') as yfd:
                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
                yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except Exception as err:
            # str(err): Exception.message was removed in Python 3
            raise YeditException(str(err))

        os.rename(tmp_filename, self.filename)

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file; None when no filename or the file is missing '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' parse self.content or the file contents into yaml_dict '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key; None when absent '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item from a list '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from the structure '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # value is a subset check: every key/val pair must match
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to the list at path, creating the list if needed'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict, or replace/append in a list '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
                                     ' value=[%s]  [%s]' % (value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into the structure (copy-on-write) '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work; round-trip through yaml instead to preserve
        # ruamel's comment/format metadata
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create the initial structure when no backing file exists yet '''
        if not self.file_exists():
            # deepcopy didn't work; round-trip through yaml instead
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class PersistentVolumeClaimConfig(object):
    '''Collect pvc options and build the corresponding API definition.'''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 kubeconfig,
                 access_modes=None,
                 vol_capacity='1G'):
        '''Store the pvc options and populate self.data.'''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.namespace = namespace
        self.access_modes = access_modes
        self.vol_capacity = vol_capacity
        self.data = {}

        self.create_dict()

    def create_dict(self):
        '''Populate self.data with a v1 PersistentVolumeClaim definition.'''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'PersistentVolumeClaim'
        self.data['metadata'] = {'name': self.name}
        self.data['spec'] = {
            # default to ReadWriteOnce unless modes were supplied
            'accessModes': self.access_modes or ['ReadWriteOnce'],
            # requested storage capacity
            'resources': {'requests': {'storage': self.vol_capacity}},
        }
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class PersistentVolumeClaim(Yedit):
    ''' Wrap a PersistentVolumeClaim definition with typed accessors '''
    access_modes_path = "spec.accessModes"
    # Fixed: capacity lives at spec.resources.requests.storage (the path that
    # PersistentVolumeClaimConfig.create_dict populates); the previous value
    # "spec.requests.storage" never matched, so get_volume_capacity() always
    # returned the empty default.
    volume_capacity_path = "spec.resources.requests.storage"
    volume_name_path = "spec.volumeName"
    bound_path = "status.phase"
    kind = 'PersistentVolumeClaim'

    def __init__(self, content):
        '''PersistentVolumeClaim constructor'''
        super(PersistentVolumeClaim, self).__init__(content=content)
        # lazy caches for the property accessors below
        self._access_modes = None
        self._volume_capacity = None
        self._volume_name = None

    @property
    def volume_name(self):
        ''' volume_name property (lazily read from the definition) '''
        if self._volume_name is None:
            self._volume_name = self.get_volume_name()
        return self._volume_name

    @volume_name.setter
    def volume_name(self, data):
        ''' volume_name property setter'''
        self._volume_name = data

    @property
    def access_modes(self):
        ''' access_modes property (lazily read from the definition) '''
        if self._access_modes is None:
            self._access_modes = self.get_access_modes()
        return self._access_modes

    @access_modes.setter
    def access_modes(self, data):
        ''' access_modes property setter'''
        self._access_modes = data

    @property
    def volume_capacity(self):
        ''' volume_capacity property (lazily read from the definition) '''
        if self._volume_capacity is None:
            self._volume_capacity = self.get_volume_capacity()
        return self._volume_capacity

    @volume_capacity.setter
    def volume_capacity(self, data):
        ''' volume_capacity property setter'''
        self._volume_capacity = data

    def get_access_modes(self):
        '''get access_modes from the definition; [] when unset'''
        return self.get(PersistentVolumeClaim.access_modes_path) or []

    def get_volume_capacity(self):
        '''get volume_capacity from the definition; [] when unset'''
        return self.get(PersistentVolumeClaim.volume_capacity_path) or []

    def get_volume_name(self):
        '''get volume_name from the definition; [] when unset'''
        return self.get(PersistentVolumeClaim.volume_name_path) or []

    def is_bound(self):
        '''return the binding phase (truthy when bound); [] when unset'''
        return self.get(PersistentVolumeClaim.bound_path) or []

    #### ADD #####
    def add_access_mode(self, inc_mode):
        ''' add an access_mode; creates the list when missing '''
        if self.access_modes:
            self.access_modes.append(inc_mode)
        else:
            self.put(PersistentVolumeClaim.access_modes_path, [inc_mode])

        return True
    #### /ADD #####

    #### Remove #####
    def remove_access_mode(self, inc_mode):
        ''' remove an access_mode; False when it was not present '''
        try:
            self.access_modes.remove(inc_mode)
        except ValueError:
            return False

        return True
    #### /REMOVE #####

    #### UPDATE #####
    def update_access_mode(self, inc_mode):
        ''' update an access_mode; adds it when missing '''
        try:
            index = self.access_modes.index(inc_mode)
        except ValueError:
            return self.add_access_mode(inc_mode)

        self.access_modes[index] = inc_mode
        return True
    #### /UPDATE #####

    #### FIND ####
    def find_access_mode(self, inc_mode):
        ''' find an access_mode; returns its index, or None when absent '''
        index = None
        try:
            index = self.access_modes.index(inc_mode)
        except ValueError:
            return index

        return index
    #### /FIND ####
# pylint: disable=too-many-instance-attributes
class OCPVC(OpenShiftCLI):
    '''Wrapper around the `oc` command line tool for pvc resources.'''
    kind = 'pvc'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        '''Constructor for OCPVC.

        config: a PersistentVolumeClaimConfig carrying name, namespace,
        kubeconfig and the desired pvc definition (config.data).
        verbose: NOTE(review) accepted but neither stored nor forwarded to
        OpenShiftCLI -- looks unused; confirm intent.
        '''
        super(OCPVC, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.namespace = config.namespace
        # lazy cache; populated on first access of the `pvc` property
        self._pvc = None

    @property
    def pvc(self):
        '''Lazily fetch and cache the live PersistentVolumeClaim.'''
        if not self._pvc:
            self.get()
        return self._pvc

    @pvc.setter
    def pvc(self, data):
        '''Setter for the cached PersistentVolumeClaim.'''
        self._pvc = data

    def bound(self):
        '''Return True when the pvc has been bound to a volume.'''
        if self.pvc.get_volume_name():
            return True
        return False

    def exists(self):
        '''Return whether the pvc exists on the cluster.'''
        if self.pvc:
            return True

        return False

    def get(self):
        '''Return pvc information from `oc get`.

        A "not found" error is normalized to returncode 0 with empty
        results so callers can treat a missing pvc as non-fatal.
        '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.pvc = PersistentVolumeClaim(content=result['results'][0])
        elif '\"%s\" not found' % self.config.name in result['stderr']:
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''Delete the pvc via `oc delete`.'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''Create the pvc from the desired definition in config.data.'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''Replace the live pvc content with the desired config.data.'''
        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        '''Return True when the desired config differs from the live pvc.

        A pvc that is bound (or already has a volume name) is never updated.
        '''
        if self.pvc.get_volume_name() or self.pvc.is_bound():
            return False

        skip = []
        return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True)
#pylint: disable=too-many-branches
def main():
    '''
    ansible oc module for pvc

    States:
      list    -- return the current pvc without changes
      absent  -- delete the pvc when it exists
      present -- create the pvc when missing; update only when it is not
                 yet bound to a volume
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            name=dict(default=None, required=True, type='str'),
            namespace=dict(default=None, required=True, type='str'),
            volume_capacity=dict(default='1G', type='str'),
            access_modes=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
    )

    # Build the desired pvc definition from the module parameters.
    pconfig = PersistentVolumeClaimConfig(module.params['name'],
                                          module.params['namespace'],
                                          module.params['kubeconfig'],
                                          module.params['access_modes'],
                                          module.params['volume_capacity'],
                                         )
    oc_pvc = OCPVC(pconfig, verbose=module.params['debug'])

    state = module.params['state']

    api_rval = oc_pvc.get()

    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")

    ########
    # Delete
    ########
    if state == 'absent':
        if oc_pvc.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')

            api_rval = oc_pvc.delete()

            module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")

    if state == 'present':
        ########
        # Create
        ########
        if not oc_pvc.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')

            # Create it here
            api_rval = oc_pvc.create()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            # return the created object
            api_rval = oc_pvc.get()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present")

        ########
        # Update
        ########
        # A bound volume cannot be modified; report unchanged instead of failing.
        if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
            api_rval['msg'] = '##### - This volume is currently bound.  Will not update - ####'

            module.exit_json(changed=False, results=api_rval, state="present")

        if oc_pvc.needs_update():
            api_rval = oc_pvc.update()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            # return the created object
            api_rval = oc_pvc.get()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present")

        # No drift detected: report success without changes.
        module.exit_json(changed=False, results=api_rval, state="present")

    # Unreachable with the declared choices, kept as a safety net.
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| joelsmith/openshift-tools | ansible/roles/lib_openshift_3.2/library/oc_pvc.py | Python | apache-2.0 | 39,870 |
"""A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations
when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os
module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only
gzip, bz2 and xz are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import shutil
import io
_open = open
def _check_mode(mode, encoding, newline):
"""Check mode and that encoding and newline are compatible.
Parameters
----------
mode : str
File open mode.
encoding : str
File encoding.
newline : str
Newline for text files.
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
def _python2_bz2open(fn, mode, encoding, newline):
    """Open a bz2 file on Python 2, rejecting unsupported text mode.

    Parameters
    ----------
    fn : str
        File name
    mode : {'r', 'w'}
        File mode. Note that bz2 Text files are not supported.
    encoding : str
        Ignored, text bz2 files not supported in Python2.
    newline : str
        Ignored, text bz2 files not supported in Python2.
    """
    import bz2

    _check_mode(mode, encoding, newline)

    if "t" in mode:
        # BZ2File is missing necessary functions for TextIOWrapper
        raise ValueError("bz2 text files not supported in python2")
    return bz2.BZ2File(fn, mode)
def _python2_gzipopen(fn, mode, encoding, newline):
    """Open a gzip file on Python 2 with optional text-mode wrapping.

    Parameters
    ----------
    fn : str, bytes, file
        File path or opened file.
    mode : str
        File mode. The actual files are opened as binary, but will decoded
        using the specified `encoding` and `newline`.
    encoding : str
        Encoding to be used when reading/writing as text.
    newline : str
        Newline to be used when reading/writing as text.
    """
    import gzip

    _check_mode(mode, encoding, newline)

    # gzip is lacking read1 needed for TextIOWrapper
    class GzipWrap(gzip.GzipFile):
        def read1(self, n):
            return self.read(n)

    # Always open the underlying stream in binary.
    gz_mode = mode.replace("t", "")

    if isinstance(fn, (str, bytes)):
        raw = GzipWrap(fn, gz_mode)
    elif hasattr(fn, "read") or hasattr(fn, "write"):
        raw = GzipWrap(None, gz_mode, fileobj=fn)
    else:
        raise TypeError("filename must be a str or bytes object, or a file")

    if "t" in mode:
        # Layer text decoding on top of the binary gzip stream.
        return io.TextIOWrapper(raw, encoding, newline=newline)
    return raw
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of lzma, bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way
that an instance of `_FileOpeners` itself can be indexed with the keys
of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz', '.xz', '.lzma']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: io.open}
def _load(self):
if self._loaded:
return
try:
import bz2
if sys.version_info[0] >= 3:
self._file_openers[".bz2"] = bz2.open
else:
self._file_openers[".bz2"] = _python2_bz2open
except ImportError:
pass
try:
import gzip
if sys.version_info[0] >= 3:
self._file_openers[".gz"] = gzip.open
else:
self._file_openers[".gz"] = _python2_gzipopen
except ImportError:
pass
try:
import lzma
self._file_openers[".xz"] = lzma.open
self._file_openers[".lzma"] = lzma.open
except (ImportError, AttributeError):
# There are incompatible backports of lzma that do not have the
# lzma.open attribute, so catch that as well as ImportError.
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
methods.
"""
self._load()
return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
    """
    Open `path` with `mode` and return the file object.

    When ``path`` is an URL the file is downloaded, stored in the
    `DataSource` `destpath` directory, and opened from there.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path` with: 'r' to read, 'w' to write, 'a' to
        append. Available modes depend on the type of object specified by
        path. Default is 'r'.
    destpath : str, optional
        Directory the source file gets downloaded to for use. When None,
        a temporary directory is created. Defaults to the current
        directory.
    encoding : {None, str}, optional
        Open text file with given encoding. The default encoding is the
        one `io.open` uses.
    newline : {None, str}, optional
        Newline to use when reading text file.

    Returns
    -------
    out : file object
        The opened file.

    Notes
    -----
    Convenience wrapper: instantiates a `DataSource` and returns the file
    object from ``DataSource.open(path)``.

    """
    # Delegate to a throwaway DataSource rooted at `destpath`.
    return DataSource(destpath).open(path, mode, encoding=encoding, newline=newline)
class DataSource (object):
"""
DataSource(destpath='.')
A generic data source file (file, http, ftp, ...).
DataSources can be local files or remote files/URLs. The files may
also be compressed or uncompressed. DataSource hides some of the
low-level details of downloading the file, allowing you to simply pass
in a valid file path (or URL) and obtain a file object.
Parameters
----------
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for
use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Notes
-----
URLs require a scheme string (``http://``) to be used, without it they
will fail::
>>> repos = DataSource()
>>> repos.exists('www.google.com/index.html')
False
>>> repos.exists('http://www.google.com/index.html')
True
Temporary directories are deleted when the DataSource is deleted.
Examples
--------
::
>>> ds = DataSource('/home/guido')
>>> urlname = 'http://www.google.com/index.html'
>>> gfile = ds.open('http://www.google.com/index.html') # remote file
>>> ds.abspath(urlname)
'/home/guido/www.google.com/site/index.html'
>>> ds = DataSource(None) # use with temporary file
>>> ds.open('/home/guido/foobar.txt')
<open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
>>> ds.abspath('/home/guido/foobar.txt')
'/tmp/tmpy4pgsP/home/guido/foobar.txt'
"""
def __init__(self, destpath=os.curdir):
"""Create a DataSource with a local path at destpath."""
if destpath:
self._destpath = os.path.abspath(destpath)
self._istmpdest = False
else:
import tempfile # deferring import to improve startup time
self._destpath = tempfile.mkdtemp()
self._istmpdest = True
def __del__(self):
# Remove temp directories
if self._istmpdest:
shutil.rmtree(self._destpath)
def _iszip(self, filename):
"""Test if the filename is a zip file by looking at the file extension.
"""
fname, ext = os.path.splitext(filename)
return ext in _file_openers.keys()
def _iswritemode(self, mode):
"""Test if the given mode will open a file for writing."""
# Currently only used to test the bz2 files.
_writemodes = ("w", "+")
for c in mode:
if c in _writemodes:
return True
return False
def _splitzipext(self, filename):
"""Split zip extension from filename and return filename.
*Returns*:
base, zip_ext : {tuple}
"""
if self._iszip(filename):
return os.path.splitext(filename)
else:
return filename, None
def _possible_names(self, filename):
"""Return a tuple containing compressed filename variations."""
names = [filename]
if not self._iszip(filename):
for zipext in _file_openers.keys():
if zipext:
names.append(filename+zipext)
return names
def _isurl(self, path):
"""Test if path is a net location. Tests the scheme and netloc."""
# We do this here to reduce the 'import numpy' initial import time.
if sys.version_info[0] >= 3:
from urllib.parse import urlparse
else:
from urlparse import urlparse
# BUG : URLs require a scheme string ('http://') to be used.
# www.google.com will fail.
# Should we prepend the scheme for those that don't have it and
# test that also? Similar to the way we append .gz and test for
# for compressed versions of files.
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
return bool(scheme and netloc)
    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache directory
        (rooted at ``self._destpath``) and returns the local path of the
        cached copy.  Remote URLs are downloaded; local files are copied.

        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
                # _open is the module-level alias for the builtin open,
                # which this module shadows with its own open().
                f = _open(upath, 'wb')
                try:
                    shutil.copyfileobj(openedurl, f)
                finally:
                    # close both handles even if the copy fails partway
                    f.close()
                    openedurl.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            shutil.copyfile(path, upath)
        return upath
def _findfile(self, path):
"""Searches for ``path`` and returns full path if found.
If path is an URL, _findfile will cache a local copy and return the
path to the cached file. If path is a local file, _findfile will
return a path to that local file.
The search will include possible compressed versions of the file
and return the first occurrence found.
"""
# Build list of possible local file paths
if not self._isurl(path):
# Valid local paths
filelist = self._possible_names(path)
# Paths in self._destpath
filelist += self._possible_names(self.abspath(path))
else:
# Cached URLs in self._destpath
filelist = self._possible_names(self.abspath(path))
# Remote URLs
filelist = filelist + self._possible_names(path)
for name in filelist:
if self.exists(name):
if self._isurl(name):
name = self._cache(name)
return name
return None
    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.
        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.
        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.
        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.
        Notes
        -----
        The functionality is based on `os.path.abspath`.
        """
        # We do this here to reduce the 'import numpy' initial import time.
        if sys.version_info[0] >= 3:
            from urllib.parse import urlparse
        else:
            from urlparse import urlparse
        # TODO: This should be more robust. Handles case where path includes
        # the destpath, but not other sub-paths. Failing case:
        # path = /home/guido/datafile.txt
        # destpath = /home/alex/
        # upath = self.abspath(path)
        # upath == '/home/alex/home/guido/datafile.txt'
        # handle case where path includes self._destpath
        # Strip everything up to and including the first occurrence of the
        # destination prefix, so cached paths are not nested twice.
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        # Keep only the host (netloc) and path parts of a URL; both are
        # sanitized so the joined result cannot escape self._destpath.
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)
def _sanitize_relative_path(self, path):
"""Return a sanitised relative path for which
os.path.abspath(os.path.join(base, path)).startswith(base)
"""
last = None
path = os.path.normpath(path)
while path != last:
last = path
# Note: os.path.join treats '/' as os.sep on Windows
path = path.lstrip(os.sep).lstrip('/')
path = path.lstrip(os.pardir).lstrip('..')
drive, path = os.path.splitdrive(path) # for Windows
return path
    def exists(self, path):
        """
        Test if path exists.
        Test if `path` exists as (and in this order):
        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.
        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.
        Returns
        -------
        out : bool
            True if `path` exists.
        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL. `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.
        """
        # We import this here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError
        # Test local path
        if os.path.exists(path):
            return True
        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True
        # Test remote url
        if self._isurl(path):
            try:
                # Opening (and immediately closing) the URL is the portable
                # way to check that it is reachable without downloading it.
                netfile = urlopen(path)
                netfile.close()
                del(netfile)
                return True
            except URLError:
                return False
        return False
def open(self, path, mode='r', encoding=None, newline=None):
"""
Open and return file-like object.
If `path` is an URL, it will be downloaded, stored in the
`DataSource` directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing,
'a' to append. Available modes depend on the type of object
specified by `path`. Default is 'r'.
encoding : {None, str}, optional
Open text file with given encoding. The default encoding will be
what `io.open` uses.
newline : {None, str}, optional
Newline to use when reading text file.
Returns
-------
out : file object
File object.
"""
# TODO: There is no support for opening a file for writing which
# doesn't exist yet (creating a file). Should there be?
# TODO: Add a ``subdir`` parameter for specifying the subdirectory
# used to store URLs in self._destpath.
if self._isurl(path) and self._iswritemode(mode):
raise ValueError("URLs are not writeable")
# NOTE: _findfile will fail on a new file opened for writing.
found = self._findfile(path)
if found:
_fname, ext = self._splitzipext(found)
if ext == 'bz2':
mode.replace("+", "")
return _file_openers[ext](found, mode=mode,
encoding=encoding, newline=newline)
else:
raise IOError("%s not found." % path)
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource's share one base
    URL or directory.

    Every path handled by a `Repository` is resolved against the base
    URL (or directory) supplied at construction time, so callers working
    with several files from the same location may refer to each file by
    its bare name.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
    """
    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository whose files live under *baseurl*."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return *path* with the base URL prepended unless already present."""
        if len(path.split(self._baseurl, 2)) > 1:
            # path already carries the base URL
            return path
        return os.path.join(self._baseurl, path)

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL, with or without the
            `baseurl` the `Repository` was initialized with.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.
        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists, prepending the Repository base URL to path.

        `path` exists if it is (in this order): a local file, a remote URL
        already downloaded into the `DataSource` directory, or a valid and
        accessible remote URL that has not been downloaded yet.  The two
        URL cases are not distinguished: the file is accessible if it
        exists in either location.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL, with or without the
            `baseurl` the `Repository` was initialized with.

        Returns
        -------
        out : bool
            True if `path` exists.
        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object, prepending the Repository base URL.

        If `path` is an URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open, with or without the `baseurl`
            the `Repository` was initialized with.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `io.open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.

        Returns
        -------
        out : file object
            File object.
        """
        return DataSource.open(self, self._fullpath(path), mode,
                               encoding=encoding, newline=newline)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.
        """
        if not self._isurl(self._baseurl):
            return os.listdir(self._baseurl)
        raise NotImplementedError(
            "Directory listing of URLs, not supported yet.")
| tynn/numpy | numpy/lib/_datasource.py | Python | bsd-3-clause | 25,311 |
# -*- coding: utf-8 -*-
import logging
from time import sleep

import scrapy
from scrapy.exceptions import CloseSpider
from scrapy.utils.project import get_project_settings

from AcgnzSpider.items import AcgnzItem
class AcgnzSpider(scrapy.Spider):
    """Spider for acgnz.cc: logs in, walks post pages by numeric id, and
    extracts the title, image URLs and download link/passwords of each post.
    """
    name = 'acgnz.cc'
    start_urls = ['http://www.acgnz.cc/sign']
    allowed_domains = ['acgnz.cc']

    def parse(self, response):
        """Log in through the theme's AJAX endpoint.

        Credentials come from the project settings (``self.settings``); the
        previous unused local ``get_project_settings()`` call was removed.
        """
        yield scrapy.FormRequest.from_response(
            response,
            method='POST',
            # headers={'Content-Type': 'multipart/form-data'},
            url='http://www.acgnz.cc/wp-admin/admin-ajax.php?action=theme_custom_sign',
            formdata={
                'user[email]': self.settings.get('LOGIN_CREDENTIALS_EMAIL'),
                'user[pwd]': self.settings.get('LOGIN_CREDENTIALS_PWD'),
                'user[remember]': '1',
                'type': 'login',
                'theme-nonce': '85dd62e1f6'
            },
            callback=self.parse_follow_seq
        )

    def parse_follow_seq(self, response):
        """Verify the login response, then enqueue every post page."""
        # response.body is bytes on Python 3; compare with a bytes literal
        # (bytes and str literals are the same type on Python 2).
        if b'success' not in response.body:
            # BUGFIX: CloseSpider was raised without ever being imported,
            # which masked the login failure as a NameError.
            raise CloseSpider('login failed')
        logging.log(logging.DEBUG, 'login successful, sleeping for 5 seconds')
        # NOTE: time.sleep blocks Scrapy's reactor; kept deliberately to
        # give the site time to register the session after login.
        sleep(5)
        for post_id in range(9000):
            yield scrapy.Request(
                'http://www.acgnz.cc/{index}'.format(index=post_id),
                # Missing posts answer with a redirect; handle 302 ourselves
                # so they are skipped in parse_page instead of followed.
                meta={'dont_redirect': True,
                      'handle_httpstatus_list': [302]},
                callback=self.parse_page)

    def parse_page(self, response):
        """Extract item fields from a post page; skip redirected (missing) ids."""
        if response.status == 302:
            return
        item = AcgnzItem()
        item['url'] = response.url
        # Title lives either on an anchor inside the entry content or in the
        # article's h2; take whichever appears first.
        item['title'] = (response.selector.xpath('//div[@class="entry-content content-reset"]').xpath(
            './/a/@title').extract() + response.selector.xpath('//article[@id]/h2/text()').extract())[0]
        item['image_urls'] = response.selector.xpath(
            '//div[@class="entry-content content-reset"]').xpath('.//img/@src').extract()
        if response.selector.xpath('//div[@class="entry-circle"]/a[@class="meta meta-post-storage"]/@href'):
            download_page_href = response.selector.xpath(
                '//div[@class="entry-circle"]/a[@class="meta meta-post-storage"]/@href')[0].extract()
            yield scrapy.Request(download_page_href, meta={'item': item}, callback=self.parse_download_page)
        else:
            # No download page: emit the item with empty download fields.
            item['download_link'] = ''
            item['download_code'] = ''
            item['unarchive_password'] = ''
            yield item

    def parse_download_page(self, response):
        """Attach the download link and optional passwords, then emit the item."""
        item = response.meta['item']
        item['download_link'] = response.selector.xpath('//div[@class="post-download"]').xpath(
            './/a[@class="btn btn-lg btn-success btn-block"]/@href')[-1].extract()
        if response.selector.xpath(
                '//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-download-pwd"]/@value'):
            item['download_code'] = response.selector.xpath(
                '//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-download-pwd"]/@value')[-1].extract()
        if response.selector.xpath(
                '//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-extract-pwd"]/@value'):
            item['unarchive_password'] = response.selector.xpath(
                '//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-extract-pwd"]/@value')[-1].extract()
        yield item
| Nymphet/acgnz-spider | AcgnzSpider/spiders/AcgnzSpider01.py | Python | mit | 3,584 |
# Manual state-explosion benchmark: steps a CGC binary 300 times and
# reports peak memory and wall-clock time.
# BUGFIX: the Python-2-only `print` statements were a SyntaxError on
# Python 3; converted to print() with a __future__ import so the script
# behaves identically on both interpreters.
from __future__ import print_function

import os
import resource
import time

import angr

b = angr.Project(os.path.join(
    os.path.dirname(__file__),
    "../../binaries-private/cgc_scored_event_2/cgc/0b32aa01_01"
))

start = time.time()
#s = b.factory.blank_state(add_options={"COMPOSITE_SOLVER"})
s = b.factory.blank_state(add_options={"COMPOSITE_SOLVER"}, remove_options={"LAZY_SOLVES"})
sm = b.factory.simgr(s)
angr.manager.l.setLevel("DEBUG")
sm.step(300)
end = time.time()

# ru_maxrss is reported in KB on Linux; floor-divide to keep the original
# integer output.
print("MB:", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss // 1024)
print("time:", end - start)

#assert len(sm.active) == 1538
#assert len(sm.deadended) == 27
| f-prettyland/angr | tests/manual_explosion.py | Python | bsd-2-clause | 601 |
# -*- coding: utf-8 -*-
# These tests don't work at the moment, due to the security_groups multi select not working
# in selenium (the group is selected then immediately reset)
import pytest
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.pxe import get_template_from_config
from cfme.markers.env_markers.provider import providers
from cfme.utils import ssh
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for
pf1 = ProviderFilter(classes=[CloudProvider, InfraProvider], required_flags=['provision',
'cloud_init'])
pf2 = ProviderFilter(classes=[SCVMMProvider], inverted=True) # SCVMM doesn't support cloud-init
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.provider(gen_func=providers, filters=[pf1, pf2], scope="module")
]
def find_global_ipv6(vm):
    """
    Find a global (non link-local) IPv6 address on a VM, if present.

    Args:
        vm: InfraVm object

    Returns: the IPv6 address as a string if found, False otherwise
    """
    for address in vm.mgmt.all_ips:
        if ':' not in address:
            # IPv4 address, not interesting here.
            continue
        if address.startswith('fe80'):
            # Link-local address, skip.
            continue
        return address
    return False
@pytest.fixture(scope="module")
def setup_ci_template(provider, appliance):
    # Ensure the provider's cloud-init customization template exists in the
    # appliance, creating it from the yaml config when missing.
    cloud_init_template_name = provider.data['provisioning']['ci-template']
    get_template_from_config(
        cloud_init_template_name,
        create=True, appliance=appliance)
@pytest.fixture()
def vm_name():
    # Collision-resistant VM name prefixed 'ci' for the cloud-init tests.
    return random_vm_name('ci')
@pytest.mark.rhv2
@pytest.mark.tier(3)
def test_provision_cloud_init(appliance, request, setup_provider, provider, provisioning,
                              setup_ci_template, vm_name):
    """ Tests provisioning from a template with cloud_init
    Metadata:
        test_flag: cloud_init, provision
    Bugzilla:
        1619744
    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
        casecomponent: Provisioning
    """
    # Some providers define a dedicated cloud-init-ready image; fall back to
    # the generic provisioning image otherwise.
    image = provisioning.get('ci-image') or provisioning['image']['name']
    note = ('Testing provisioning from image {} to vm {} on provider {}'.format(
        image, vm_name, provider.key))
    logger.info(note)
    mgmt_system = provider.mgmt
    inst_args = {
        'request': {'notes': note},
        'customize': {'custom_template': {'name': provisioning['ci-template']}}
    }
    # for image selection in before_fill
    inst_args['template_name'] = image
    if provider.one_of(AzureProvider):
        # Azure: request a fresh public IP so the VM is reachable over SSH.
        inst_args['environment'] = {'public_ip_address': "New"}
    if provider.one_of(OpenStackProvider):
        # OpenStack: allocate a floating IP from the provider's public pool.
        ip_pool = provider.data['public_network']
        floating_ip = mgmt_system.get_first_floating_ip(pool=ip_pool)
        provider.refresh_provider_relationships()
        inst_args['environment'] = {'public_ip_address': floating_ip}
    if provider.one_of(InfraProvider) and appliance.version > '5.9':
        inst_args['customize']['customize_type'] = 'Specification'
    logger.info('Instance args: {}'.format(inst_args))
    collection = appliance.provider_based_collection(provider)
    instance = collection.create(vm_name, provider, form_values=inst_args)
    request.addfinalizer(instance.cleanup_on_provider)
    provision_request = provider.appliance.collections.requests.instantiate(vm_name,
                                                                            partial_check=True)
    provision_request.wait_for_request()
    # Wait for the provider to report an IP before attempting SSH.
    wait_for(lambda: instance.ip_address is not None, num_sec=600)
    connect_ip = instance.ip_address
    assert connect_ip, "VM has no IP"
    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    with ssh.SSHClient(hostname=connect_ip, username=provisioning['ci-username'],
                       password=provisioning['ci-pass']) as ssh_client:
        wait_for(ssh_client.uptime, num_sec=200, handle_exception=True)
@pytest.mark.rhv3
@pytest.mark.provider([RHEVMProvider], override=True)
def test_provision_cloud_init_payload(appliance, request, setup_provider, provider, provisioning,
                                      vm_name):
    """
    Tests that options specified in VM provisioning dialog in UI are properly passed as a cloud-init
    payload to the newly provisioned VM.
    Metadata:
        test_flag: cloud_init, provision
    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
        casecomponent: Provisioning
    """
    image = provisioning.get('ci-image', None)
    if not image:
        pytest.skip('No ci-image found in provider specification.')
    note = ('Testing provisioning from image {image} to vm {vm} on provider {provider}'.format(
        image=image, vm=vm_name, provider=provider.key))
    logger.info(note)
    # Static-network cloud-init payload entered through the provisioning
    # dialog; each value is verified on the VM after provisioning.
    ci_payload = {
        'root_password': 'mysecret',
        'address_mode': 'Static',
        'hostname': 'cimachine',
        'ip_address': '169.254.0.1',
        'subnet_mask': '29',
        'gateway': '169.254.0.2',
        'dns_servers': '169.254.0.3',
        'dns_suffixes': 'virt.lab.example.com',
        'custom_template': {'name': 'oVirt cloud-init'}
    }
    inst_args = {
        'request': {'notes': note},
        'customize': {'customize_type': 'Specification'},
        'template_name': image
    }
    inst_args['customize'].update(ci_payload)
    logger.info('Instance args: {}'.format(inst_args))
    # Provision VM
    collection = appliance.provider_based_collection(provider)
    instance = collection.create(vm_name, provider, form_values=inst_args)
    request.addfinalizer(instance.cleanup_on_provider)
    provision_request = provider.appliance.collections.requests.instantiate(vm_name,
                                                                            partial_check=True)
    provision_request.wait_for_request()
    # The configured IPv4 address is link-local, so reach the VM over its
    # global IPv6 address instead.
    connect_ip = wait_for(find_global_ipv6, func_args=[instance], num_sec=600, delay=20).out
    logger.info('Connect IP: {}'.format(connect_ip))
    # Connect to the newly provisioned VM
    with ssh.SSHClient(hostname=connect_ip,
                       username='root',
                       password=ci_payload['root_password']) as ssh_client:
        # Check that correct hostname has been set
        hostname_cmd = ssh_client.run_command('hostname')
        assert hostname_cmd.success
        assert hostname_cmd.output.strip() == ci_payload['hostname']
        # Obtain network configuration script for eth0 and store it in a list
        network_cfg_cmd = ssh_client.run_command('cat /etc/sysconfig/network-scripts/ifcfg-eth0')
        assert network_cfg_cmd.success
        config_list = network_cfg_cmd.output.split('\n')
        # Compare contents of network script with cloud-init payload
        assert 'BOOTPROTO=none' in config_list, 'Address mode was not set to static'
        assert 'IPADDR={}'.format(ci_payload['ip_address']) in config_list
        assert 'PREFIX={}'.format(ci_payload['subnet_mask']) in config_list
        assert 'GATEWAY={}'.format(ci_payload['gateway']) in config_list
        assert 'DNS1={}'.format(ci_payload['dns_servers']) in config_list
        assert 'DOMAIN={}'.format(ci_payload['dns_suffixes']) in config_list
| RedHatQE/cfme_tests | cfme/tests/cloud_infra_common/test_cloud_init_provisioning.py | Python | gpl-2.0 | 7,648 |
from modeller.util.modlist import VarList
from modeller.excluded_pair import excluded_pair
import _modeller
class ExcludedPairList(VarList):
    """Sequence-like view of a model's excluded nonbond pairs, backed by
    the ``_modeller`` C layer."""

    def __init__(self, mdl):
        self.__mdl = mdl
        VarList.__init__(self)

    def __len__(self):
        # Number of excluded pairs currently stored in the model.
        return _modeller.mod_model_nexcl_get(self.__mdl.modpt)

    def _setdimfunc(self, num):
        # Resize the underlying excluded-pair array in the C layer.
        _modeller.mod_model_nexcl_set(self.__mdl.modpt, num)

    def _getfunc(self, indx):
        # The C layer returns 1-based atom indices; convert them to atom
        # objects before building the pair.
        mdl = self.__mdl
        atom_indices = _modeller.mod_excluded_pair_get(mdl.modpt, indx)
        return excluded_pair(*(mdl.atoms[i - 1] for i in atom_indices))

    def _setfunc(self, indx, obj):
        mdl = self.__mdl
        if not isinstance(obj, excluded_pair):
            raise TypeError("can only use excluded_pair objects here")
        _modeller.mod_excluded_pair_set(mdl.modpt, indx,
                                        obj._get_base_atoms(mdl))
| bjornwallner/proq2-server | apps/modeller9v8/modlib/modeller/excluded_pair_list.py | Python | gpl-3.0 | 903 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``use_org_name`` flag to ProgramCertificate.

    When enabled, certificates display the organization's full name
    instead of its short name.
    """
    dependencies = [
        ("credentials", "0002_signatory_organization_name_override"),
    ]
    operations = [
        migrations.AddField(
            model_name="programcertificate",
            name="use_org_name",
            field=models.BooleanField(
                default=False,
                help_text="Display the associated organization's name (e.g. ACME University) instead of its short name (e.g. ACMEx)",
                verbose_name="Use organization name",
            ),
        ),
    ]
| edx/credentials | credentials/apps/credentials/migrations/0003_programcertificate_use_org_name.py | Python | agpl-3.0 | 597 |
#
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""
Core client functionality, common across all API requests (including performing
HTTP requests).
"""
import base64
from datetime import datetime
from datetime import timedelta
import hashlib
import hmac
import requests
import random
import time
import googlemaps
try: # Python 3
    from urllib.parse import urlencode
except ImportError: # Python 2
    from urllib import urlencode
# Identifies this client library (and its version) in every request.
_USER_AGENT = "GoogleGeoApiClientPython/%s" % googlemaps.__version__
_DEFAULT_BASE_URL = "https://maps.googleapis.com"
# Transient server-side statuses that _get retries with backoff.
_RETRIABLE_STATUSES = set([500, 503, 504])
class Client(object):
    """Performs requests to the Google Maps API web services."""

    def __init__(self, key=None, client_id=None, client_secret=None,
                 timeout=None, connect_timeout=None, read_timeout=None,
                 retry_timeout=60):
        """
        :param key: Maps API key. Required, unless "client_id" and
            "client_secret" are set.
        :type key: string
        :param timeout: Combined connect and read timeout for HTTP requests, in
            seconds. Specify "None" for no timeout.
        :type timeout: int
        :param connect_timeout: Connection timeout for HTTP requests, in
            seconds. You should specify read_timeout in addition to this option.
            Note that this requires requests >= 2.4.0.
        :type connect_timeout: int
        :param read_timeout: Read timeout for HTTP requests, in
            seconds. You should specify connect_timeout in addition to this
            option. Note that this requires requests >= 2.4.0.
        :type read_timeout: int
        :param retry_timeout: Timeout across multiple retriable requests, in
            seconds.
        :type retry_timeout: int
        :param client_id: (for Maps API for Work customers) Your client ID.
        :type client_id: string
        :param client_secret: (for Maps API for Work customers) Your client
            secret (base64 encoded).
        :type client_secret: string
        :raises ValueError: when either credentials are missing, incomplete
            or invalid.
        :raises NotImplementedError: if connect_timeout and read_timeout are
            used with a version of requests prior to 2.4.0.
        """
        if not key and not (client_secret and client_id):
            raise ValueError("Must provide API key or enterprise credentials "
                             "when creating client.")
        if key and not key.startswith("AIza"):
            raise ValueError("Invalid API key provided.")
        self.key = key
        if timeout and (connect_timeout or read_timeout):
            raise ValueError("Specify either timeout, or connect_timeout " +
                             "and read_timeout")
        if connect_timeout and read_timeout:
            # Check that the version of requests is >= 2.4.0.
            chunks = requests.__version__.split(".")
            # BUGFIX: split() yields strings, so the original string-vs-int
            # comparison was a TypeError on Python 3 and always False on
            # Python 2 (silently skipping this version check).
            major, minor = int(chunks[0]), int(chunks[1])
            if major < 2 or (major == 2 and minor < 4):
                raise NotImplementedError("Connect/Read timeouts require "
                                          "requests v2.4.0 or higher")
            self.timeout = (connect_timeout, read_timeout)
        else:
            self.timeout = timeout
        self.client_id = client_id
        self.client_secret = client_secret
        self.retry_timeout = timedelta(seconds=retry_timeout)

    def _get(self, url, params, first_request_time=None, retry_counter=0,
             base_url=_DEFAULT_BASE_URL, accepts_clientid=True, extract_body=None):
        """Performs HTTP GET request with credentials, returning the body as
        JSON.

        :param url: URL path for the request. Should begin with a slash.
        :type url: string
        :param params: HTTP GET parameters.
        :type params: dict or list of key/value tuples
        :param first_request_time: The time of the first request (None if no retries
            have occurred).
        :type first_request_time: datetime.datetime
        :param retry_counter: The number of this retry, or zero for first attempt.
        :type retry_counter: int
        :param base_url: The base URL for the request. Defaults to the Maps API
            server. Should not have a trailing slash.
        :type base_url: string
        :param accepts_clientid: Whether this call supports the client/signature
            params. Some APIs require API keys (e.g. Roads).
        :type accepts_clientid: bool
        :param extract_body: A function that extracts the body from the request.
            If the request was not successful, the function should raise a
            googlemaps.HTTPError or googlemaps.ApiError as appropriate.
        :type extract_body: function
        :raises ApiError: when the API returns an error.
        :raises Timeout: if the request timed out.
        :raises TransportError: when something went wrong while trying to
            exceute a request.
        """
        if not first_request_time:
            first_request_time = datetime.now()
        # Give up once the total elapsed time across retries exceeds the
        # configured retry budget.
        elapsed = datetime.now() - first_request_time
        if elapsed > self.retry_timeout:
            raise googlemaps.exceptions.Timeout()
        if retry_counter > 0:
            # 0.5 * (1.5 ^ i) is an increased sleep time of 1.5x per iteration,
            # starting at 0.5s when retry_counter=0. The first retry will occur
            # at 1, so subtract that first.
            delay_seconds = 0.5 * 1.5 ** (retry_counter - 1)
            # Jitter this value by 50% and pause.
            time.sleep(delay_seconds * (random.random() + 0.5))
        authed_url = self._generate_auth_url(url, params, accepts_clientid)
        try:
            resp = requests.get(base_url + authed_url,
                                headers={"User-Agent": _USER_AGENT},
                                timeout=self.timeout,
                                verify=True)  # NOTE(cbro): verify SSL certs.
        except requests.exceptions.Timeout:
            raise googlemaps.exceptions.Timeout()
        except Exception as e:
            raise googlemaps.exceptions.TransportError(e)
        if resp.status_code in _RETRIABLE_STATUSES:
            # Retry request.
            return self._get(url, params, first_request_time, retry_counter + 1,
                             base_url, accepts_clientid, extract_body)
        try:
            if extract_body:
                return extract_body(resp)
            return self._get_body(resp)
        except googlemaps.exceptions._RetriableRequest:
            # Retry request.
            return self._get(url, params, first_request_time, retry_counter + 1,
                             base_url, accepts_clientid, extract_body)

    def _get_body(self, resp):
        """Extract the JSON body, raising for HTTP or API-level errors."""
        if resp.status_code != 200:
            raise googlemaps.exceptions.HTTPError(resp.status_code)
        body = resp.json()
        api_status = body["status"]
        if api_status == "OK" or api_status == "ZERO_RESULTS":
            return body
        if api_status == "OVER_QUERY_LIMIT":
            # Signal the caller (_get) to retry with backoff.
            raise googlemaps.exceptions._RetriableRequest()
        if "error_message" in body:
            raise googlemaps.exceptions.ApiError(api_status,
                                                 body["error_message"])
        else:
            raise googlemaps.exceptions.ApiError(api_status)

    def _generate_auth_url(self, path, params, accepts_clientid):
        """Returns the path and query string portion of the request URL, first
        adding any necessary parameters.

        :param path: The path portion of the URL.
        :type path: string
        :param params: URL parameters.
        :type params: dict or list of key/value tuples
        :rtype: string
        """
        # Deterministic ordering through sorting by key.
        # Useful for tests, and in the future, any caching.
        if type(params) is dict:
            params = sorted(params.items())
        else:
            params = params[:]  # Take a copy.
        if accepts_clientid and self.client_id and self.client_secret:
            params.append(("client", self.client_id))
            path = "?".join([path, urlencode_params(params)])
            sig = sign_hmac(self.client_secret, path)
            return path + "&signature=" + sig
        if self.key:
            params.append(("key", self.key))
            return path + "?" + urlencode_params(params)
        raise ValueError("Must provide API key for this API. It does not accept "
                         "enterprise credentials.")
# These submodule imports live at the bottom of the file to avoid a circular
# import: each submodule imports googlemaps (for the Client) at its own top.
from googlemaps.directions import directions
from googlemaps.distance_matrix import distance_matrix
from googlemaps.elevation import elevation
from googlemaps.elevation import elevation_along_path
from googlemaps.geocoding import geocode
from googlemaps.geocoding import reverse_geocode
from googlemaps.timezone import timezone
from googlemaps.roads import snap_to_roads
from googlemaps.roads import speed_limits
from googlemaps.roads import snapped_speed_limits
# Attach each API endpoint function as a method of Client.
Client.directions = directions
Client.distance_matrix = distance_matrix
Client.elevation = elevation
Client.elevation_along_path = elevation_along_path
Client.geocode = geocode
Client.reverse_geocode = reverse_geocode
Client.timezone = timezone
Client.snap_to_roads = snap_to_roads
Client.speed_limits = speed_limits
Client.snapped_speed_limits = snapped_speed_limits
def sign_hmac(secret, payload):
    """Returns the base64url-encoded HMAC-SHA1 signature of *payload*.

    :param secret: The signing key, base64url encoded.
    :type secret: string
    :param payload: The payload to sign.
    :type payload: string
    :rtype: string
    """
    raw_key = base64.urlsafe_b64decode(secret.encode('ascii', 'strict'))
    digest = hmac.new(raw_key,
                      payload.encode('ascii', 'strict'),
                      hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest).decode('utf-8')
def urlencode_params(params):
    """URL-encode the given parameters.

    :param params: The parameters
    :type params: list of key/value tuples.
    """
    # Normalize values first so unicode strings encode correctly on Py2,
    # then unquote the unreserved characters that urlencode quotes
    # incorrectly, which would invalidate auth signatures (see GH #72).
    normalized = [(key, normalize_for_urlencode(value))
                  for key, value in params]
    return requests.utils.unquote_unreserved(urlencode(normalized))
try:
    unicode
    # NOTE(cbro): `unicode` was removed in Python 3. In Python 3, NameError is
    # raised here, and caught below.
    def normalize_for_urlencode(value):
        """(Python 2) Converts the value to a `str` (raw bytes)."""
        if isinstance(value, unicode):
            return value.encode('utf8')
        if isinstance(value, str):
            return value
        # Fall back to the str() representation and normalize that.
        return normalize_for_urlencode(str(value))
except NameError:
    def normalize_for_urlencode(value):
        """(Python 3) No-op."""
        # urlencode in Python 3 handles all the types we are passing it.
        return value
| DataReply/google-maps-services-python | googlemaps/client.py | Python | apache-2.0 | 11,665 |
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import PoseArray
from sensor_msgs.msg import PointCloud
from sensor_msgs import point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from sensor_msgs.point_cloud2 import create_cloud_xyz32
import threading
import os
import subprocess
import signal
import time
def parse_pointstamped(point_input):
    """
    Parse point_input into PointStamped.

    :param point_input: a PointStamped, Point, Point32, or an indexable
        [x, y, z] sequence.
    :return: an equivalent PointStamped; all but PointStamped inputs are
        stamped with the current time.
    :raises ValueError: if the input cannot be interpreted as a point.
    """
    # NOTE: the original assert + bare-except dispatch is replaced with
    # explicit isinstance checks: asserts vanish under `python -O`, and the
    # bare excepts masked unrelated errors.
    if isinstance(point_input, PointStamped):
        return point_input
    if isinstance(point_input, Point):
        point = PointStamped(point=point_input)
        point.header.stamp = rospy.Time.now()
        return point
    if isinstance(point_input, Point32):
        point = PointStamped(point=Point(x=point_input.x, y=point_input.y, z=point_input.z))
        point.header.stamp = rospy.Time.now()
        return point
    # Fall back to treating the input as an [x, y, z] sequence.
    try:
        point = PointStamped(point=Point(x=point_input[0], y=point_input[1], z=point_input[2]))
        point.header.stamp = rospy.Time.now()
        return point
    except Exception:
        raise ValueError('Point not properly specified (should be Point, PointStamped or [3] list type)!')
def parse_posestamped(pose_input):
    """
    Parse pose_input into PoseStamped.

    :param pose_input: a PoseStamped, Pose, or an indexable
        [[x, y, z], [qx, qy, qz, qw]] pair.
    :return: an equivalent PoseStamped; all but PoseStamped inputs are
        stamped with the current time.
    :raises ValueError: if the input cannot be interpreted as a pose.
    """
    # Explicit isinstance dispatch instead of the original assert +
    # bare-except pattern (safe under `python -O`, no masked errors).
    if isinstance(pose_input, PoseStamped):
        return pose_input
    if isinstance(pose_input, Pose):
        pose = PoseStamped(pose=pose_input)
        pose.header.stamp = rospy.Time.now()
        return pose
    # Fall back to treating the input as a [[3], [4]] position/quaternion pair.
    try:
        position = Point(x=pose_input[0][0], y=pose_input[0][1], z=pose_input[0][2])
        orientation = Quaternion(x=pose_input[1][0], y=pose_input[1][1], z=pose_input[1][2], w=pose_input[1][3])
        pose = PoseStamped(pose=Pose(position=position, orientation=orientation))
        pose.header.stamp = rospy.Time.now()
        return pose
    except Exception:
        raise ValueError('Pose not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
def parse_posearray(posearray_input):
    """
    Parse posearray_input into a PoseArray.

    :param posearray_input: a PoseArray, or a list whose elements are Pose,
        PoseStamped, or [[x, y, z], [qx, qy, qz, qw]] pairs.
    :return: an equivalent PoseArray, stamped with the current time.
    :raises ValueError: if the input (or any element) cannot be interpreted.
    """
    if isinstance(posearray_input, PoseArray):
        return posearray_input
    if not isinstance(posearray_input, list):
        raise ValueError('Pose array not properly specified (should be PoseArray or list of Pose, PoseStamped or [[3],[4]] list types)!')
    posearray = PoseArray()
    for pose in posearray_input:
        if isinstance(pose, Pose):
            posearray.poses.append(pose)
        elif isinstance(pose, PoseStamped):
            posearray.poses.append(pose.pose)
        else:
            # Fall back to a [[3], [4]] position/quaternion pair.
            # BUG FIX: in the original, this per-element ValueError was
            # immediately swallowed by the enclosing handler and replaced by
            # the generic whole-array message; now it propagates as intended.
            try:
                position = Point(x=pose[0][0], y=pose[0][1], z=pose[0][2])
                orientation = Quaternion(x=pose[1][0], y=pose[1][1], z=pose[1][2], w=pose[1][3])
                posearray.poses.append(Pose(position=position, orientation=orientation))
            except Exception:
                raise ValueError('Pose in pose array input not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
    posearray.header.stamp = rospy.Time.now()
    return posearray
def parse_pointcloud(pointcloud_input):
    """
    Parse pointcloud_input into PointCloud.

    :param pointcloud_input: a PointCloud (returned as-is) or a PointCloud2.
    :raises ValueError: if the input cannot be read as a point cloud.
    """
    if isinstance(pointcloud_input, PointCloud):
        return pointcloud_input
    try:
        points = pc2.read_points(pointcloud_input, skip_nans=True, field_names=('x', 'y', 'z'))
        # BUG FIX: materialize the points with a list comprehension; under
        # Python 3 map() returns a lazy iterator, not the list the message
        # field expects.
        return PointCloud(points=[Point32(*point) for point in points])
    except Exception as e:
        raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type): ' + repr(e))
def parse_pointcloud2(pointcloud_input):
    """
    Parse pointcloud_input into PointCloud2.

    :param pointcloud_input: a PointCloud2 (returned as-is) or a PointCloud.
    :raises ValueError: if the input cannot be read as a point cloud.
    """
    if isinstance(pointcloud_input, PointCloud2):
        return pointcloud_input
    try:
        points = [[point.x, point.y, point.z] for point in pointcloud_input.points]
        return create_cloud_xyz32(header=pointcloud_input.header, points=points)
    except Exception as e:
        # Include the underlying error (consistent with parse_pointcloud);
        # the original bare except discarded it.
        raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type): ' + repr(e))
class MsgPublisher(object):
    """Publishes messages on arbitrary topics at fixed rates.

    Each started topic gets its own background thread that republishes the
    saved message at the configured rate (optionally transforming it with a
    callback) until stop()/stop_all() is called.
    """
    def __init__(self):
        # A dict of message publishers indexed by topic
        self._pubs = dict()
        # A dict of messages indexed by topic
        self._msgs = dict()
        # A dict of callbacks indexed by topic
        self._callbacks = dict()
        # A dict of message publication rates indexed by topic
        self._pub_rates = dict()
        # A dict of message publisher threads indexed by topic
        self._pub_threads = dict()
        # A dict of message publisher stop flags indexed by topic
        self._stop_flags = dict()
        # Length of timeout (in seconds) for waiting for the threads to finish
        # publishing before forcibly unpublishing.
        self._unpublish_timeout = 10.0

    def _run_pub_thread(self, topic):
        """Thread target: publish the topic's message at its configured rate
        until the topic's stop flag is set, then unpublish."""
        r = rospy.Rate(self._pub_rates[topic])
        while not self._stop_flags[topic]:
            # Apply callback to message (the callback may replace the message)
            if self._callbacks[topic]:
                try:
                    self._msgs[topic] = self._callbacks[topic](self._msgs[topic])
                except Exception as e:
                    rospy.logerr('Error when applying callback to message being published on topic {}: {}'.format(topic, repr(e)))
            # Publish message
            try:
                self._pubs[topic].publish(self._msgs[topic])
            except Exception as e:
                rospy.logerr('Error while publishing to topic {}: {}'.format(topic, repr(e)))
            r.sleep()
        self._unpublish(topic)

    def _unpublish(self, topic):
        """Unregister the topic's publisher and drop all per-topic state."""
        try:
            self._pubs[topic].unregister()
        except Exception as e:
            rospy.logerr('Failed to unregister publisher of topic {}: {}'.format(topic, repr(e)))
            raise
        del self._pubs[topic]
        del self._msgs[topic]
        del self._callbacks[topic]
        del self._pub_rates[topic]

    def start(self, msg, topic, rate, frame_id=None, callback=None):
        """Start publishing msg on topic at rate Hz.

        :param msg: the message to publish repeatedly.
        :param topic: topic name to publish on.
        :param rate: publication rate in Hz.
        :param frame_id: optional frame_id to stamp into the message header.
        :param callback: optional callable applied to the message before
            each publication.
        :return: 'succeeded' or 'aborted'.
        """
        # Set the message publisher stopping flag
        self._stop_flags[topic] = False
        # Save the message
        self._msgs[topic] = msg
        # Save the message publication rate
        self._pub_rates[topic] = rate
        # Use frame_id if specified
        if frame_id:
            try:
                assert(isinstance(frame_id, str))
                self._msgs[topic].header.frame_id = frame_id
            except Exception as e:
                # BUG FIX: the original bare `except:` referenced an undefined
                # name `e` in the log call below, raising a NameError.
                rospy.logwarn('Failed to add specified frame_id {} to message for publication on topic {}: {}'.format(frame_id, topic, repr(e)))
        # Use callback if specified
        if callback:
            try:
                assert(callable(callback))
                self._callbacks[topic] = callback
            except Exception as e:
                # BUG FIX: same undefined-`e` problem as above.
                rospy.logwarn('Failed to add specified callback {} to publisher of topic {}: {}'.format(callback, topic, repr(e)))
                self._callbacks[topic] = None
        else:
            self._callbacks[topic] = None
        # Add publisher
        try:
            self._pubs[topic] = rospy.Publisher(topic, type(self._msgs[topic]))
        except Exception as e:
            del self._pub_rates[topic]
            # BUG FIX: the original line `self._msgs[topic]` was a no-op
            # expression; the saved message must actually be deleted here.
            del self._msgs[topic]
            rospy.logwarn('Failed to add publisher for topic {}: {}'.format(topic, repr(e)))
            return 'aborted'
        # Spin up the message publication thread
        self._pub_threads[topic] = threading.Thread(target=self._run_pub_thread, args=[topic])
        self._pub_threads[topic].start()
        return 'succeeded'

    def stop(self, topic):
        """Signal the topic's thread to stop and wait (up to
        self._unpublish_timeout seconds) for it to unpublish; on timeout,
        attempt a forced unpublish.

        :return: 'succeeded' or 'aborted'.
        """
        # Signal thread to stop publishing
        self._stop_flags[topic] = True
        # Wait for the topic to be unpublished by its thread
        t = rospy.get_time()
        r = rospy.Rate(self._pub_rates[topic])
        while topic in list(self._pubs.keys()):
            if rospy.get_time() - t < self._unpublish_timeout:
                r.sleep()
            else:
                break
        else:
            return 'succeeded'
        # If the publisher is still running, issue a warning and attempt forced unpublish.
        rospy.logwarn('Warning: timeout exceeded for stopping publisher thread for topic {}. Attempting forced stop...'.format(topic))
        try:
            self._unpublish(topic)
        except Exception as e:
            rospy.logerr('Error during forced stop of publisher of topic {}: {}'.format(topic, repr(e)))
            return 'aborted'
        return 'succeeded'

    def stop_all(self):
        """Stop every active publisher; 'succeeded' only if all stops do."""
        # Iterate over a snapshot of the keys: stop() ultimately removes
        # entries from self._pubs (from the publisher thread), and iterating
        # a mutating dict raises RuntimeError under Python 3.
        for topic in list(self._pubs.keys()):
            if self.stop(topic) != 'succeeded':
                return 'aborted'
        return 'succeeded'
class PublishMsgState(smach.State):
    """SMACH state wrapping a MsgPublisher.

    action selects the behavior: 'start' parses userdata.msg (into a
    stamped/standard message type where possible) and starts publishing it;
    'stop' stops one topic; 'stop_all' stops every topic.
    """
    def __init__(self, name, msg_publisher, action, input_keys = ['msg', 'topic', 'rate'], output_keys = ['msg', 'topic'], callbacks = None):
        smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
        # Save the state name
        self._name = name
        # Save the MsgPublisherObserver object reference
        self._msg_publisher = msg_publisher
        # Save the action
        self._action = action
        # Set up dict of parsing functions for certain message types/classes,
        # keyed by the str() of the message class.
        self._msg_parsers = {"<class 'geometry_msgs.msg._Point.Point'>": parse_pointstamped,
                             "<class 'geometry_msgs.msg._PointStamped.PointStamped'>": parse_pointstamped,
                             "<class 'geometry_msgs.msg._Pose.Pose'>": parse_posestamped,
                             "<class 'geometry_msgs.msg._PoseStamped.PoseStamped'>": parse_posestamped,
                             "<class 'geometry_msgs.msg._PoseArray.PoseArray'>": parse_posearray,
                             "<class 'sensor_msgs.msg._PointCloud.PointCloud'>": parse_pointcloud,
                             "<class 'sensor_msgs.msg._PointCloud2.PointCloud2'>": parse_pointcloud2}
        # Resolve callback names to callables from the global scope or from
        # this instance.
        self._cbs = []
        if callbacks:
            for cb in sorted(callbacks):
                if cb in globals():
                    self._cbs.append(globals()[cb])
                elif cb in locals():
                    self._cbs.append(locals()[cb])
                elif cb in dir(self):
                    self._cbs.append(getattr(self, cb))
        self._cb_input_keys = []
        self._cb_output_keys = []
        self._cb_outcomes = []
        # Register the smach interface (keys/outcomes) declared by callbacks.
        for cb in self._cbs:
            if cb and smach.has_smach_interface(cb):
                self._cb_input_keys.append(cb.get_registered_input_keys())
                self._cb_output_keys.append(cb.get_registered_output_keys())
                self._cb_outcomes.append(cb.get_registered_outcomes())
                self.register_input_keys(self._cb_input_keys[-1])
                self.register_output_keys(self._cb_output_keys[-1])
                self.register_outcomes(self._cb_outcomes[-1])

    def _parse_msg(self, msg, msg_type=None):
        """Best-effort conversion of msg to a publishable message type.

        Tries, in order: the parser registered for msg_type, the parser
        registered for type(msg), then every known parser; falls back to
        returning msg unchanged.
        """
        # First try using a known parser for a specified msg_type.
        try:
            assert msg_type
            msg_class = str(roslib.message.get_message_class(msg_type))
            published_msg = self._msg_parsers[msg_class](msg)
            return published_msg
        except:
            pass
        # Next, try to select a known parser by checking the type of message.
        try:
            msg_class = str(type(msg))
            published_msg = self._msg_parsers[msg_class](msg)
            return published_msg
        except:
            pass
        # Next, try each message type parser in succession and see if something sticks.
        for _, parser in self._msg_parsers.items():
            try:
                published_msg = parser(msg)
                return published_msg
            except:
                pass
        # Finally, if none of the above stuck, just return the original message.
        return msg

    def execute(self, userdata):
        """Execute the configured action; return 'succeeded' or 'aborted'."""
        # Call callbacks
        for (cb, ik, ok) in zip(self._cbs,
                                self._cb_input_keys,
                                self._cb_output_keys):
            # Call callback with limited userdata
            try:
                cb_outcome = cb(self, smach.Remapper(userdata, ik, ok, {}))
            except:
                cb_outcome = cb(smach.Remapper(userdata, ik, ok, {}))
        # Start or stop the message publisher
        outcome = 'aborted'
        if self._action == 'start':
            # Parse msg
            try:
                if 'msg_type' in self._input_keys:
                    published_msg = self._parse_msg(userdata.msg, msg_type=userdata.msg_type)
                else:
                    published_msg = self._parse_msg(userdata.msg)
            except Exception as e:
                # BUG FIX: the original format string had no '{}' placeholder,
                # so the error detail was silently dropped from the log.
                rospy.logerr('Failed to parse message: {}'.format(repr(e)))
                return 'aborted'
            # Get topic if it's specified as an input key
            if 'topic' in self._input_keys:
                topic = userdata.topic
            # Otherwise, construct it from the state name
            else:
                topic = 'smacha/' + self._name.lower()
            # Get rate if it's specified as an input key
            if 'rate' in self._input_keys:
                rate = userdata.rate
            else:
                rate = 100.0
            # Get callback if it's specified as an input key
            if 'callback' in self._input_keys:
                callback = userdata.callback
            else:
                callback = ''
            # Get frame_id if it's specified as an input key
            if 'frame_id' in self._input_keys:
                frame_id = userdata.frame_id
            else:
                frame_id = ''
            # Start the publisher
            outcome = self._msg_publisher.start(published_msg, topic, rate, frame_id=frame_id, callback=callback)
        elif self._action == 'stop':
            # BUG FIX: the original referenced `topic` here without ever
            # defining it in this branch (NameError at runtime); resolve it
            # the same way as the 'start' branch does.
            if 'topic' in self._input_keys:
                topic = userdata.topic
            else:
                topic = 'smacha/' + self._name.lower()
            outcome = self._msg_publisher.stop(topic)
        elif self._action == 'stop_all':
            outcome = self._msg_publisher.stop_all()
        # Set topic output key if specified
        if self._action == 'start' and outcome == 'succeeded':
            for output_key in ['topic', 'output_topic', 'topic_output']:
                if output_key in self._output_keys:
                    setattr(userdata, output_key, topic)
        # Set msg output key if specified
        if self._action == 'start' and outcome == 'succeeded':
            for output_key in ['msg', 'output_msg', 'msg_output']:
                if output_key in self._output_keys:
                    setattr(userdata, output_key, published_msg)
        return outcome
class ROSBagCLIProcessRecorder(object):
    """A rosbag recorder class that uses subprocess calls to the rosbag CLI
    (command-line interface) recording tool in order to circumvent threading
    and Python GIL (global interpreter lock) issues.
    """
    def __init__(self):
        # A dict of bag recording processes indexed by bag filenames
        self._processes = dict()

    def start(self, bag_file, topics):
        """Start a rosbag recording.

        :param bag_file: output bag path. If it does not end in '.bag', a
            timestamp and '.bag' suffix are appended (note: stop() must then
            be called with that derived name, or via stop_all()).
        :param topics: list of topic names; an empty list records all ('-a').
        :return: 'succeeded' or 'aborted'.
        """
        try:
            if not topics:
                topics = ['-a']
            if not bag_file.endswith('.bag'):
                time_str = time.strftime('%Y-%m-%d-%H-%M-%S')
                bag_file = bag_file + '_' + time_str + '.bag'
            # cmd = ['rosbag', 'record', '-j'] + topics + ['-O', bag_file]
            cmd = ['rosbag', 'record'] + topics + ['-O', bag_file]
            rospy.loginfo('Starting rosbag CLI recording with command: \'{}\''.format(' '.join(cmd)))
            self._processes[bag_file] = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except Exception as e:
            rospy.logerr('Unable to start recording rosbag file \'{}\' with topics {}: {}'.format(bag_file, topics, repr(e)))
            return 'aborted'
        return 'succeeded'

    def stop(self, bag_file):
        """Stop a rosbag recording.
        See: https://answers.ros.org/question/10714/start-and-stop-rosbag-within-a-python-script/
        """
        try:
            rospy.loginfo('Stopping rosbag CLI recording process for rosbag file \'{}\''.format(bag_file))
            # Kill child processes (rosbag record spawns its own children)
            ps_command = subprocess.Popen('ps -o pid --ppid {} --noheaders'.format(self._processes[bag_file].pid), shell=True, stdout=subprocess.PIPE)
            # BUG FIX: decode the output so the split()/int() parsing below
            # also works under Python 3, where .read() returns bytes.
            ps_output = ps_command.stdout.read().decode('utf-8')
            retcode = ps_command.wait()
            assert retcode == 0, 'ps command returned {}'.format(retcode)
            for pid_str in ps_output.split("\n")[:-1]:
                os.kill(int(pid_str), signal.SIGINT)
            # Kill parent process
            os.kill(self._processes[bag_file].pid, signal.SIGINT)
            # BUG FIX: drop the finished entry so a later stop()/stop_all()
            # does not try to signal an already-terminated process.
            del self._processes[bag_file]
        except Exception as e:
            rospy.logerr('Unable to terminate rosbag CLI recording process for rosbag file \'{}\': {}'.format(bag_file, repr(e)))
            return 'aborted'
        try:
            assert(os.path.exists(bag_file))
        except AssertionError:
            # BUG FIX: added the missing separating space between the two
            # concatenated message fragments.
            rospy.logwarn('rosbag file \'{}\' '.format(bag_file) +
                          'was not detected on the file system after rosbag CLI process recording stopped ' +
                          '(it may take more time for the process to terminate)!')
        return 'succeeded'

    def stop_all(self):
        """Stop all rosbag recordings.
        """
        for bag_file in list(self._processes.keys()):
            if self.stop(bag_file) != 'succeeded':
                return 'aborted'
        return 'succeeded'
class RecordROSBagState(smach.State):
    """SMACH state that starts or stops rosbag recordings through a bag
    recorder object (e.g. ROSBagCLIProcessRecorder).

    action is one of 'start'/'record', 'stop' or 'stop_all'; the bag file
    name and topic list are read from userdata.
    """
    def __init__(self, name, bag_recorder, action, input_keys=['file', 'topics'], output_keys=[], callbacks = None):
        smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
        # Save the state name
        self._name = name
        # Save the ROSBagRecorder object reference
        self._bag_recorder= bag_recorder
        # Save the action
        self._action = action
        # Resolve callback names to callables from the global scope or from
        # this instance. NOTE(review): the locals() branch can never match
        # here since no matching local names exist at lookup time — TODO confirm.
        self._cbs = []
        if callbacks:
            for cb in sorted(callbacks):
                if cb in globals():
                    self._cbs.append(globals()[cb])
                elif cb in locals():
                    self._cbs.append(locals()[cb])
                elif cb in dir(self):
                    self._cbs.append(getattr(self, cb))
        self._cb_input_keys = []
        self._cb_output_keys = []
        self._cb_outcomes = []
        # Register the smach interface (keys/outcomes) declared by callbacks.
        for cb in self._cbs:
            if cb and smach.has_smach_interface(cb):
                self._cb_input_keys.append(cb.get_registered_input_keys())
                self._cb_output_keys.append(cb.get_registered_output_keys())
                self._cb_outcomes.append(cb.get_registered_outcomes())
                self.register_input_keys(self._cb_input_keys[-1])
                self.register_output_keys(self._cb_output_keys[-1])
                self.register_outcomes(self._cb_outcomes[-1])

    def execute(self, userdata):
        """Validate userdata and dispatch to the recorder; returns the
        recorder's outcome ('succeeded'/'aborted')."""
        # Call callbacks
        for (cb, ik, ok) in zip(self._cbs,
                                self._cb_input_keys,
                                self._cb_output_keys):
            # Call callback with limited userdata
            try:
                cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
            except:
                cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
        # Get filename from userdata
        try:
            bag_file = userdata.file
            assert(isinstance(bag_file, str))
        except Exception as e:
            rospy.logerr('The rosbag filename must be specified as a userdata input key: {}'.format(repr(e)))
            return 'aborted'
        # Get topic names from userdata (must be an iterable of str)
        try:
            topics = userdata.topics
            assert(not any(not isinstance(x, str) for x in topics))
        except Exception as e:
            rospy.logerr('Topic names must be specified as a userdata input key: {}'.format(repr(e)))
            return 'aborted'
        # Start or stop recording
        outcome = 'aborted'
        if self._action == 'start' or self._action == 'record':
            outcome = self._bag_recorder.start(bag_file, topics)
        elif self._action == 'stop':
            outcome = self._bag_recorder.stop(bag_file)
        elif self._action == 'stop_all':
            outcome = self._bag_recorder.stop_all()
        return outcome
class SleepState(smach.State):
    """SMACH state that sleeps for a fixed duration and returns 'succeeded'."""
    def __init__(self, time, input_keys = [], output_keys = [], callbacks = [], outcomes=['succeeded']):
        smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=outcomes)
        # Sleep duration in seconds (anything rospy.sleep() accepts).
        self._time = time
    def execute(self, userdata):
        """Block for the configured duration, then succeed."""
        rospy.sleep(self._time)
        return 'succeeded'
def main():
    """Assemble and run the demo state machine: publish a Point message at
    100 Hz, record its topic to a rosbag via the rosbag CLI for 5 seconds,
    then stop the recording and the publisher."""
    rospy.init_node('sm')

    msg_publisher = MsgPublisher()
    bag_recorder = ROSBagCLIProcessRecorder()

    sm = smach.StateMachine(outcomes=['succeeded', 'aborted'])

    # Userdata consumed by the states below. (The generated original assigned
    # several of these keys repeatedly; only the effective final values are
    # kept here.)
    sm.userdata.rate = 100.0
    sm.userdata.point = Point()
    sm.userdata.topic = 'smacha/rosbag_cli_recording_1_point'
    sm.userdata.file = '/tmp/rosbag_cli_recording_1.bag'
    sm.userdata.topics = ['smacha/rosbag_cli_recording_1_point']

    with sm:
        smach.StateMachine.add('PUBLISH_MSG',
                               PublishMsgState('PUBLISH_MSG', msg_publisher, 'start'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'START_RECORDING'},
                               remapping={'msg':'point',
                                          'rate':'rate',
                                          'topic':'topic'})
        smach.StateMachine.add('START_RECORDING',
                               RecordROSBagState('START_RECORDING', bag_recorder, 'start'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'WAIT'},
                               remapping={'file':'file',
                                          'topics':'topics'})
        smach.StateMachine.add('WAIT',
                               SleepState(5),
                               transitions={'succeeded':'STOP_RECORDING'})
        smach.StateMachine.add('STOP_RECORDING',
                               RecordROSBagState('STOP_RECORDING', bag_recorder, 'stop_all'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'UNPUBLISH_MSG'})
        smach.StateMachine.add('UNPUBLISH_MSG',
                               PublishMsgState('UNPUBLISH_MSG', msg_publisher, 'stop_all'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'succeeded'})

    outcome = sm.execute()
if __name__ == '__main__':
main() | ReconCell/smacha | smacha_ros/test/smacha_generated_py/smacha_test_examples/rosbag_cli_recording_1_generate_output.py | Python | bsd-3-clause | 24,378 |
#!/usr/bin/env python
import rosunit
import unittest
import re
from flexbe_core.core.user_data import UserData
from .logger import Logger
from .test_interface import TestInterface
from .test_context import TestContext, LaunchContext
from .data_provider import DataProvider
class Tester(object):
    """Runs FlexBE behavior/state tests described by config dicts and records
    every check as a generated unittest method for reporting via rosunit."""

    def __init__(self):
        # Maps generated test method names ('test_<name>_...') to callables
        # that are later attached to a dynamically built unittest.TestCase.
        self._tests = dict()

    def run_test(self, name, config):
        """Run one test case described by `config`.

        :param name: identifier used in logging and generated test names.
        :param config: dict with 'path' and 'class' (or 'name'), expected
            'outcome', and optional 'launch', 'wait_cond', 'data', 'params',
            'input', 'output', 'import_only', 'require_launch_success'.
        :return: 1 if the test passed, 0 otherwise.
        """
        try:
            self._verify_config(config)
        except Exception as e:
            Logger.print_title(name, 'Invalid', None)
            Logger.print_error('invalid test specification!\n\t%s' % str(e))
            Logger.print_result(name, False)
            self._tests['test_%s_pass' % name] = self._test_config_invalid(str(e))
            return 0
        # allow to specify behavior name instead of generated module and class
        if 'name' in config:
            config['path'] += '.%s_sm' % re.sub(r'[^\w]', '_', config['name'].lower())
            config['class'] = '%sSM' % re.sub(r'[^\w]', '', config['name'])
        import_only = config.get('import_only', False)
        Logger.print_title(name, config['class'], config['outcome'] if not import_only else None)
        # import test subject
        try:
            test_interface = TestInterface(config['path'], config['class'])
        except Exception as e:
            Logger.print_failure('unable to import state %s (%s):\n\t%s' %
                                 (config['class'], config['path'], str(e)))
            self._tests['test_%s_pass' % name] = self._test_pass(False)
            return 0
        if not import_only:
            # prepare test context: a launched system under test, or a no-op
            context = None
            if 'launch' in config:
                context = LaunchContext(config['launch'], config.get('wait_cond', 'True'))
            else:
                context = TestContext()
            # load data source
            try:
                data = DataProvider(bagfile=config.get('data', None))
            except Exception as e:
                Logger.print_failure('unable to load data source %s:\n\t%s' %
                                     (config['data'], str(e)))
                self._tests['test_%s_pass' % name] = self._test_pass(False)
                return 0
            # run test context
            with context:
                if not context.verify():
                    Logger.print_error('failed to initialize test context:\n\t%s' % config['launch'])
                    self._tests['test_%s_pass' % name] = self._test_pass(False)
                    return 0
                # instantiate test subject with parsed constructor params
                params = {key: data.parse(value) for key, value in list(config.get('params', dict()).items())}
                try:
                    test_interface.instantiate(params)
                except Exception as e:
                    Logger.print_failure('unable to instantiate %s (%s) with params:\n\t%s\n\t%s' %
                                         (config['class'], config['path'], str(params), str(e)))
                    self._tests['test_%s_pass' % name] = self._test_pass(False)
                    return 0
                # prepare user data
                userdata = UserData()
                for input_key, input_value in list(config.get('input', dict()).items()):
                    userdata[input_key] = data.parse(input_value)
                expected = {key: data.parse(value) for key, value in config.get('output', dict()).items()}
                # run test subject
                try:
                    outcome = test_interface.execute(userdata, spin_cb=context.spin_once)
                except Exception as e:
                    Logger.print_failure('failed to execute %s (%s)\n\t%s' %
                                         (config['class'], config['path'], str(e)))
                    self._tests['test_%s_pass' % name] = self._test_pass(False)
                    return 0
                if config.get('require_launch_success', False):
                    context.wait_for_finishing()
                # evaluate outcome
                self._tests['test_%s_outcome' % name] = self._test_outcome(outcome, config['outcome'])
                outcome_ok = outcome == config['outcome']
                if outcome_ok:
                    Logger.print_positive('correctly returned outcome %s' % outcome)
                else:
                    Logger.print_negative('wrong outcome: %s' % outcome)
                # evaluate output: each expected key gets its own test entry
                output_ok = True
                for expected_key, expected_value in list(expected.items()):
                    if expected_key in userdata:
                        equals = userdata[expected_key] == expected_value
                        self._tests['test_%s_output_%s' % (name, expected_key)] = \
                            self._test_output(userdata[expected_key], expected_value)
                        if not equals:
                            Logger.print_negative('wrong result for %s: %s != %s' %
                                                  (expected_key, userdata[expected_key], expected_value))
                            output_ok = False
                    else:
                        Logger.print_negative('no result for %s' % expected_key)
                        output_ok = False
                if not context.success and config.get('require_launch_success', False):
                    Logger.print_negative('Launch file did not exit cleanly')
                    output_ok = False
                if len(expected) > 0 and output_ok:
                    Logger.print_positive('all result outputs match expected')
        # report result
        # NOTE: by operator precedence this is import_only or (outcome_ok and
        # output_ok); short-circuiting also avoids referencing outcome_ok /
        # output_ok, which are undefined when import_only is True.
        success = import_only or outcome_ok and output_ok
        Logger.print_result(name, success)
        self._tests['test_%s_pass' % name] = self._test_pass(success)
        return 1 if success else 0

    def _verify_config(self, config):
        """Raise if `config` is not a structurally valid test specification."""
        if not isinstance(config, dict):
            raise AssertionError('config needs to be a dictionary but is:\n\t%s' % str(config))
        assert 'path' in config
        assert 'class' in config or 'name' in config
        assert 'outcome' in config or config.get('import_only', False)

    # ROSUNIT interface
    def perform_rostest(self, test_pkg):
        """Build a unittest.TestCase from the collected checks and hand it
        to rosunit for execution/reporting."""
        TestCase = type(test_pkg + '_test_class', (unittest.TestCase,), self._tests)
        rosunit.unitrun(test_pkg, test_pkg + '_flexbe_tests', TestCase)

    def _test_output(self, value, expected):
        """Return a test method asserting an output value matches expected."""
        def _test_call(test_self):
            test_self.assertEqual(value, expected, "Output value %s does not match expected %s" % (value, expected))
        return _test_call

    def _test_outcome(self, outcome, expected):
        """Return a test method asserting the outcome matches expected."""
        def _test_call(test_self):
            test_self.assertEqual(outcome, expected, "Outcome %s does not match expected %s" % (outcome, expected))
        return _test_call

    def _test_pass(self, passed):
        """Return a test method asserting the overall pass/fail flag."""
        def _test_call(test_self):
            test_self.assertTrue(passed, "Did not pass configured tests.")
        return _test_call

    def _test_config_invalid(self, config):
        """Return a test method that always fails with the config error."""
        def _test_call(test_self):
            test_self.fail("Test config is invalid: %s" % config)
        return _test_call
| team-vigir/flexbe_behavior_engine | flexbe_testing/src/flexbe_testing/tester.py | Python | bsd-3-clause | 7,123 |
from flask import Blueprint, render_template, request, send_from_directory
from werkzeug.utils import secure_filename
from mrt_file_server import app, schematics
from mrt_file_server.utils.file_utils import get_filesize, split_file_root_and_extension, file_exists_in_dir
from mrt_file_server.utils.flash_utils import flash_by_key
from mrt_file_server.utils.log_utils import log_info, log_warn, log_error
from mrt_file_server.utils.string_utils import str_contains_whitespace
schematic_blueprint = Blueprint("schematic", __name__, url_prefix="/schematic")
@app.route("/schematic/upload", methods = ["GET", "POST"])
def route_schematic_upload():
    """Render the schematic upload page; on POST, process the upload first."""
    is_post = request.method == "POST"
    if is_post:
        upload_schematics()
    return render_template("schematic/upload/index.html", home=False)
def upload_schematics():
    """Validate the upload form and save each submitted schematic file.

    Flashes a user-facing message and logs a warning for every validation
    failure; per-file validation and saving is delegated to
    upload_single_schematic().
    """
    # .get() returns None when the field is absent, matching the original
    # "key in form" ternary but idiomatically.
    username = request.form.get("userName")
    if not username:
        # Covers both a missing field (None) and an empty string; the
        # original's `username == None` comparison is also replaced by
        # truthiness (PEP 8: comparisons to None should use `is`).
        flash_by_key(app, "SCHEMATIC_UPLOAD_USERNAME_EMPTY")
        log_warn("SCHEMATIC_UPLOAD_USERNAME_EMPTY")
    elif str_contains_whitespace(username):
        flash_by_key(app, "SCHEMATIC_UPLOAD_USERNAME_WHITESPACE")
        log_warn("SCHEMATIC_UPLOAD_USERNAME_WHITESPACE", username)
    elif "schematic" not in request.files:
        flash_by_key(app, "SCHEMATIC_UPLOAD_NO_FILES")
        log_warn("SCHEMATIC_UPLOAD_NO_FILES", username)
    else:
        files = request.files.getlist("schematic")
        if len(files) > app.config["SCHEMATIC_UPLOAD_MAX_NUMBER_OF_FILES"]:
            flash_by_key(app, "SCHEMATIC_UPLOAD_TOO_MANY_FILES")
            log_warn("SCHEMATIC_UPLOAD_TOO_MANY_FILES", username)
        else:
            for file in files:
                upload_single_schematic(username, file)
def upload_single_schematic(username, file):
    """Validate and save one uploaded schematic file for the given user.

    The file is renamed to '<username>-<original name>' and rejected when the
    name contains whitespace, the extension is not .schematic/.schem, the
    file is too large, or a file with the same root name already exists.
    """
    # Namespace the upload per user by prefixing the username.
    file.filename = "{}-{}".format(username, file.filename)
    uploads_dir = app.config["SCHEMATIC_UPLOADS_DIR"]
    if str_contains_whitespace(file.filename):
        flash_by_key(app, "SCHEMATIC_UPLOAD_FILENAME_WHITESPACE", file.filename)
        log_warn("SCHEMATIC_UPLOAD_FILENAME_WHITESPACE", file.filename, username)
        return
    file.filename = secure_filename(file.filename)
    file_size = get_filesize(file)
    # Tuple unpacking instead of indexing the returned pair.
    file_root, file_extension = split_file_root_and_extension(file.filename)
    if file_extension != ".schematic" and file_extension != ".schem":
        flash_by_key(app, "SCHEMATIC_UPLOAD_FILENAME_EXTENSION", file.filename)
        log_warn("SCHEMATIC_UPLOAD_FILENAME_EXTENSION", file.filename, username)
    elif file_size > app.config["SCHEMATIC_UPLOAD_MAX_FILE_SIZE"]:
        flash_by_key(app, "SCHEMATIC_UPLOAD_FILE_TOO_LARGE", file.filename)
        log_warn("SCHEMATIC_UPLOAD_FILE_TOO_LARGE", file.filename, username)
    elif file_exists_in_dir(uploads_dir, file_root + ".schematic") or file_exists_in_dir(uploads_dir, file_root + ".schem"):
        flash_by_key(app, "SCHEMATIC_UPLOAD_FILE_EXISTS", file.filename)
        log_warn("SCHEMATIC_UPLOAD_FILE_EXISTS", file.filename, username)
    else:
        try:
            schematics.save(file)
            # The original bound flash_by_key's return to an unused `message`
            # local; the assignments are dropped.
            flash_by_key(app, "SCHEMATIC_UPLOAD_SUCCESS", file.filename)
            log_info("SCHEMATIC_UPLOAD_SUCCESS", file.filename, username)
        except Exception as e:
            flash_by_key(app, "SCHEMATIC_UPLOAD_FAILURE", file.filename)
            log_error("SCHEMATIC_UPLOAD_FAILURE", file.filename, username, e)
@app.route("/schematic/download", methods = ["GET", "POST"])
def route_schematic_download():
    """Render the schematic download page; on POST, first attempt to create
    a download link from the submitted form.

    (An unused `response = False` local from the original was removed.)
    """
    if request.method == "POST":
        create_schematic_download_link()
    return render_template("schematic/download/index.html", home = False)
def create_schematic_download_link():
    """Validate the download form and flash the link-creation result."""
    file_root = request.form["fileRoot"]
    file_extension = request.form["fileExtension"]
    file_name = "{}.{}".format(file_root, file_extension)
    downloads_dir = app.config["SCHEMATIC_DOWNLOADS_DIR"]

    # Guard clauses: empty name, unsupported extension, whitespace in name.
    if not file_root:
        flash_by_key(app, "SCHEMATIC_DOWNLOAD_LINK_CREATION_FILENAME_EMPTY")
        log_warn("SCHEMATIC_DOWNLOAD_LINK_CREATION_FILENAME_EMPTY")
        return
    if file_extension not in ("schem", "schematic"):
        flash_by_key(app, "SCHEMATIC_DOWNLOAD_LINK_CREATION_INVALID_EXTENSION", file_name)
        log_warn("SCHEMATIC_DOWNLOAD_LINK_CREATION_INVALID_EXTENSION", file_name)
        return
    if str_contains_whitespace(file_root):
        flash_by_key(app, "SCHEMATIC_DOWNLOAD_LINK_CREATION_FILENAME_WHITESPACE", file_name)
        log_warn("SCHEMATIC_DOWNLOAD_LINK_CREATION_FILENAME_WHITESPACE", file_name)
        return

    # Sanitize the root before checking for the file on disk.
    secure_file_name = "{}.{}".format(secure_filename(file_root), file_extension)
    if not file_exists_in_dir(downloads_dir, secure_file_name):
        flash_by_key(app, "SCHEMATIC_DOWNLOAD_LINK_CREATION_FILE_NOT_FOUND", secure_file_name)
        log_warn("SCHEMATIC_DOWNLOAD_LINK_CREATION_FILE_NOT_FOUND", secure_file_name)
        return
    flash_by_key(app, "SCHEMATIC_DOWNLOAD_LINK_CREATION_SUCCESS", secure_file_name)
    log_info("SCHEMATIC_DOWNLOAD_LINK_CREATION_SUCCESS", secure_file_name)
@app.route("/schematic/download/<path:filename>")
def download_schematic(filename):
    """Serve a schematic from the downloads directory as an attachment."""
    downloads_dir = app.config["SCHEMATIC_DOWNLOADS_DIR"]
    attachment = send_from_directory(downloads_dir, filename, as_attachment=True)
    log_info("SCHEMATIC_DOWNLOAD_SUCCESS", filename)
    return attachment
| Frumple/mrt-file-server | mrt_file_server/blueprints/schematic.py | Python | mit | 5,181 |
# -*- coding: utf-8 -*-
from django.db import migrations
import os.path
def default_filename(apps, schema_editor):
    """Backfill PatientConsent.filename from the uploaded form's basename
    for every row that does not yet have a filename."""
    PatientConsent = apps.get_model("patients", "PatientConsent")
    for consent in PatientConsent.objects.all():
        if consent.filename:
            continue
        consent.filename = os.path.basename(consent.form.name)
        consent.save()
def clear_filename(apps, schema_editor):
    """Reverse migration: blank out PatientConsent.filename on every row."""
    model = apps.get_model("patients", "PatientConsent")
    model.objects.all().update(filename="")
class Migration(migrations.Migration):
    """Data migration: populate PatientConsent.filename for existing rows
    (forward: default_filename; reverse: clear_filename)."""

    dependencies = [
        ('patients', '0016_patientconsent_filename'),
    ]
    operations = [
        migrations.RunPython(default_filename, clear_filename),
    ]
| muccg/rdrf | rdrf/registry/patients/migrations/0017_patientconsent_filename.py | Python | agpl-3.0 | 711 |
#!/usr/bin/env python
import traceback
import json
import os
import datetime
import fitbit
import time
import sys
import pprint
# Parsed Basis data: heart-rate samples and per-walk step summaries.
hr_metrics = []  # Store (time,value) tuples parsed from json
step_data = []   # (walk_start, walk_end, steps) tuples
# Must take at least 10 steps to count as a walk period
MIN_STEPS_MINUTE_TO_COUNT = 10
MAX_LOW_INTERVALS_TO_END_PERIOD = 3  # Must not stop for more then 3 minutes
# A walk must total more than this many steps to be recorded at all.
MIN_STEPS_TO_COUNT = 100
# Load API credentials from the local config file; the script cannot run
# without at least the "fitbit" section. (Python 2 script: print statements.)
try:
    config = json.loads(open('.basis_retriever.cfg', 'r').read())
    FB_CONSUMER_KEY = config['fitbit']['consumer_key']
    FB_CONSUMER_SECRET = config['fitbit']['consumer_secret']
    FB_RESOURCE_OWNER_KEY = config['fitbit']['resource_owner_key']
    FB_RESOURCE_OWNER_SECRET = config['fitbit']['resource_owner_secret']
except:
    print "Could not open config file, please create .basis_retriever.cfg"
    print "and include at least the \"fitbit\" section."
    print "Format: "
    print """
{
    "basis": {
        "username": "<your mybasis.com username>",
        "password": "<your mybasis.com password>"
    },
    "fitbit": {
        "consumer_key": "<your fitbit consumer key>",
        "consumer_secret": <your fitbit consumer secret>",
        "resource_owner_key": <fitbit resource owner key>",
        "resource_owner_secret": <fitbit resource owner secret>""
    }
}
"""
    traceback.print_exc()
    sys.exit(1)
# Fitbit OAuth endpoints (used by the fitbit client library).
FB_TOKEN_REQ_URL = 'https://api.fitbit.com/oauth/request_token'
FB_TOKEN_ACCESS_URL = 'https://api.fitbit.com/oauth/access_token'
FB_AUTHORIZE_URL = 'https://www.fitbit.com/oauth/authorize'
# from ./gather_keys_cli.py
# FB_WALKING_ACTIVITY=90013
# Fitbit activity id used when logging walks.
FB_WALKING_ACTIVITY = 17200
# Date to process: first CLI argument if given, otherwise today.
today = time.strftime('%Y-%m-%d')
if len(sys.argv) > 1:
    d = sys.argv[1]
else:
    d = today
# Load the Basis JSON export for the selected date.
f = "data/basis-data-%s.json" % (d)
try:
    jd = json.load(open(f, 'r'))
except:
    print "Exception loading data file (%s)" % (f)
    traceback.print_exc()
    sys.exit(1)
activity_data = []
# steps_str=jd['metrics']['steps']['sum']
# Derive the sampling interval (seconds per step sample) from the export's
# time span and sample count.
data_start = int(jd['starttime'])
data_end = int(jd['endtime'])
# NOTE(review): under Python 2 this is integer division; under Python 3 it
# would yield a float interval — TODO confirm intended semantics.
data_interval = (1 + (data_end - data_start)) / \
    len(jd['metrics']['steps']['values'])
step_data = []
# Split into walks
# State for the walk-splitting scan below.
in_current_walk = False
walk_steps = 0
walk_start = 0
walk_end = 0
strikes = 0
# Scan the per-minute step samples and split them into walk periods: a walk
# runs while samples have >= MIN_STEPS_MINUTE_TO_COUNT steps, and ends after
# MAX_LOW_INTERVALS_TO_END_PERIOD consecutive low-step samples.
for counter, s in enumerate(jd['metrics']['steps']['values']):
    try:
        steps = int(s)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`; unparseable samples count as 0.
        steps = 0
    if steps < MIN_STEPS_MINUTE_TO_COUNT:
        strikes += 1
        # end this walk
        if in_current_walk and strikes >= MAX_LOW_INTERVALS_TO_END_PERIOD:
            walk_end = data_start + (counter * data_interval)
            in_current_walk = False
            # Only record walks that accumulated enough total steps.
            if walk_steps > MIN_STEPS_TO_COUNT:
                step_data.append((walk_start, walk_end, walk_steps))
            walk_start = 0
            walk_end = 0
            walk_steps = 0
    else:
        strikes = 0  # Reset strike counter if we had steps this minute
        in_current_walk = True
        if walk_start == 0:
            walk_start = data_start + (counter * data_interval)
        walk_steps += steps
# BUG FIX: a walk still in progress when the data ends was silently dropped
# by the original loop; flush it here using the data's end time.
if in_current_walk and walk_steps > MIN_STEPS_TO_COUNT:
    step_data.append((walk_start, data_end, walk_steps))
# Print a human-readable summary of the detected walks.
# NOTE(review): the loop below rebinds `d` (previously the selected date
# string) to datetime objects; harmless only because the header line is
# printed first — TODO confirm.
print "Here are your walks for %s:" % (d)
for start, end, steps in step_data:
    d = datetime.datetime.fromtimestamp(start)
    s = d.strftime("%H:%M")
    d = datetime.datetime.fromtimestamp(end)
    e = d.strftime("%H:%M")
    print "%s - %s: %d" % (s, e, steps)
# Connect to the Fitbit API with the credentials loaded above.
# NOTE(review): on failure this only logs and continues, so `authd_client`
# would be undefined below — presumably a sys.exit() was intended; confirm.
try:
    authd_client = fitbit.Fitbit(FB_CONSUMER_KEY, FB_CONSUMER_SECRET,
                                 resource_owner_key=FB_RESOURCE_OWNER_KEY, resource_owner_secret=FB_RESOURCE_OWNER_SECRET)
except:
    print "Exception connecting to fitbit"
    traceback.print_exc()
# print "Browsing acts"
# a=authd_client.activities_list()
# pprint.pprint(a)
for start, end, steps in step_data:
start_date = time.strftime('%Y-%m-%d', time.localtime(start))
start_time = time.strftime('%H:%M', time.localtime(start))
duration_millis = 1000 * (end - start)
try:
activity_data = dict(
activityId=FB_WALKING_ACTIVITY, distance=steps, distanceUnit='Steps',
date=start_date, startTime=start_time, durationMillis=duration_millis)
authd_client.log_activity(activity_data)
print "Posted walking data to fitbit for %s (%s)" % (start_date, start_time)
except:
print "Exception posting data to fitbit, sleeping for 1 hour and retrying once" % (activity_data)
print activity_data
traceback.print_exc()
time.sleep(60 * 60)
activity_data = dict(
activityId=FB_WALKING_ACTIVITY, distance=steps, distanceUnit='Steps',
date=start_date, startTime=start_time, durationMillis=duration_millis)
try:
authd_client.log_activity(activity_data)
print "Posted walking data to fitbit for %s (%s)" % (start_date, start_time)
except:
print "Another exception, bombing out, sorry (today was %s)" % (today)
traceback.print_exc()
sys.exit(1)
# x=0
# for activity in activity_data:
# x+=1
# if x > 10:
# print "Pausing after %d runs" % x
# time.sleep(1)
# x=0
# d=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(activity[0]))
# s=activity[2]
# print "Date: %s, Steps: %d" % (d,s)
# start_date = time.strftime('%Y-%m-%d', time.localtime(activity[0]))
# start_time = time.strftime('%H:%M', time.localtime(activity[0]))
# duration_millis = 1000 * ( activity[1] - activity[0])
# Post to fitbit: activityId=90013, d
## unauth_client = fitbit.Fitbit(FB_CONSUMER_KEY,FB_CONSUMER_SECRET)
# try:
# activity_data = dict(activityId=FB_WALKING_ACTIVITY, distance=s, distanceUnit='Steps', date=start_date, startTime=start_time, durationMillis=duration_millis)
# authd_client.log_activity(activity_data)
# print "Posted walking data to fitbit for %s" % (d)
## cont=raw_input("Enter to continue")
# except:
# print "Exception posting data"
# traceback.print_exc()
| scottmccool/basis_utilities | basis_steps_to_fitbit.py | Python | mit | 5,938 |
#
# As with the perceptron exercise, you will modify the
# last functions of this sigmoid unit class
#
# There are two functions for you to finish:
# First, in activate(), write the sigmoid activation function
#
# Second, in train(), write the gradient descent update rule
#
# NOTE: the following exercises creating classes for functioning
# neural networks are HARD, and are not efficient implementations.
# Consider them an extra challenge, not a requirement!
import numpy as np
class Sigmoid:
    """A single sigmoid (logistic) unit trained by gradient descent."""

    def __init__(self, weights=None):
        # Always define self.weights: the original only assigned it when a
        # truthy value was passed, so Sigmoid() raised AttributeError on
        # first use.  An explicit `is not None` check also accepts [].
        self.weights = weights if weights is not None else []

    def strength(self, values):
        """Return the weighted sum (dot product) of inputs and weights."""
        return np.dot(values, self.weights)

    def activate(self, values):
        """Return the sigmoid activation for the given inputs.

        @param values: sequence of input values, same length as weights.
        @return: 1 / (1 + exp(-strength)), a float in (0, 1).
        """
        # First calculate the strength with which the unit fires, then
        # squash it through the logistic function.
        strength = self.strength(values)
        return 1 / (1.0 + np.exp(-strength))

    def update(self, values, train, eta=.1):
        """Perform one gradient-descent weight update.

        @param values: input values
        @param train: expected outputs; only train[0] is used
        @param eta: learning rate
        Uses the derivative d/dx logistic(x) = logistic(x) * (1 - logistic(x)).
        """
        sigmoid = self.activate(values)
        # Derivative of the logistic function at the current activation.
        gradient = sigmoid * (1.0 - sigmoid)
        for i in range(len(values)):
            self.weights[i] += eta * (train[0] - sigmoid) * gradient * values[i]
# Demo: one gradient-descent step on a fixed example.
unit = Sigmoid(weights=[3, -2, 1])
unit.update([1, 2, 3], [0])
# Parenthesized call keeps this line valid on both Python 2 and Python 3
# (with a single argument the printed output is identical).
print(unit.weights)
#Expected: [2.99075, -2.0185, .97225]
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
from xml.sax.saxutils import escape
from gi.repository import GObject
from gi.repository import Gtk
from rednotebook.gui.customwidgets import CustomComboBoxEntry, CustomListView
from rednotebook.util import dates
class SearchComboBox(CustomComboBoxEntry):
    """Combo box entry used to search the journal.

    Typing performs an incremental search; hitting enter additionally
    stores the query in the combo history before searching again.
    """

    def __init__(self, combo_box, main_window):
        CustomComboBoxEntry.__init__(self, combo_box)
        self.main_window = main_window
        self.journal = main_window.journal

        # A clear icon in the entry resets the current query.
        self.entry.set_icon_from_stock(1, Gtk.STOCK_CLEAR)
        self.entry.connect('icon-press', lambda *args: self.set_active_text(''))

        self.entry.connect('changed', self.on_entry_changed)
        self.entry.connect('activate', self.on_entry_activated)

    def on_entry_changed(self, entry):
        """Search incrementally whenever the entry text changes."""
        self.search(self.get_active_text())

    def on_entry_activated(self, entry):
        """Remember the query in the combo history, then search."""
        query = self.get_active_text()
        self.add_entry(query)
        self.search(query)

    def search(self, search_text):
        # Words prefixed with '#' act as tag filters; the remaining words
        # form the full-text query.
        words = search_text.split()
        tags = [word.lstrip('#').lower() for word in words
                if word.startswith('#')]
        search_text = ' '.join(word for word in words
                               if not word.startswith('#'))

        # Highlight all occurences in the current day's text
        self.main_window.highlight_text(search_text)

        # Scroll to the first hit once the UI is idle.
        if search_text:
            GObject.idle_add(
                self.main_window.day_text_field.scroll_to_text,
                search_text)

        self.main_window.search_tree_view.update_data(search_text, tags)
class SearchTreeView(CustomListView):
    """Result list showing one (date, text snippet) row per search hit."""

    def __init__(self, main_window, always_show_results):
        CustomListView.__init__(self, [(_('Date'), str), (_('Text'), str)])
        self.main_window = main_window
        self.journal = self.main_window.journal
        self.always_show_results = always_show_results
        self.tree_store = self.get_model()
        self.connect('cursor_changed', self.on_cursor_changed)

    def update_data(self, search_text, tags):
        self.tree_store.clear()

        # With neither a query nor tags (and results not forced on),
        # show the tag cloud instead of an empty result list.
        if not (self.always_show_results or tags or search_text):
            self.main_window.cloud.show()
            self.main_window.search_scroll.hide()
            return

        self.main_window.cloud.hide()
        self.main_window.search_scroll.show()

        for date_string, entries in self.journal.search(search_text, tags):
            for entry in entries:
                # Escape user text for markup, then convert the search
                # markers into Pango bold tags.
                row_text = escape(entry)
                row_text = row_text.replace('STARTBOLD', '<b>').replace('ENDBOLD', '</b>')
                self.tree_store.append([date_string, row_text])

    def on_cursor_changed(self, treeview):
        """Move to the selected day when user clicks on it"""
        model, paths = self.get_selection().get_selected_rows()
        if not paths:
            return
        selected = self.tree_store[paths[0]][0]
        self.journal.change_date(dates.get_date_from_date_string(selected))
| jendrikseipp/rednotebook-elementary | rednotebook/gui/search.py | Python | gpl-2.0 | 4,060 |
from django import forms
from japos.goods.models import Product, Group
class ProductForm(forms.ModelForm):
    """ModelForm for Product with admin-theme CSS classes on every widget."""
    sku = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
    barcode = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
    name = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
    description = forms.CharField(widget = forms.Textarea(attrs={'class': 'textarea', 'rows': '5', 'cols': '50'}))
    stock = forms.IntegerField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
    purchase_price = forms.DecimalField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
    class Meta:
        model = Product
        # Modern Django raises ImproperlyConfigured when a ModelForm declares
        # neither 'fields' nor 'exclude'; '__all__' keeps the previous
        # implicit behaviour of exposing every model field.
        fields = '__all__'
class GroupForm(forms.ModelForm):
    """ModelForm for Group with admin-theme CSS classes on every widget."""
    sku = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
    name = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
    class Meta:
        model = Group
        # Modern Django requires an explicit field list; '__all__' preserves
        # the old implicit all-fields behaviour.
        fields = '__all__'
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
from unittest import TestCase
import torch
import torch.nn as nn
from bigdl.orca.learn.metrics import Accuracy
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.learn.pytorch import Estimator
class LinearDataset(torch.utils.data.Dataset):
    """Synthetic binary-classification dataset.

    The first half of the samples is drawn from N(0, 1) with label 0, the
    second half from N(1.5, 1) with label 1 (50 features per sample).
    """

    def __init__(self, size=1000):
        half = size // 2
        negatives = torch.randn(half, 50)
        positives = torch.randn(half, 50) + 1.5
        self.x = torch.cat([negatives, positives], dim=0)
        self.y = torch.cat([torch.zeros(half, 1), torch.ones(half, 1)], dim=0)

    def __getitem__(self, index):
        # Insert a leading unit dimension so each sample is (1, features)
        # and each label is (1, 1).
        return self.x[index, None], self.y[index, None]

    def __len__(self):
        return len(self.x)
def train_data_loader(config, batch_size):
    """Build a DataLoader over a fresh synthetic training set.

    ``config["data_size"]`` controls the number of samples (default 1000).
    """
    dataset = LinearDataset(size=config.get("data_size", 1000))
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size)
def val_data_loader(config, batch_size):
    """Build a DataLoader over a fresh synthetic validation set.

    ``config["val_size"]`` controls the number of samples (default 400).
    """
    dataset = LinearDataset(size=config.get("val_size", 400))
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size)
class Net(nn.Module):
    """Small binary classifier: 50 -> 50 -> 100 -> 1 with a sigmoid output.

    Attribute names are part of the checkpoint state_dict, so they are
    kept unchanged.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(50, 50)
        self.relu1 = nn.ReLU()
        self.dout = nn.Dropout(0.2)
        self.fc2 = nn.Linear(50, 100)
        self.prelu = nn.PReLU(1)
        self.out = nn.Linear(100, 1)
        self.out_act = nn.Sigmoid()

    def forward(self, input_):
        hidden = self.dout(self.relu1(self.fc1(input_)))
        features = self.prelu(self.fc2(hidden))
        return self.out_act(self.out(features))
def get_model(config):
    """Model creator for the estimator; ``config`` is accepted but unused."""
    # Fixed seed so every worker builds identically-initialized weights.
    torch.manual_seed(0)
    return Net()
def get_optimizer(model, config):
    """Plain SGD over all model parameters; ``config["lr"]`` defaults to 1e-2."""
    learning_rate = config.get("lr", 1e-2)
    return torch.optim.SGD(model.parameters(), lr=learning_rate)
class TestPytorchEstimator(TestCase):
    """End-to-end check of Estimator.from_torch on the ray backend."""

    def setUp(self):
        # Attach to an already-running local ray cluster.
        init_orca_context(runtime="ray", address="localhost:6379")

    def tearDown(self):
        stop_orca_context()

    def test_train(self):
        # Two workers with synchronized stats so evaluate/fit return
        # cluster-wide aggregates.
        estimator = Estimator.from_torch(model=get_model,
                                         optimizer=get_optimizer,
                                         loss=nn.BCELoss(),
                                         metrics=Accuracy(),
                                         config={"lr": 1e-2},
                                         workers_per_node=2,
                                         backend="torch_distributed",
                                         sync_stats=True)
        # Baseline metrics before any training.
        start_val_stats = estimator.evaluate(val_data_loader, batch_size=32)
        print(start_val_stats)
        train_stats = estimator.fit(train_data_loader, epochs=1, batch_size=32)
        print(train_stats)
        end_val_stats = estimator.evaluate(val_data_loader, batch_size=32)
        print(end_val_stats)
        assert 0 < end_val_stats["Accuracy"] < 1
        assert estimator.get_model()

        # sanity check that training worked: loss must drop and accuracy rise
        # after one epoch on this easily-separable synthetic data.
        dloss = end_val_stats["val_loss"] - start_val_stats["val_loss"]
        dacc = (end_val_stats["Accuracy"] - start_val_stats["Accuracy"])
        print(f"dLoss: {dloss}, dAcc: {dacc}")
        assert dloss < 0 < dacc, "training sanity check failed. loss increased!"
# Allow running this test module directly, outside a pytest collection run.
if __name__ == "__main__":
    pytest.main([__file__])
| intel-analytics/BigDL | python/orca/test/bigdl/orca/learn/ray/pytorch/test_ray_pytorch_estimator.py | Python | apache-2.0 | 4,024 |
import PythonQt
from PythonQt import QtCore, QtGui, QtUiTools
from director import objectmodel as om
from director import visualization as vis
from director import transformUtils
from director import vtkAll as vtk
from director import cameracontrol
from director import propertyset
from director import pointpicker
import numpy as np
def addWidgetsToDict(widgets, d):
    """Recursively register named widgets (and their children) in *d*.

    Widgets with an empty objectName are skipped, and their subtrees are
    not descended into (same as the original behaviour).
    """
    for widget in widgets:
        name = widget.objectName
        if name:
            d[str(name)] = widget
            addWidgetsToDict(widget.children(), d)
class WidgetDict(object):
    """Expose every named widget in a hierarchy as an instance attribute."""

    def __init__(self, widgets):
        # vars(self) is the instance __dict__, so registering widgets there
        # makes each one reachable as self.<objectName>.
        addWidgetsToDict(widgets, vars(self))
def clearLayout(w):
    """Delete every QWidget found underneath *w*."""
    for child in w.findChildren(QtGui.QWidget):
        child.delete()
class PolyDataFrameConverter(cameracontrol.TargetFrameConverter):
    """Target-frame adapter for objects that expose a child frame."""

    def __init__(self, obj):
        if obj is None:
            self.targetFrame = None
        else:
            # Ensure the object has a child frame, then track it.
            vis.addChildFrame(obj)
            self.targetFrame = obj.getChildFrame()

    @classmethod
    def canConvert(cls, obj):
        return hasattr(obj, 'getChildFrame')
class RobotFrameConverter(cameracontrol.TargetFrameConverter):
    """Target-frame adapter that follows a robot model as it moves."""

    def __init__(self, robotModel):
        self.robotModel = robotModel
        self.targetFrame = vis.FrameItem('robot frame', vtk.vtkTransform(), None)
        # Keep the frame in sync with every model update.
        self.callbackId = robotModel.connectModelChanged(self.onModelChanged)
        self.updateTargetFrame()

    def updateTargetFrame(self):
        jointPositions = self.robotModel.model.getJointPositions()
        # First three joints are the base position; the next three are
        # roll/pitch/yaw in radians, converted to degrees for the helper.
        position = jointPositions[:3]
        rpyDegrees = np.degrees(jointPositions[3:6])
        self.targetFrame.copyFrame(
            transformUtils.frameFromPositionAndRPY(position, rpyDegrees))

    def onModelChanged(self, robotModel):
        self.updateTargetFrame()

    @classmethod
    def canConvert(cls, obj):
        return hasattr(obj, 'connectModelChanged')
class CameraControlPanel(object):
    """Panel that lets the user pick a target object and a camera track mode.

    Wires a Qt .ui file to a CameraTrackerManager: the user selects a
    target (by clicking in the view or the object tree) and a tracking
    mode; mode-specific actions and properties are rebuilt on the fly.
    """

    def __init__(self, view):
        self.view = view
        self.trackerManager = cameracontrol.CameraTrackerManager()
        self.trackerManager.setView(view)
        # Load the panel layout from the compiled Qt resource file.
        loader = QtUiTools.QUiLoader()
        uifile = QtCore.QFile(':/ui/ddCameraControlPanel.ui')
        assert uifile.open(uifile.ReadOnly)
        self.widget = loader.load(uifile)
        # Expose every named child widget as self.ui.<name>.
        self.ui = WidgetDict(self.widget.children())
        self.ui.targetNameLabel.setText('None')
        self.ui.setTargetButton.connect('clicked()', self.onSetTarget)
        # One combo entry per registered tracker mode.
        for modeName in self.trackerManager.trackers.keys():
            self.ui.trackModeCombo.addItem(modeName)
        self.ui.trackModeCombo.connect('currentIndexChanged(const QString&)', self.onTrackModeChanged)
        # Controls stay disabled until a target is chosen.
        self.ui.controlFrame.setEnabled(False)
        l = self.ui.propertiesFrame.layout()
        self.propertiesPanel = PythonQt.dd.ddPropertiesPanel()
        self.propertiesPanel.setBrowserModeToWidget()
        l.addWidget(self.propertiesPanel)
        self.panelConnector = None
        # Picker used for the click-to-select-target interaction.
        self.objectPicker = pointpicker.ObjectPicker(self.view)
        self.objectPicker.callbackFunc = self.onPickObject
        self.objectPicker.abortFunc = self.onAbortPick
        self.picking = False
        om.getDefaultObjectModel().connectObjectClicked(self.onTreeClicked)

    def onPickObject(self, objs):
        # Picker callback: take the first hit, or cancel if nothing was hit.
        if objs:
            self.setTarget(objs[0])
        else:
            self.onAbortPick()

    def onTreeClicked(self, tree, obj):
        # While picking, a click in the object tree also selects the target.
        if not self.picking:
            return
        self.setTarget(obj)

    def onAbortPick(self):
        # Restore the normal (non-picking) UI state.
        self.ui.selectedObjectNameLabel.setText('')
        self.ui.setTargetButton.setVisible(True)
        self.objectPicker.stop()
        self.picking = False

    def getSelectedTarget(self):
        # Only objects with an actor are valid camera targets.
        obj = om.getActiveObject()
        return obj if obj and hasattr(obj, 'actor') else None

    def getObjectShortName(self, obj):
        """Return the object name elided to at most 15 characters."""
        name = obj.getProperty('Name')
        maxLength = 15
        if len(name) > maxLength:
            name = name[:maxLength-3] + '...'
        return name

    def onObjectRemoved(self, objectModel, obj):
        # Drop the target if it disappears from the object model.
        if obj == self.trackerManager.target:
            self.setTarget(None)

    def onSetTarget(self):
        # Enter picking mode: next click in view or tree chooses the target.
        self.ui.setTargetButton.setVisible(False)
        self.ui.selectedObjectNameLabel.setText('Click an object in the view...')
        self.objectPicker.start()
        self.picking = True

    def setTarget(self, obj):
        """Install *obj* as the camera target (or clear it when None)."""
        self.onAbortPick()
        # Try each frame-converter in order; fall back to no target.
        converters = [PolyDataFrameConverter, RobotFrameConverter]
        for converter in converters:
            if converter.canConvert(obj):
                converter = converter(obj)
                break
        else:
            obj = None
            converter = None
        if obj is not None:
            obj.connectRemovedFromObjectModel(self.onObjectRemoved)
        self.trackerManager.setTarget(converter)
        name = self.getObjectShortName(obj) if obj else 'None'
        self.ui.targetNameLabel.setText(name)
        self.ui.controlFrame.setEnabled(obj is not None)
        if not obj:
            self.ui.trackModeCombo.setCurrentIndex(0)

    def onTrackModeChanged(self):
        mode = self.ui.trackModeCombo.currentText
        self.setTrackMode(mode)

    def setTrackMode(self, mode):
        """Activate *mode* and rebuild its action buttons and properties."""
        self.trackerManager.setTrackerMode(mode)
        clearLayout(self.ui.actionsFrame)
        actions = self.trackerManager.getModeActions()
        for actionName in actions:
            # Factory function binds actionName per-iteration, avoiding the
            # late-binding-closure pitfall with loop variables.
            def newActionButton(actionName):
                actionButton = QtGui.QPushButton(actionName)
                def onAction():
                    self.trackerManager.onModeAction(actionName)
                actionButton.connect('clicked()', onAction)
                return actionButton
            self.ui.actionsFrame.layout().addWidget(newActionButton(actionName))
        # Rebuild the properties panel for the new mode.
        self.propertiesPanel.clear()
        if self.panelConnector:
            self.panelConnector.cleanup()
        self.panelConnector = None
        properties = self.trackerManager.getModeProperties()
        if properties:
            self.panelConnector = propertyset.PropertyPanelConnector(properties, self.propertiesPanel)
| patmarion/director | src/python/director/cameracontrolpanel.py | Python | bsd-3-clause | 6,133 |
# coding=utf-8
"""
__Arthur Marble__
__init__.py is here so we can import GameManager in Main.py
"""
from .ResourceLoader import *
| bubbles231/Hangman-Clone | Engine/__init__.py | Python | mit | 131 |
# -*- coding: utf-8 -*-
"""Parse settings, set defaults."""
#: Default values for settings.
DEFAULTS = {
    'diecutter.service': 'diecutter.local:LocalService',
    'diecutter.engine': 'jinja2',
    'diecutter.filename_engine': 'filename',
    'diecutter.engine.django': 'piecutter.engines.django:DjangoEngine',
    'diecutter.engine.jinja2': 'piecutter.engines.jinja:Jinja2Engine',
    'diecutter.engine.filename': 'piecutter.engines.filename:FilenameEngine',
}


def normalize(settings=None):
    """Return a copy of settings dictionary with normalized values.

    Sets default values if necessary.  The input mapping is never
    modified.

    ``settings`` defaults to ``None`` instead of a mutable ``{}``
    default argument (the shared-default pitfall); passing ``None`` is
    equivalent to passing an empty mapping.
    """
    if settings is None:
        settings = {}
    normalized = settings.copy()
    for key, value in DEFAULTS.items():
        normalized.setdefault(key, value)
    return normalized
| diecutter/diecutter | diecutter/settings.py | Python | bsd-3-clause | 748 |
import falcon
class Resource(object):
    """Minimal benchmark resource that returns a constant plain-text body."""

    def on_get(self, req, resp):
        resp.content_type = 'text/plain'
        resp.body = 'Hello, world!'
        resp.status = falcon.HTTP_200
# WSGI entry point: expose the resource under /text for the benchmark harness.
app = falcon.API()
app.add_route('/text', Resource())
| MuhammadAlkarouri/hug | benchmarks/http/falcon_test.py | Python | mit | 245 |
# coding=utf-8
#
# BSD 3-Clause License
#
# Copyright (c) 2016-18, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contact container used throughout ConKit"""
from __future__ import division
from __future__ import print_function
__author__ = "Felix Simkovic"
__date__ = "03 Aug 2016"
__version__ = "1.0"
from enum import Enum, unique
from conkit.core.entity import Entity
from conkit.core.mappings import AminoAcidOneToThree, AminoAcidThreeToOne, ContactMatchState
from conkit.misc import deprecate
class Contact(Entity):
    """A contact pair template to store all associated information

    Examples
    --------
    >>> from conkit.core import Contact
    >>> contact = Contact(1, 25, 1.0)
    >>> print(contact)
    Contact(id="(1, 25)" res1="A" res1_seq=1 res2="A" res2_seq=25 raw_score=1.0)

    Attributes
    ----------
    distance_bound : tuple
       The lower and upper distance boundary values of a contact pair in Ångstrom [Default: 0-8Å].
    id : str
       A unique identifier
    true_positive : bool
       A boolean status for the contact
    true_negative : bool
       A boolean status for the contact
    false_positive : bool
       A boolean status for the contact
    false_negative : bool
       A boolean status for the contact
    status_unknown : bool
       A boolean status for the contact
    lower_bound : int
       The lower distance boundary value
    raw_score : float
       The prediction score for the contact pair
    res1 : str
       The amino acid of residue 1 [default: X]
    res2 : str
       The amino acid of residue 2 [default: X]
    res1_chain : str
       The chain for residue 1
    res2_chain : str
       The chain for residue 2
    res1_seq : int
       The residue sequence number of residue 1
    res2_seq : int
       The residue sequence number of residue 2
    res1_altseq : int
       The alternative residue sequence number of residue 1
    res2_altseq : int
       The alternative residue sequence number of residue 2
    scalar_score : float
       The :attr:`~conkit.core.contact.Contact.raw_score` scaled according to its average
    status : int
       An indication of the residue status
    upper_bound : int
       The upper distance boundary value
    weight : float
       A separate internal weight factor for the contact pair

    """
    # __slots__ keeps per-instance memory small; note that properties below
    # expose the underscored slots under their public names.
    __slots__ = [
        '_distance_bound', 'raw_score', '_res1', '_res2', 'res1_chain', 'res2_chain', '_res1_seq', '_res2_seq',
        '_res1_altseq', '_res2_altseq', 'scalar_score', '_status', 'weight'
    ]

    def __init__(self, res1_seq, res2_seq, raw_score, distance_bound=(0, 8)):
        """Initialize a generic contact pair

        Parameters
        ----------
        distance_bound : tuple, optional
           The lower and upper distance boundary values of a contact pair in Ångstrom.
           Default is set to between 0.0 and 8.0 Å.
        raw_score : float
           The covariance score for the contact pair
        res1_seq : int
           The residue sequence number of residue 1
        res2_seq : int
           The residue sequence number of residue 2

        """
        self.raw_score = raw_score
        self.res1_chain = ''
        self.res2_chain = ''
        self.scalar_score = 0.0
        self.weight = 1.0

        self._distance_bound = [0.0, 8.0]
        self._res1 = 'X'
        self._res2 = 'X'
        self._res1_seq = 0
        self._res2_seq = 0
        self._res1_altseq = 0
        self._res2_altseq = 0
        self._status = ContactMatchState.unknown

        # Assign through the property setters so the values are validated.
        self.distance_bound = distance_bound
        self.res1_seq = res1_seq
        self.res2_seq = res2_seq

        super(Contact, self).__init__((res1_seq, res2_seq))

    def __repr__(self):
        text = "{name}(id={id} res1={_res1} res1_chain={res1_chain} res1_seq={_res1_seq} " \
               "res2={_res2} res2_chain={res2_chain} res2_seq={_res2_seq} raw_score={raw_score})"
        # Expand all slot values as format arguments (underscored names are
        # referenced directly in the template above).
        return text.format(
            name=self.__class__.__name__, id=self._id, **{k: getattr(self, k)
                                                          for k in self.__class__.__slots__})

    @property
    def distance_bound(self):
        """The lower and upper distance boundary values of a contact pair in Ångstrom [Default: 0-8Å]."""
        return tuple(self._distance_bound)

    @distance_bound.setter
    def distance_bound(self, distance_bound):
        """Define the lower and upper distance boundary value

        Parameters
        ----------
        distance_bound : list, tuple
           A 2-element list/tuple with a lower and upper distance boundary value

        """
        if isinstance(distance_bound, (list, tuple)):
            self._distance_bound = list(map(float, distance_bound))
        else:
            raise TypeError("Data of type list or tuple required")

    @property
    @deprecate('0.11', msg='Use true_positive instead')
    def is_match(self):
        """Deprecated alias for :attr:`true_positive`."""
        return self._status == ContactMatchState.true_positive

    @property
    @deprecate('0.11', msg='Use false_positive instead')
    def is_mismatch(self):
        """Deprecated alias for :attr:`false_positive`."""
        return self._status == ContactMatchState.false_positive

    @property
    @deprecate('0.11', msg='Use status_unknown instead')
    def is_unknown(self):
        """Deprecated alias for :attr:`status_unknown`."""
        return self._status == ContactMatchState.unknown

    @property
    def lower_bound(self):
        """The lower distance boundary value"""
        return self.distance_bound[0]

    @lower_bound.setter
    def lower_bound(self, value):
        """Set the lower distance boundary value

        Parameters
        ----------
        value : int, float

        Raises
        ------
        :exc:`ValueError`
           :attr:`~conkit.core.contact.Contact.lower_bound` must be positive
        :exc:`ValueError`
           :attr:`~conkit.core.contact.Contact.lower_bound` must be smaller than
           :attr:`~conkit.core.contact.Contact.upper_bound`

        """
        if 0 < value < self.upper_bound:
            self._distance_bound[0] = float(value)
        else:
            raise ValueError('Lower bound must be positive and smaller than upper bound')

    @property
    def upper_bound(self):
        """The upper distance boundary value"""
        return self.distance_bound[1]

    @upper_bound.setter
    def upper_bound(self, value):
        """Set the upper distance boundary value

        Parameters
        ----------
        value : int, float

        Raises
        ------
        :exc:`ValueError`
           :attr:`~conkit.core.contact.Contact.upper_bound` must be positive
        :exc:`ValueError`
           :attr:`~conkit.core.contact.Contact.upper_bound` must be larger than
           :attr:`~conkit.core.contact.Contact.lower_bound`

        """
        # Chained comparison: value > 0 and value > lower_bound.
        if 0 < value > self.lower_bound:
            self._distance_bound[1] = float(value)
        else:
            raise ValueError('Upper bound must be positive and larger than lower bound')

    @property
    def res1(self):
        """The amino acid of residue 1 [default: X]"""
        return self._res1

    @res1.setter
    def res1(self, amino_acid):
        """Define the amino acid of residue 1

        Parameters
        ----------
        amino_acid : str
           The one- or three-letter code of an amino acid

        """
        self._res1 = Contact._set_residue(amino_acid)

    @property
    def res2(self):
        """The amino acid of residue 2 [default: X]"""
        return self._res2

    @res2.setter
    def res2(self, amino_acid):
        """Define the amino acid of residue 2

        Parameters
        ----------
        amino_acid : str
           The one- or three-letter code of an amino acid

        """
        self._res2 = Contact._set_residue(amino_acid)

    @property
    def res1_altseq(self):
        """The alternative residue sequence number of residue 1"""
        return self._res1_altseq

    @res1_altseq.setter
    def res1_altseq(self, index):
        """Define the alternative residue 1 sequence index

        Parameters
        ----------
        index : int

        """
        if isinstance(index, int):
            self._res1_altseq = index
        else:
            raise TypeError('Data type int required for res_seq')

    @property
    def res2_altseq(self):
        """The alternative residue sequence number of residue 2"""
        return self._res2_altseq

    @res2_altseq.setter
    def res2_altseq(self, index):
        """Define the alternative residue 2 sequence index

        Parameters
        ----------
        index : int

        """
        if isinstance(index, int):
            self._res2_altseq = index
        else:
            raise TypeError('Data type int required for res_seq')

    @property
    def res1_seq(self):
        """The residue sequence number of residue 1"""
        return self._res1_seq

    @res1_seq.setter
    def res1_seq(self, index):
        """Define residue 1 sequence index

        Parameters
        ----------
        index : int

        Raises
        ------
        :exc:`TypeError`
           Data type :obj:`int` required for :attr:`~conkit.core.contact.Contact.res1_seq`

        """
        if isinstance(index, int):
            self._res1_seq = index
        else:
            raise TypeError('Data type int required for res_seq')

    @property
    def res2_seq(self):
        """The residue sequence number of residue 2"""
        return self._res2_seq

    @res2_seq.setter
    def res2_seq(self, index):
        """Define residue 2 sequence index

        Parameters
        ----------
        index : int

        Raises
        ------
        :exc:`TypeError`
           Data type :obj:`int` required for :attr:`~conkit.core.contact.Contact.res2_seq`

        """
        if isinstance(index, int):
            self._res2_seq = index
        else:
            raise TypeError('Data type int required for res_seq')

    @property
    def status(self):
        """An indication of the residue status"""
        return self._status.value

    @status.setter
    def status(self, status):
        """Set the status

        Parameters
        ----------
        status : int, :obj:`~conkit.core.mappings.ContactMatchState`
           [0] for :attr:`~conkit.core.mappings.ContactMatchState.unknown`,
           [1] for :attr:`~conkit.core.mappings.ContactMatchState.true_positive`,
           [2] for :attr:`~conkit.core.mappings.ContactMatchState.true_negative`,
           [3] for :attr:`~conkit.core.mappings.ContactMatchState.false_positive`,
           [4] for :attr:`~conkit.core.mappings.ContactMatchState.false_negative`,

        Raises
        ------
        :exc:`ValueError`
           Not a valid :obj:`~conkit.core.mappings.ContactMatchState`

        """
        self._status = ContactMatchState(status)

    @property
    def true_positive(self):
        """Boolean indicating whether this contact is a true positive"""
        return self._status == ContactMatchState.true_positive

    @true_positive.setter
    def true_positive(self, is_tp):
        # A falsy assignment resets the contact to the unknown state.
        if is_tp:
            self._status = ContactMatchState.true_positive
        else:
            self.status_unknown = True

    @property
    def true_negative(self):
        """Boolean indicating whether this contact is a true negative"""
        return self._status == ContactMatchState.true_negative

    @true_negative.setter
    def true_negative(self, is_tn):
        # A falsy assignment resets the contact to the unknown state.
        if is_tn:
            self._status = ContactMatchState.true_negative
        else:
            self.status_unknown = True

    @property
    def false_positive(self):
        """Boolean indicating whether this contact is a false positive"""
        return self._status == ContactMatchState.false_positive

    @false_positive.setter
    def false_positive(self, is_fp):
        # A falsy assignment resets the contact to the unknown state.
        if is_fp:
            self._status = ContactMatchState.false_positive
        else:
            self.status_unknown = True

    @property
    def false_negative(self):
        """Boolean indicating whether this contact is a false negative"""
        return self._status == ContactMatchState.false_negative

    @false_negative.setter
    def false_negative(self, is_fn):
        # A falsy assignment resets the contact to the unknown state.
        if is_fn:
            self._status = ContactMatchState.false_negative
        else:
            self.status_unknown = True

    @property
    def status_unknown(self):
        """Boolean indicating whether this contact's status is unknown"""
        return self._status == ContactMatchState.unknown

    @status_unknown.setter
    def status_unknown(self, is_unknown):
        # Only a truthy assignment is meaningful here; assigning False would
        # leave the intended state ambiguous, hence the ValueError.
        if is_unknown:
            self._status = ContactMatchState.unknown
        else:
            raise ValueError("Choose one of true_positive, false_positive, true_negative, false_negative instead!")

    @deprecate('0.11', msg='Use true_positive instead')
    def define_match(self):
        """Define a contact as matching contact"""
        self._status = ContactMatchState.true_positive

    @deprecate('0.11', msg='Use false_positive instead')
    def define_mismatch(self):
        """Define a contact as mismatching contact"""
        self._status = ContactMatchState.false_positive

    @deprecate('0.11', msg='Use status_unknown instead')
    def define_unknown(self):
        """Define a contact with unknown status"""
        self._status = ContactMatchState.unknown

    def _to_dict(self):
        """Convert the object into a dictionary"""
        # NOTE(review): true_negative/false_negative are not exported here
        # although those states exist -- confirm whether that is intended.
        keys = ['id', 'true_positive', 'false_positive', 'status_unknown', 'lower_bound', 'upper_bound']
        keys += [k for k in self.__slots__]
        dict_ = {}
        for k in keys:
            # Strip the leading underscore so private slots appear under
            # their public property names.
            if k[0] == '_':
                k = k[1:]
            dict_[k] = getattr(self, k)
        return dict_

    @staticmethod
    def _set_residue(amino_acid):
        """Assign the residue to the corresponding amino_acid"""
        a_a = amino_acid.upper()
        if a_a in AminoAcidOneToThree.__members__:
            return a_a
        elif a_a in AminoAcidThreeToOne.__members__:
            # Normalize three-letter codes to their one-letter equivalent.
            return AminoAcidThreeToOne[a_a].value
        else:
            raise ValueError("Unknown amino acid: {} (assert all is uppercase!)".format(amino_acid))
| fsimkovic/conkit | conkit/core/contact.py | Python | bsd-3-clause | 15,242 |
# Copyright (C) 2015 by Per Unneberg
import pytest
from snakemakelib.sample.organization import sample_org
def test_sample_org():
    # Smoke test: importing and printing sample_org must not raise.
    # NOTE(review): no assertion is made on the value itself.
    print (sample_org)
| percyfal/snakemakelib-core | snakemakelib/sample/tests/test_sampleorganization.py | Python | mit | 154 |
#!/usr/bin/python
# Tool to compare MPlayer translation files against a base file. Reports
# conflicting definitions, mismatching arguments, extra definitions
# not present in the base file and (optionally) missing definitions.
# Written by Uoti Urpala
import sys
import re
def parse(filename):
    # Parse an MPlayer help/translation file into a dict mapping #define
    # names to their quote-stripped, continuation-joined string values.
    # Duplicate definitions with differing values are reported as conflicts.
    r = {}
    f = open(filename)
    it = iter(f)
    cur = ''  # NOTE(review): unused
    for line in it:
        line = line.strip()
        if not line.startswith('#define'):
            # Not a definition: skip it together with any
            # backslash-continued follow-up lines.
            while line and line[-1] == '\\':
                line = it.next().strip()
            continue
        try:
            _, name, value = line.split(None, 2)
        except ValueError:
            # NOTE(review): on a short "#define NAME" line this reads the
            # 'name' left over from the previous iteration, and when that
            # stale name is not in 'r' it falls through to strip the stale
            # 'value' below -- looks suspicious; confirm intended behaviour.
            if name in r:
                continue
        value = value.strip('"')
        # Join backslash-continued value lines into one string.
        while line[-1] == '\\':
            line = it.next().strip()
            value += line.rstrip('\\').strip('"')
        if name in r:
            print 'Conflict: ', name
            print r[name]
            print value
            print
        r[name] = value
    f.close()
    return r
def compare(base, other, show_missing=False):
    # Compare translation dict 'other' against 'base': report printf-style
    # argument mismatches, extra keys, and (optionally) missing keys.
    # Note: 'other' is consumed -- keys present in 'base' are deleted.
    # Regex matching one printf-style conversion specifier.
    r = re.compile('%[^diouxXeEfFgGaAcspn%]*[diouxXeEfFgGaAcspn%]')
    missing = []
    for key in base:
        if key not in other:
            missing.append(key)
            continue
        # The specifier lists must match exactly (same types, same order).
        if re.findall(r, base[key]) != re.findall(r, other[key]):
            print 'Mismatch: ', key
            print base[key]
            print other[key]
            print
        del other[key]
    # Anything left in 'other' has no counterpart in the base file.
    if other:
        extra = other.keys()
        extra.sort()
        print 'Extra: ', ' '.join(extra)
    if show_missing and missing:
        missing.sort()
        print 'Missing: ', ' '.join(missing)
# Command-line driver: compare one or more translation files against the
# base help file, optionally also listing missing definitions.
if len(sys.argv) < 3:
    print 'Usage:\n'+sys.argv[0]+' [--missing] base_helpfile otherfile1 '\
        '[otherfile2 ...]'
    sys.exit(1)
i = 1
show_missing = False
# Accept both '--missing' and '-missing' as the flag spelling.
if sys.argv[i] in ( '--missing', '-missing' ):
    show_missing = True
    i = 2
base = parse(sys.argv[i])
for filename in sys.argv[i+1:]:
    print '*****', filename
    compare(base, parse(filename), show_missing)
    print '\n'
| philipl/mplayer | TOOLS/mphelp_check.py | Python | gpl-2.0 | 2,073 |
from django.db import models
from model_utils.models import TimeStampedModel
class MathEngineHistory(TimeStampedModel):
    """Math engine history model.

    One row per engine request: the caller's IP address, the submitted
    values (stored as a string -- presumably comma-separated; TODO confirm
    format against the view code), and the computed sum and product.
    'created'/'modified' timestamps are inherited from TimeStampedModel.
    """
    ip = models.GenericIPAddressField()
    values = models.CharField(max_length=255)
    sum = models.IntegerField()
    product = models.IntegerField()

    def __unicode__(self):
        return 'ip: %s, timestamp: %s, values: %s, sum: %s, product: %s' % (
            self.ip, self.created, self.values, self.sum, self.product)
| morenopc/dimagi | dimagi/math_engine/models.py | Python | mit | 490 |
# Copyright © 2012-2015 Umang Varma <umang.me@gmail.com>
#
# This file is part of indicator-stickynotes.
#
# indicator-stickynotes is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# indicator-stickynotes is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# indicator-stickynotes. If not, see <http://www.gnu.org/licenses/>.
from string import Template
from gi.repository import Gtk, Gdk, Gio, GObject, GtkSource, Pango
from locale import gettext as _
import os.path
import colorsys
import uuid
def load_global_css():
    """Adds a provider for the global CSS"""
    css_path = os.path.join(os.path.dirname(__file__), "..",
            "style_global.css")
    provider = Gtk.CssProvider()
    provider.load_from_path(css_path)
    Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(), provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
class StickyNote:
    """Manages the GUI of an individual stickynote"""
    def __init__(self, note):
        """Initializes the stickynotes window"""
        self.path = os.path.abspath(os.path.join(os.path.dirname(__file__),
            '..'))
        self.note = note
        self.noteset = note.noteset
        self.locked = self.note.properties.get("locked", False)
        # Create menu
        self.menu = Gtk.Menu()
        self.populate_menu()
        # Load CSS template and initialize Gtk.CssProvider
        with open(os.path.join(self.path, "style.css"), encoding="utf-8") \
                as css_file:
            self.css_template = Template(css_file.read())
        self.css = Gtk.CssProvider()
        self.build_note()
    def build_note(self):
        """Construct the note window from the Glade file and restore its
        saved position, size, text and locked state."""
        self.builder = Gtk.Builder()
        GObject.type_register(GtkSource.View)
        self.builder.add_from_file(os.path.join(self.path,
            "StickyNotes.glade"))
        self.builder.connect_signals(self)
        self.winMain = self.builder.get_object("MainWindow")
        # Get necessary objects
        self.winMain.set_name("main-window")
        widgets = ["txtNote", "bAdd", "imgAdd", "imgResizeR", "eResizeR",
                "bLock", "imgLock", "imgUnlock", "imgClose", "imgDropdown",
                "bClose", "confirmDelete", "movebox2", 'eTitle']
        for w in widgets:
            setattr(self, w, self.builder.get_object(w))
        self.style_contexts = [self.winMain.get_style_context(),
                self.txtNote.get_style_context()]
        self.eTitle.set_text(self.note.title)
        self.winMain.set_title(self.note.title)
        # Update window-specific style. Global styles are loaded initially!
        self.update_style()
        self.update_font()
        # Ensure buttons are displayed with images
        settings = Gtk.Settings.get_default()
        settings.props.gtk_button_images = True
        # Set text buffer
        self.bbody = GtkSource.Buffer()
        self.bbody.begin_not_undoable_action()
        # add note body to buffer,
        # searching for URLs and adding tags accordingly
        self.set_text(self.note.body)
        # adding markdown syntax highlight, probably has to add a settings for this
        language_manager = GtkSource.LanguageManager()
        self.bbody.set_language(language_manager.get_language('markdown'))
        self.bbody.set_highlight_matching_brackets(False)
        self.bbody.end_not_undoable_action()
        self.txtNote.set_buffer(self.bbody)
        # Make resize work
        self.winMain.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        self.eResizeR.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        # Move Window
        self.winMain.move(*self.note.properties.get("position", (10,10)))
        self.winMain.resize(*self.note.properties.get("size", (200,150)))
        # Show the window
        self.winMain.set_skip_pager_hint(True)
        self.winMain.show_all()
        # Mouse over
        self.eResizeR.get_window().set_cursor(Gdk.Cursor.new_for_display(
            self.eResizeR.get_window().get_display(),
            Gdk.CursorType.BOTTOM_RIGHT_CORNER))
        # Set locked state
        self.set_locked_state(self.locked)
        # call set_keep_above just to have the note appearing
        # above everything else.
        # without it, it still won't appear above a window
        # in which a cursor is active
        self.winMain.set_keep_above(True)
        # immediately undo the set keep above after the window
        # is shown, so that windows won't stay up if we switch to
        # a different window
        self.winMain.set_keep_above(False)
    def set_text(self, text):
        """Set the text in SourceBuffer
        Take a look at http://download.gna.org/nfoview/doc/api/nfoview.view_source.html"""
        import re
        re_url = re.compile(r"(([0-9a-zA-Z]+://\S+?\.\S+)|(www\.\S+?\.\S+))")
        bounds = self.bbody.get_bounds()
        self.bbody.delete(*bounds)
        lines = text.split("\n")
        # Scan text word-by-word for possible URLs,
        # but insert words in larger chunks to avoid
        # doing too many slow text view updates.
        word_queue = []
        for i, line in enumerate(lines):
            words = line.split(" ")
            for j, word in enumerate(words):
                match = re_url.search(word)
                if match is not None:
                    # Split the word into pre-URL text, URL, post-URL text.
                    a, z = match.span()
                    word_queue.append(word[:a])
                    self.insert_word("".join(word_queue))
                    word_queue = []
                    self.insert_url(word[a:z])
                    word_queue.append(word[z:])
                else: # Normal text.
                    word_queue.append(word)
                word_queue.append(" ")
            word_queue.pop(-1)
            if len(lines)-1==i: continue # no newline after the last line
            word_queue.append("\n")
            if len(word_queue) > 100:
                self.insert_word("".join(word_queue))
                word_queue = []
        self.insert_word("".join(word_queue))
    def insert_word(self, word):
        """Insert `word` into the text view."""
        itr = self.bbody.get_end_iter()
        self.bbody.insert(itr, word)
    def insert_url(self, url):
        """Insert `url` into the text view as a hyperlink."""
        #text_buffer = self.txtNote.get_buffer()
        tag = self.bbody.create_tag(None)
        tag.props.underline = Pango.Underline.SINGLE
        tag.props.foreground = "#0000FF"
        tag.connect("event", self._on_link_tag_event)
        # stash the target on the tag; motion_event/_on_link_tag_event read it
        tag.url = url
        itr = self.bbody.get_end_iter()
        self.bbody.insert_with_tags(itr, url, tag)
    def motion_event( self, widget, event):
        """ used to change cursors pointer when mouse over a link.
        Changed from http://download.gna.org/nfoview/doc/api/nfoview.view_source.html
        as returns False now so we can still select content """
        window = Gtk.TextWindowType.WIDGET
        x, y = widget.window_to_buffer_coords(window, int(event.x), int(event.y))
        window = widget.get_window(Gtk.TextWindowType.TEXT)
        for tag in widget.get_iter_at_location(x, y).get_tags():
            if hasattr(tag, "url"):
                window.set_cursor(Gdk.Cursor(cursor_type=Gdk.CursorType.HAND2))
                return False # to call the default handler.
        window.set_cursor(Gdk.Cursor(cursor_type=Gdk.CursorType.XTERM))
        return False
    def edit_title(self, titleEntry, event):
        """Double-click makes the title editable; a single click while not
        editing starts a window drag instead."""
        if(event.type==Gdk.EventType._2BUTTON_PRESS):
            titleEntry.grab_focus()
            titleEntry.props.editable=True
            return True
        else:
            if titleEntry.props.editable: return False
            self.move(titleEntry, event)
            return True
    def save_title(self,titleEntry,event=False):
        """Commit the edited title to the note object and window."""
        titleEntry.props.editable=False
        self.txtNote.grab_focus()
        self.note.update(None,self.eTitle.get_text())
        self.winMain.set_title(self.eTitle.get_text())
    def _on_link_tag_event (self, tag, text_view, event, itr):
        """ open links on default browser """
        if event.type != Gdk.EventType.BUTTON_PRESS: return
        Gtk.show_uri(None,tag.url,Gdk.CURRENT_TIME)
    # (re-)show the sticky note after it has been hidden getting a sticky note
    # to show itself was problematic after a "show desktop" command in unity.
    # (see bug lp:1105948). Reappearance of dialog is problematic for any
    # dialog which has the skip_taskbar_hint=True property in StickyNotes.glade
    # (property necessary to prevent sticky note from showing on the taskbar)
    # workaround which is based on deleting a sticky note and re-initializing
    # it.
    def show(self, widget=None, event=None, reload_from_backend=False):
        """Shows the stickynotes window"""
        # don't overwrite settings if loading from backend
        if not reload_from_backend:
            # store sticky note's settings
            self.update_note()
        else:
            # Categories may have changed in backend
            self.populate_menu()
        # destroy its main window
        self.winMain.destroy()
        # reinitialize that window
        self.build_note()
    def hide(self, *args):
        """Hides the stickynotes window"""
        self.winMain.hide()
    def update_note(self):
        """Update the underlying note object"""
        self.note.update(self.bbody.get_text(self.bbody.get_start_iter(),
            self.bbody.get_end_iter(), True))
        # re-render so freshly typed URLs get hyperlink tags
        self.set_text(self.note.body)
    def move(self, widget, event):
        """Action to begin moving (by dragging) the window"""
        self.winMain.begin_move_drag(event.button, event.x_root,
                event.y_root, event.get_time())
        return False
    def resize(self, widget, event, *args):
        """Action to begin resizing (by dragging) the window"""
        self.winMain.begin_resize_drag(Gdk.WindowEdge.SOUTH_EAST,
                event.button, event.x_root, event.y_root, event.get_time())
        return True
    def properties(self):
        """Get properties of the current note"""
        prop = {"position":self.winMain.get_position(),
                "size":self.winMain.get_size(), "locked":self.locked}
        # A hidden window reports a meaningless geometry; keep the stored one.
        if not self.winMain.get_visible():
            prop["position"] = self.note.properties.get("position", (10, 10))
            prop["size"] = self.note.properties.get("size", (200, 150))
        return prop
    def update_font(self):
        """Updates the font"""
        # Unset any previously set font
        self.txtNote.override_font(None)
        font = Pango.FontDescription.from_string(
                self.note.cat_prop("font"))
        self.txtNote.override_font(font)
    def update_style(self):
        """Updates the style using CSS template"""
        self.update_button_color()
        css_string = self.css_template.substitute(**self.css_data())\
                .encode("ascii", "replace")
        self.css.load_from_data(css_string)
        for context in self.style_contexts:
            context.add_provider(self.css,
                    Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
    def update_button_color(self):
        """Switches between regular and dark icons appropriately"""
        h,s,v = self.note.cat_prop("bgcolor_hsv")
        # an arbitrary quadratic found by trial and error
        thresh_sat = 1.05 - 1.7*((v-1)**2)
        suffix = "-dark" if s >= thresh_sat else ""
        iconfiles = {"imgAdd":"add", "imgClose":"close", "imgDropdown":"menu",
                "imgLock":"lock", "imgUnlock":"unlock", "imgResizeR":"resizer"}
        for img, filename in iconfiles.items():
            getattr(self, img).set_from_file(
                    os.path.join(os.path.dirname(__file__), "..","Icons/" +
                        filename + suffix + ".png"))
    def css_data(self):
        """Returns data to substitute into the CSS template"""
        data = {}
        # Converts to RGB hex. All RGB/HSV values are scaled to a max of 1
        rgb_to_hex = lambda x: "#" + "".join(["{:02x}".format(int(255*a))
            for a in x])
        hsv_to_hex = lambda x: rgb_to_hex(colorsys.hsv_to_rgb(*x))
        bgcolor_hsv = self.note.cat_prop("bgcolor_hsv")
        data["bgcolor_hex"] = hsv_to_hex(
                self.note.cat_prop("bgcolor_hsv"))
        data["text_color"] = rgb_to_hex(self.note.cat_prop("textcolor"))
        return data
    def populate_menu(self):
        """(Re)populates the note's menu items appropriately"""
        def _delete_menu_item(item, *args):
            self.menu.remove(item)
        self.menu.foreach(_delete_menu_item, None)
        aot = Gtk.CheckMenuItem.new_with_label(_("Always on top"))
        aot.connect("toggled", self.malways_on_top_toggled)
        self.menu.append(aot)
        aot.show()
        mset = Gtk.MenuItem(_("Settings"))
        mset.connect("activate", self.noteset.indicator.show_settings)
        self.menu.append(mset)
        mset.show()
        sep = Gtk.SeparatorMenuItem()
        self.menu.append(sep)
        sep.show()
        # Radio group: an insensitive "Categories:" header followed by one
        # radio item per category, the note's current one pre-selected.
        catgroup = []
        mcats = Gtk.RadioMenuItem.new_with_label(catgroup,
                _("Categories:"))
        self.menu.append(mcats)
        mcats.set_sensitive(False)
        catgroup = mcats.get_group()
        mcats.show()
        for cid, cdata in self.noteset.categories.items():
            mitem = Gtk.RadioMenuItem.new_with_label(catgroup,
                    cdata.get("name", _("New Category")))
            catgroup = mitem.get_group()
            if cid == self.note.category:
                mitem.set_active(True)
            mitem.connect("activate", self.set_category, cid)
            self.menu.append(mitem)
            mitem.show()
    def malways_on_top_toggled(self, widget, *args):
        """Menu handler: toggle keep-above for this note's window."""
        self.winMain.set_keep_above(widget.get_active())
    def save(self, *args):
        """Persist the whole note set."""
        self.note.noteset.save()
        return False
    def add(self, *args):
        """Create a new note in the note set."""
        self.note.noteset.new()
        return False
    def delete(self, *args):
        """Delete this note, asking for confirmation when it has text."""
        if self.bbody.get_char_count(): # ask for only non-empty notes
            winConfirm = Gtk.MessageDialog(self.winMain, None,
                    Gtk.MessageType.QUESTION, Gtk.ButtonsType.NONE,
                    _("Are you sure you want to delete this note?"))
            winConfirm.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
                    Gtk.STOCK_DELETE, Gtk.ResponseType.ACCEPT)
            confirm = winConfirm.run()
            winConfirm.destroy()
        else: confirm = Gtk.ResponseType.ACCEPT
        if confirm == Gtk.ResponseType.ACCEPT:
            self.note.delete()
            self.winMain.destroy()
            return False
        else:
            return True
    def popup_menu(self, button, *args):
        """Pops up the note's menu"""
        self.menu.popup(None, None, None, None, Gdk.BUTTON_PRIMARY,
                Gtk.get_current_event_time())
    def set_category(self, widget, cat):
        """Set the note's category"""
        if not cat in self.noteset.categories:
            raise KeyError("No such category")
        self.note.category = cat
        self.update_style()
        self.update_font()
    def set_locked_state(self, locked):
        """Change the locked state of the stickynote"""
        self.locked = locked
        self.txtNote.set_editable(not self.locked)
        self.txtNote.set_cursor_visible(not self.locked)
        # swap padlock icon and tooltip to match the new state
        self.bLock.set_image({True:self.imgLock,
            False:self.imgUnlock}[self.locked])
        self.bLock.set_tooltip_text({True: _("Unlock"),
            False: _("Lock")}[self.locked])
    def lock_clicked(self, *args):
        """Toggle the locked state of the note"""
        self.set_locked_state(not self.locked)
    def focus_out(self, *args):
        """Auto-save when the note window loses focus."""
        self.save(*args)
def show_about_dialog():
    """Run the modal About dialog and return its response code."""
    glade_file = os.path.abspath(os.path.join(os.path.dirname(__file__),
            '..', "GlobalDialogs.glade"))
    builder = Gtk.Builder()
    builder.add_from_file(glade_file)
    winAbout = builder.get_object("AboutWindow")
    ret = winAbout.run()
    winAbout.destroy()
    return ret
class SettingsCategory:
    """Widgets that handle properties of a category"""
    def __init__(self, settingsdialog, cat):
        """Build the expander row for category id *cat* inside the
        settings dialog and populate it from the stored properties."""
        self.settingsdialog = settingsdialog
        self.noteset = settingsdialog.noteset
        self.cat = cat
        self.builder = Gtk.Builder()
        self.path = os.path.abspath(os.path.join(os.path.dirname(__file__),
            '..'))
        self.builder.add_objects_from_file(os.path.join(self.path,
            "SettingsCategory.glade"), ["catExpander"])
        self.builder.connect_signals(self)
        widgets = ["catExpander", "lExp", "cbBG", "cbText", "eName",
                "confirmDelete", "fbFont"]
        for w in widgets:
            setattr(self, w, self.builder.get_object(w))
        name = self.noteset.categories[cat].get("name", _("New Category"))
        self.eName.set_text(name)
        self.refresh_title()
        # Colour buttons reflect the stored HSV background / RGB text colour.
        self.cbBG.set_rgba(Gdk.RGBA(*colorsys.hsv_to_rgb(
            *self.noteset.get_category_property(cat, "bgcolor_hsv")),
            alpha=1))
        self.cbText.set_rgba(Gdk.RGBA(
            *self.noteset.get_category_property(cat, "textcolor"),
            alpha=1))
        fontname = self.noteset.get_category_property(cat, "font")
        if not fontname:
            # Get the system default font, if none is set
            fontname = \
                self.settingsdialog.wSettings.get_style_context()\
                        .get_font(Gtk.StateFlags.NORMAL).to_string()
            #why.is.this.so.long?
        self.fbFont.set_font(fontname)
    def refresh_title(self, *args):
        """Updates the title of the category"""
        name = self.noteset.categories[self.cat].get("name",
                _("New Category"))
        if self.noteset.properties.get("default_cat", "") == self.cat:
            name += " (" + _("Default Category") + ")"
        self.lExp.set_text(name)
    def delete_cat(self, *args):
        """Delete a category"""
        winConfirm = Gtk.MessageDialog(self.settingsdialog.wSettings, None,
                Gtk.MessageType.QUESTION, Gtk.ButtonsType.NONE,
                _("Are you sure you want to delete this category?"))
        winConfirm.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
                Gtk.STOCK_DELETE, Gtk.ResponseType.ACCEPT)
        confirm = winConfirm.run()
        winConfirm.destroy()
        if confirm == Gtk.ResponseType.ACCEPT:
            self.settingsdialog.delete_category(self.cat)
    def make_default(self, *args):
        """Make this the default category"""
        self.noteset.properties["default_cat"] = self.cat
        self.settingsdialog.refresh_category_titles()
        # restyle every note so ones using the default pick up the change
        for note in self.noteset.notes:
            note.gui.update_style()
            note.gui.update_font()
    def eName_changed(self, *args):
        """Update a category name"""
        self.noteset.categories[self.cat]["name"] = self.eName.get_text()
        self.refresh_title()
        for note in self.noteset.notes:
            note.gui.populate_menu()
    def update_bg(self, *args):
        """Action to update the background color"""
        try:
            rgba = self.cbBG.get_rgba()
        except TypeError:
            rgba = Gdk.RGBA()
            self.cbBG.get_rgba(rgba)
            # Some versions of GObjectIntrospection are affected by
            # https://bugzilla.gnome.org/show_bug.cgi?id=687633
        hsv = colorsys.rgb_to_hsv(rgba.red, rgba.green, rgba.blue)
        self.noteset.categories[self.cat]["bgcolor_hsv"] = hsv
        for note in self.noteset.notes:
            note.gui.update_style()
        # Remind some widgets that they are transparent, etc.
        load_global_css()
    def update_textcolor(self, *args):
        """Action to update the text color"""
        try:
            rgba = self.cbText.get_rgba()
        except TypeError:
            # see update_bg: older GI versions need the out-parameter form
            rgba = Gdk.RGBA()
            self.cbText.get_rgba(rgba)
        self.noteset.categories[self.cat]["textcolor"] = \
                [rgba.red, rgba.green, rgba.blue]
        for note in self.noteset.notes:
            note.gui.update_style()
    def update_font(self, *args):
        """Action to update the font size"""
        self.noteset.categories[self.cat]["font"] = \
                self.fbFont.get_font_name()
        for note in self.noteset.notes:
            note.gui.update_font()
class SettingsDialog:
    """Manages the GUI of the settings dialog"""
    def __init__(self, noteset):
        """Build the dialog, add one SettingsCategory row per category
        and run it modally.
        NOTE(review): the dialog is run from the constructor and `ret`
        is discarded — the caller never sees the response code.
        """
        self.noteset = noteset
        self.categories = {}
        self.path = os.path.abspath(os.path.join(os.path.dirname(__file__),
            '..'))
        glade_file = (os.path.join(self.path, "GlobalDialogs.glade"))
        self.builder = Gtk.Builder()
        self.builder.add_from_file(glade_file)
        self.builder.connect_signals(self)
        widgets = ["wSettings", "boxCategories"]
        for w in widgets:
            setattr(self, w, self.builder.get_object(w))
        for c in self.noteset.categories:
            self.add_category_widgets(c)
        ret = self.wSettings.run()
        self.wSettings.destroy()
    def add_category_widgets(self, cat):
        """Add the widgets for a category"""
        self.categories[cat] = SettingsCategory(self, cat)
        self.boxCategories.pack_start(self.categories[cat].catExpander,
                False, False, 0)
    def new_category(self, *args):
        """Make a new category"""
        # fresh random id; properties start empty and fall back to defaults
        cid = str(uuid.uuid4())
        self.noteset.categories[cid] = {}
        self.add_category_widgets(cid)
    def delete_category(self, cat):
        """Delete a category"""
        del self.noteset.categories[cat]
        self.categories[cat].catExpander.destroy()
        del self.categories[cat]
        # notes referencing the deleted category fall back to defaults
        for note in self.noteset.notes:
            note.gui.populate_menu()
            note.gui.update_style()
            note.gui.update_font()
    def refresh_category_titles(self):
        """Refresh every category row's title label."""
        for cid, catsettings in self.categories.items():
            catsettings.refresh_title()
| lesion/indicator-stickynotes | stickynotes/gui.py | Python | gpl-3.0 | 22,520 |
import sys
import cPickle
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
class Node(object):
    """A simple ordered tree node (backing store for a Qt item model).

    A node created with a parent automatically registers itself as a
    child of that parent.  `_label` / `_type` are plain data attributes
    set directly by callers.
    """
    def __init__(self, parent=None):
        self._label = 'root'
        self._type = None
        self._items = []  # ordered child nodes
        self._parent = None  # set (and registered) by setParent below
        self.setParent(parent)
    def __repr__(self):
        return self.log()
    def __len__(self):
        """Number of direct children."""
        return len(self._items)
    def label(self):
        return self._label
    def type(self):
        return self._type
    def parent(self):
        return self._parent
    def setParent(self, parent):
        """Attach this node to *parent* (or detach when parent is None),
        registering it as the parent's last child."""
        self._parent = parent
        if parent is not None:
            parent.appendItem(self)
    def getItem(self, row):
        return self._items[row]
    def appendItem(self, node):
        self._items.append(node)
    def appendChild(self, node):
        # Alias of appendItem, kept for Qt-model-style callers.
        self.appendItem(node)
    def insertItem(self, row, node):
        self._items.insert(row, node)
    def removeItem(self, row):
        # Delete by index.  The original fetched the item and removed it
        # by value, which scans the list again and (with a custom __eq__)
        # could drop the wrong element.
        del self._items[row]
    def columnCount(self):
        # Fixed two-column layout expected by the view.
        return 2
    def row(self):
        """Index of this node in its parent's children, or -1 for a root."""
        if self._parent is not None:
            return self._parent._items.index(self)
        return -1
    def log(self, level=-1):
        """Return an indented multi-line dump of this subtree."""
        level += 1
        output = ['\t'] * level
        output.append(self._label if self._parent is not None else 'Root')
        output.append('\n')
        output.extend(item.log(level) for item in self._items)
        return ''.join(output)
if __name__ == '__main__':
    # Ad-hoc manual test: build a small tree and display it in a QTreeView
    # with internal drag-and-drop enabled.
    # NOTE(review): `SceneGraph` is not defined in this module, so this
    # block raises NameError as written; Python 2 only (print statement).
    app = QtGui.QApplication(sys.argv)
    rootNode = Node()
    childnode0 = Node(rootNode)
    childnode0._label = 'Something'
    childnode1 = Node(childnode0)
    childnode1._label = 'Left Foot'
    childnode2 = Node(rootNode)
    childnode2._label = 'Testy'
    print rootNode
    model = SceneGraph(rootNode)
    tree = QtGui.QTreeView()
    tree.setDragEnabled(True)
    tree.setAcceptDrops(True)
    tree.setDropIndicatorShown(True)
    tree.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
    # tree.change.connect()
    tree.setModel(model)
    tree.show()
    sys.exit(app.exec_())
| arubertoson/piemenu | piemenu/menusystem/refs/test2.py | Python | gpl-2.0 | 2,336 |
#!/usr/bin/python
""" Creates a widget for monitoring logger information. """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintanence information
__maintainer__ = 'Projex Software'
__email__ = 'team@projexsoftware.com'
import logging
from projexui.xlogginghandler import XLoggingHandler
class XLoggerWidgetHandler(XLoggingHandler):
    """ Custom class for handling error exceptions via the logging system,
        based on the logging level. """
    def __init__(self, parent):
        super(XLoggerWidgetHandler, self).__init__(parent)
        # define custom properties
        self._loggerLevels = {}
        self._activeLevels = []
        self._recordQueue = []
        # process all notifications, this will handle
        # per logger vs. per handler
        self.setLevel(logging.DEBUG)
    def activeLevels(self):
        """
        Returns the active levels that should be displayed for this handler.
        :return [<int>, ..]
        """
        return self._activeLevels
    def emit(self, record):
        """
        Throws an error based on the information that the logger reported,
        given the logging level.
        :param record | <logging.LogRecord>
        """
        # if we've already processed this record, ignore it
        if record in self._recordQueue:
            return
        # an empty activeLevels list means "show everything"
        if self._activeLevels and not record.levelno in self._activeLevels:
            return
        name = record.name
        lvl = self.loggerLevel(name)
        # don't process this log
        if lvl > record.levelno:
            return
        # remember only the 10 most recent records for de-duplication
        self._recordQueue.insert(0, record)
        self._recordQueue = self._recordQueue[:10]
        # emit the change
        super(XLoggerWidgetHandler, self).emit(record)
    def loggerLevel(self, logger):
        """
        Returns the level for the inputed logger.
        :param logger | <str>
        :return <int>
        """
        try:
            return self._loggerLevels[logger]
        except KeyError:
            # fall back to the closest configured ancestor by name prefix
            items = sorted(self._loggerLevels.items())
            for key, lvl in items:
                if logger.startswith(key):
                    return lvl
            return logging.NOTSET
    def loggerLevels(self):
        """
        Returns the logger levels for this handler.
        :return {<str> logger: <int> level, ..}
        """
        return self._loggerLevels
    def setActiveLevels(self, levels):
        """
        Sets the active levels that will be emitted for this handler.
        :param levels | [<int>, ..]
        """
        self._activeLevels = levels
    def setLoggerLevel(self, logger, level):
        """
        Sets the level to log the inputed logger at.
        :param logger | <str>
               level | <int>
        """
        # apply to the real logging.Logger as well as our local map
        if logger == 'root':
            _log = logging.getLogger()
        else:
            _log = logging.getLogger(logger)
        _log.setLevel(level)
        if level == logging.NOTSET:
            self._loggerLevels.pop(logger, None)
        else:
            self._loggerLevels[logger] = level
| bitesofcode/projexui | projexui/widgets/xloggerwidget/xloggerwidgethandler.py | Python | lgpl-3.0 | 3,594 |
"""State estimator."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
from typing import Any, Sequence
import collections
_DEFAULT_WINDOW_SIZE = 20
class MovingWindowFilter(object):
  """A stable O(1) moving filter for incoming data streams.
  We implement the Neumaier's algorithm to calculate the moving window average,
  which is numerically stable.
  """
  def __init__(self, window_size: int):
    """Initializes the class.
    Args:
      window_size: The moving window size.
    """
    assert window_size > 0
    self._window_size = window_size
    self._value_deque = collections.deque(maxlen=window_size)
    # The moving window sum.
    self._sum = 0
    # The correction term to compensate numerical precision loss during
    # calculation.
    self._correction = 0
  def _neumaier_sum(self, value: float):
    """Update the moving window sum using Neumaier's algorithm.
    For more details please refer to:
    https://en.wikipedia.org/wiki/Kahan_summation_algorithm#Further_enhancements
    Args:
      value: The new value to be added to the window.
    """
    new_sum = self._sum + value
    if abs(self._sum) >= abs(value):
      # If self._sum is bigger, low-order digits of value are lost.
      self._correction += (self._sum - new_sum) + value
    else:
      # low-order digits of sum are lost
      self._correction += (value - new_sum) + self._sum
    self._sum = new_sum
  def calculate_average(self, new_value: float) -> float:
    """Computes the moving window average in O(1) time.
    Args:
      new_value: The new value to enter the moving window.
    Returns:
      The average of the values in the window.
    """
    deque_len = len(self._value_deque)
    if deque_len < self._value_deque.maxlen:
      pass
    else:
      # The left most value to be subtracted from the moving sum.
      self._neumaier_sum(-self._value_deque[0])
    self._neumaier_sum(new_value)
    self._value_deque.append(new_value)
    # Bug fix: divide by the number of values actually held, not the
    # configured window size, so averages are correct while the window
    # is still filling up (previously sum/window_size under-estimated).
    return (self._sum + self._correction) / len(self._value_deque)
class COMVelocityEstimator(object):
  """Estimate the CoM velocity using on board sensors.
  Requires knowledge about the base velocity in world frame, which for example
  can be obtained from a MoCap system. This estimator will filter out the high
  frequency noises in the velocity so the results can be used with controllers
  reliably.
  """
  def __init__(
      self,
      robot: Any,
      window_size: int = _DEFAULT_WINDOW_SIZE,
  ):
    self._robot = robot
    self._window_size = window_size
    self.reset(0)
  @property
  def com_velocity_body_frame(self) -> Sequence[float]:
    """The base velocity projected in the body aligned inertial frame.
    The body aligned frame is a intertia frame that coincides with the body
    frame, but has a zero relative velocity/angular velocity to the world frame.
    Returns:
      The com velocity in body aligned frame.
    """
    return self._com_velocity_body_frame
  @property
  def com_velocity_world_frame(self) -> Sequence[float]:
    """The filtered base velocity expressed in the world frame."""
    return self._com_velocity_world_frame
  def reset(self, current_time):
    """Re-create the per-axis smoothing filters and zero both estimates."""
    del current_time  # Unused; kept for the estimator interface.
    # One moving-average filter per velocity component (x, y, z).
    self._velocity_filter_x = MovingWindowFilter(
        window_size=self._window_size)
    self._velocity_filter_y = MovingWindowFilter(
        window_size=self._window_size)
    self._velocity_filter_z = MovingWindowFilter(
        window_size=self._window_size)
    self._com_velocity_world_frame = np.array((0, 0, 0))
    self._com_velocity_body_frame = np.array((0, 0, 0))
  def update(self, current_time):
    """Filter the robot's current base velocity and cache both frames."""
    del current_time  # Unused; kept for the estimator interface.
    raw_velocity = self._robot.GetBaseVelocity()
    axis_filters = (self._velocity_filter_x, self._velocity_filter_y,
                    self._velocity_filter_z)
    smoothed = [flt.calculate_average(component)
                for flt, component in zip(axis_filters, raw_velocity)]
    self._com_velocity_world_frame = np.array(smoothed)
    # Rotate the world-frame velocity into the body aligned frame using
    # the inverse of the base orientation.
    base_orientation = self._robot.GetTrueBaseOrientation()
    _, inverse_rotation = self._robot.pybullet_client.invertTransform(
        (0, 0, 0), base_orientation)
    self._com_velocity_body_frame, _ = (
        self._robot.pybullet_client.multiplyTransforms(
            (0, 0, 0), inverse_rotation, self._com_velocity_world_frame,
            (0, 0, 0, 1)))
| google-research/motion_imitation | mpc_controller/com_velocity_estimator.py | Python | apache-2.0 | 4,437 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PatchADependency(Package):
    """Package that requires a patched version of a dependency."""
    # Mock package used by Spack's test suite; URLs are placeholders.
    homepage = "http://www.example.com"
    url = "http://www.example.com/patch-a-dependency-1.0.tar.gz"
    version('1.0', '0123456789abcdef0123456789abcdef')
    # Apply libelf.patch (shipped alongside this package) to the libelf
    # dependency when it is concretized for this spec.
    depends_on('libelf', patches=patch('libelf.patch'))
| rspavel/spack | var/spack/repos/builtin.mock/packages/patch-a-dependency/package.py | Python | lgpl-2.1 | 539 |
from __future__ import print_function
from __future__ import absolute_import
from .. import log
import os
import codecs
import re
import xml.dom.minidom
from Components.config import config
from .dvbscanner import DvbScanner
import six
from six.moves.urllib.parse import quote
class Tools():
SERVICEREF_ALLOWED_TYPES = [1, 4097, 5001, 5002]
def encodeNODE(self, data):
if six.PY2:
return data.encode("utf-8")
return six.ensure_str(data, encoding='utf-8', errors='ignore')
def parseXML(self, filename):
try:
tool = open(filename, "r")
except Exception as e:
#print("[ABM-Tools][parseXML] Cannot open %s: %s" % (filename, e), file=log)
return None
try:
dom = xml.dom.minidom.parse(tool)
except Exception as e:
print("[ABM-Tools][parseXML] XML parse error (%s): %s" % (filename, e), file=log)
tool.close()
return None
tool.close()
return dom
	def customLCN(self, services, section_identifier, current_bouquet_key):
		"""Apply a provider's CustomLCN override file to *services*.

		For each of "video" and "radio": writes an EXAMPLE_*LCN.xml
		template into the custom directory, then, if a matching custom
		file exists, renumbers/reorders the channel dict according to it.
		Returns the (possibly modified) services dict.
		"""
		custom_dir = os.path.dirname(__file__) + "/../custom"
		# A list is "sorted" when any LCN differs from its service id,
		# i.e. the provider already assigned logical channel numbers.
		is_sorted = False
		for number in services["video"]:
			if number == services["video"][number]["service_id"]:
				continue
			is_sorted = True
			break
		for type in ["video", "radio"]:
			skipextrachannels = 0
			# Write Example CustomLCN file
			xml_out_list = []
			xml_out_list.append("<custom>\n\t<include>yes</include>\n\t<lcnlist>\n")
			numbers = sorted(list(services[type].keys()))
			for number in numbers:
				if six.PY2:
					servicename = unicode(services[type][number]["service_name"], errors='ignore')
				else:
					servicename = six.ensure_text(services[type][number]["service_name"], encoding='utf-8', errors='ignore')
				xml_out_list.append("\t\t<configuration lcn=\"%d\" channelnumber=\"%d\" description=\"%s\"></configuration>\n" % (
					number,
					number,
					servicename.replace("&", "+")
				))
			xml_out_list.append("\t</lcnlist>\n</custom>\n")
			xmlout = open(custom_dir + "/EXAMPLE_" + ("sd" if current_bouquet_key.startswith('sd') else "hd") + "_" + section_identifier + "_Custom" + ("radio" if type == "radio" else "") + "LCN.xml", "w")
			xmlout.write(''.join(xml_out_list))
			xmlout.close()
			del xml_out_list
			# Read CustomLCN file
			customfile = custom_dir + "/" + ("sd" if current_bouquet_key.startswith('sd') else "hd") + "_" + section_identifier + "_Custom" + ("radio" if type == "radio" else "") + "LCN.xml"
			dom = self.parseXML(customfile)
			if dom is None:
				print("[ABM-Tools][customLCN] No custom " + type + " LCN file for " + section_identifier + ".", file=log)
			elif dom.documentElement.nodeType == dom.documentElement.ELEMENT_NODE and dom.documentElement.tagName == "custom":
				print("[ABM-Tools][customLCN] Reading custom " + type + " LCN file for " + section_identifier + ".", file=log)
				customlcndict = {}
				sort_order = [] # to process this file top down
				for node in dom.documentElement.childNodes:
					if node.nodeType != node.ELEMENT_NODE:
						continue
					if node.tagName == "include":
						# <include>no</include> drops channels missing
						# from the custom file instead of appending them.
						node.normalize()
						if len(node.childNodes) == 1 and node.childNodes[0].nodeType == node.TEXT_NODE:
							if self.encodeNODE(node.childNodes[0].data) == 'no':
								skipextrachannels = 1
					if node.tagName == "lcnlist":
						for node2 in node.childNodes:
							if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "configuration":
								lcn = 0
								channelnumber = 0
								for i in list(range(0, node2.attributes.length)):
									if node2.attributes.item(i).name == "lcn":
										lcn = int(node2.attributes.item(i).value)
									elif node2.attributes.item(i).name == "channelnumber":
										channelnumber = int(node2.attributes.item(i).value)
								if channelnumber and lcn:
									customlcndict[channelnumber] = lcn
									if channelnumber in services[type]:
										sort_order.append(channelnumber)
				temp_services = {}
				extra_services = {}
				# add channels not in the CustomLCN file to the sort list.
				for number in sorted(list(services[type].keys())):
					if number not in sort_order:
						sort_order.append(number)
				# add services from CustomLCN file
				for number in sort_order:
					if number in customlcndict and customlcndict[number] not in temp_services:
						temp_services[customlcndict[number]] = services[type][number]
					else:
						extra_services[number] = services[type][number]
				# add services not in CustomLCN file to correct lcn positions if slots are vacant
				if is_sorted:
					for number in list(extra_services.keys()):
						if number not in temp_services: # CustomLCN has priority
							temp_services[number] = extra_services[number]
							del extra_services[number]
				#add any remaining services to the end of list
				if is_sorted or skipextrachannels == 0:
					lastlcn = len(temp_services) and max(list(temp_services.keys()))
					newservices = []
					for number in self.sortServicesAlpha(extra_services):
						temp_services[lastlcn + 1] = extra_services[number]
						lastlcn += 1
						newservices.append(number)
					print("[ABM-Tools][customLCN] New " + type + " services %s" % (str(newservices)), file=log)
				services[type] = temp_services
		return services
def sortServicesAlpha(self, services):
# services is a dict with LCNs as keys
# returns keys, sorted flat alphabetic by service name (or interactive name if it is set).
sort_list = []
for lcn in services:
if "interactive_name" in services[lcn]:
sort_list.append((lcn, re.sub('^(?![a-z])', 'zzzzz', services[lcn]['interactive_name'].lower())))
else:
sort_list.append((lcn, re.sub('^(?![a-z])', 'zzzzz', services[lcn]['service_name'].lower())))
sort_list = sorted(sort_list, key=lambda listItem: listItem[1])
return [i[0] for i in sort_list]
	def customMix(self, services, section_identifier, providers, providerConfig):
		"""Apply the provider's optional ``<section_identifier>_CustomMix.xml``.

		Starts from a copy of the provider's scanned "video"/"radio"
		services and applies the file's <inserts>, <streams>, <deletes>,
		<sections> and <hacks> elements in document order.

		Returns the tuple (customised services dict, sections dict).
		"""
		sections = providers[section_identifier]["sections"]
		custom_dir = os.path.dirname(__file__) + "/../custom"
		customfile = custom_dir + "/" + section_identifier + "_CustomMix.xml"
		# Seed the result with an (un)modified copy of the scanned services.
		customised = {"video": {}, "radio": {}}
		for type in ["video", "radio"]:
			for number in services[section_identifier][type]:
				customised[type][number] = services[section_identifier][type][number]
		hacks = ""
		dom = self.parseXML(customfile)
		if dom is None:
			print("[ABM-Tools][customMix] No CustomMix file for " + section_identifier + ".", file=log)
		elif dom.documentElement.nodeType == dom.documentElement.ELEMENT_NODE and dom.documentElement.tagName == "custommix":
			print("[ABM-Tools][customMix] Reading CustomMix file for " + section_identifier + ".", file=log)
			for node in dom.documentElement.childNodes:
				if node.nodeType != node.ELEMENT_NODE:
					continue
				# <inserts>: copy a service from another scanned provider
				# into this provider's video lineup at the target LCN.
				if node.tagName == "inserts":
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "insert":
							provider = ''
							source = ''
							target = ''
							for i in list(range(0, node2.attributes.length)):
								if node2.attributes.item(i).name == "provider":
									provider = self.encodeNODE(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "source":
									source = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "target":
									target = int(node2.attributes.item(i).value)
							if provider and source and target and provider in services and source in services[provider]["video"]:
								customised["video"][target] = services[provider]["video"][source]
				# experimental, to replace unavailable services with streams
				elif node.tagName == "streams":
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "stream":
							url = ''
							target = ''
							name = ''
							servicereftype = ''
							for i in list(range(0, node2.attributes.length)):
								if node2.attributes.item(i).name == "name":
									name = self.encodeNODE(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "url":
									url = self.encodeNODE(node2.attributes.item(i).value)
									if "%" not in url[:10]: # url not encoded
										url = quote(url) # single encode url
								elif node2.attributes.item(i).name == "target":
									target = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "servicereftype":
									servicereftype = int(node2.attributes.item(i).value)
							if url and target and target in customised["video"]: # must be a current service
								customised["video"][target]["stream"] = url
							elif name and url and target and target not in customised["video"]: # non existing service
								customised["video"][target] = {'service_id': 0, 'transport_stream_id': 0, 'original_network_id': 0, 'namespace': 0, 'service_name': name, 'number': target, 'numbers': [target], 'free_ca': 0, 'service_type': 1, 'stream': url}
							if servicereftype and servicereftype in self.SERVICEREF_ALLOWED_TYPES and target and target in customised["video"] and "stream" in customised["video"][target]: # if a stream was added above, a custom servicereftype may also be added
								customised["video"][target]["servicereftype"] = servicereftype
				# <deletes>: remove a service from the video lineup.
				elif node.tagName == "deletes":
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "delete":
							target = ''
							for i in list(range(0, node2.attributes.length)):
								if node2.attributes.item(i).name == "target":
									target = int(node2.attributes.item(i).value)
							if target and target in customised["video"]:
								del customised["video"][target]
				# <sections>: rename/add a section heading at the given number.
				elif node.tagName == "sections":
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "section":
							number = -1
							for i in list(range(0, node2.attributes.length)):
								if node2.attributes.item(i).name == "number":
									number = int(node2.attributes.item(i).value)
							if number == -1:
								continue
							node2.normalize()
							if len(node2.childNodes) == 1 and node2.childNodes[0].nodeType == node2.TEXT_NODE:
								sections[number] = self.encodeNODE(node2.childNodes[0].data)
				elif node.tagName == "hacks":
					node.normalize()
					for i in list(range(0, len(node.childNodes))):
						if node.childNodes[i].nodeType == node.CDATA_SECTION_NODE:
							hacks = self.encodeNODE(node.childNodes[i].data).strip()
					if len(hacks) > 0:
						# NOTE(review): executes arbitrary Python taken from the
						# CustomMix file's CDATA section; the file must be trusted.
						exec(hacks)
		return customised, sections
	def customtransponder(self, provider_key, bouquet_key):
		"""Collect custom transponder definitions from the provider XML file.

		Reads ``providers/<provider_key>.xml`` and returns a list of
		transponder dicts holding lamedb-style tuning values.  Only entries
		whose ``key`` attribute equals ``bouquet_key`` and that supply both
		a ``transport_stream_id`` and a ``frequency`` are returned.
		"""
		customtransponders = []
		providers_dir = os.path.dirname(__file__) + "/../providers"
		# Read custom file
		print("[ABM-Tools][customtransponder] Transponder provider name", provider_key, file=log)
		providerfile = providers_dir + "/" + provider_key + ".xml"
		dom = self.parseXML(providerfile)
		if dom is None:
			print("[ABM-Tools][customtransponder] Cannot read custom transponders from provider file.", file=log)
		elif dom.documentElement.nodeType == dom.documentElement.ELEMENT_NODE and dom.documentElement.tagName == "provider":
			for node in dom.documentElement.childNodes:
				if node.nodeType != node.ELEMENT_NODE:
					continue
				elif node.tagName == "customtransponders":
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "customtransponder":
							# The following are lamedb values for use directly in lamedb.
							# For information on lamedb values look in README.txt in AutoBouquetsMaker custom folder.
							# Key, frequency and TSID must come from the provider file.
							# In the case of T2, "system" should also be present in the provider file.
							# The following adds default values which can be overridden from the providers file.
							customtransponder = {}
							customtransponder["bandwidth"] = 0
							customtransponder["code_rate_hp"] = 5
							customtransponder["code_rate_lp"] = 5
							customtransponder["modulation"] = 0
							customtransponder["transmission_mode"] = 3
							customtransponder["guard_interval"] = 4
							customtransponder["hierarchy"] = 4
							customtransponder["inversion"] = 2
							customtransponder["flags"] = 0
							customtransponder["system"] = 0
							customtransponder["plpid"] = 0
							# Override the defaults with any attributes present on the element.
							for i in list(range(0, node2.attributes.length)):
								if node2.attributes.item(i).name == "key":
									if six.PY3:
										customtransponder["key"] = six.ensure_str(node2.attributes.item(i).value)
									else:
										customtransponder["key"] = self.encodeNODE(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "transport_stream_id":
									# TSID is written in hex in the provider file.
									customtransponder["transport_stream_id"] = int(node2.attributes.item(i).value, 16)
								elif node2.attributes.item(i).name == "frequency":
									customtransponder["frequency"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "bandwidth":
									customtransponder["bandwidth"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "code_rate_hp":
									customtransponder["code_rate_hp"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "code_rate_lp":
									customtransponder["code_rate_lp"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "modulation":
									customtransponder["modulation"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "transmission_mode":
									customtransponder["transmission_mode"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "guard_interval":
									customtransponder["guard_interval"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "hierarchy":
									customtransponder["hierarchy"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "inversion":
									customtransponder["inversion"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "flags":
									customtransponder["flags"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "system":
									customtransponder["system"] = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "plpid":
									customtransponder["plpid"] = int(node2.attributes.item(i).value)
							if "key" in customtransponder and customtransponder["key"] == bouquet_key and "transport_stream_id" in customtransponder and "frequency" in customtransponder:
								customtransponders.append(customtransponder)
		if len(customtransponders) > 0:
			print("[ABM-Tools][customtransponder] %d custom transponders found for that region." % len(customtransponders), file=log)
		return customtransponders
	def favourites(self, path, services, providers, providerConfigs, bouquetsOrder):
		"""Build the user's optional "favourites" bouquet from custom/favourites.xml.

		Parses the file's <name>, <sections>, <inserts>, <bouquets>,
		<placement> and <hacks> elements.  When at least one video service
		was inserted, a synthetic "favourites" provider is registered,
		inserted into ``bouquetsOrder`` at the requested placement, and its
		bouquets are written via BouquetsWriter.

		Returns the (possibly updated) services, providers, providerConfigs
		and bouquetsOrder.
		"""
		custom_dir = os.path.dirname(__file__) + "/../custom"
		provider_key = "favourites"
		customised = {"video": {}, "radio": {}}
		name = ""
		prefix = ""
		sections = {}
		# Both bouquet kinds are enabled unless the file disables them.
		bouquets = {"main": 1, "sections": 1}
		area_key = ""
		bouquets_to_hide = []
		bouquetsToHide = []
		swaprules = []
		placement = 0
		hacks = ""
		# Read favourites file
		dom = self.parseXML(custom_dir + "/favourites.xml")
		if dom is None:
			print("[ABM-Tools][favourites] No favourites.xml file", file=log)
		elif dom.documentElement.nodeType == dom.documentElement.ELEMENT_NODE and dom.documentElement.tagName == "favourites":
			print("[ABM-Tools][favourites] Reading favourites.xml file", file=log)
			for node in dom.documentElement.childNodes:
				if node.nodeType != node.ELEMENT_NODE:
					continue
				if node.tagName == "name":
					node.normalize()
					if len(node.childNodes) == 1 and node.childNodes[0].nodeType == node.TEXT_NODE:
						name = self.encodeNODE(node.childNodes[0].data)
				elif node.tagName == "sections":
					sections = {}
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "section":
							number = -1
							for i in list(range(0, node2.attributes.length)):
								if node2.attributes.item(i).name == "number":
									number = int(node2.attributes.item(i).value)
							if number == -1:
								continue
							node2.normalize()
							if len(node2.childNodes) == 1 and node2.childNodes[0].nodeType == node2.TEXT_NODE:
								sections[number] = self.encodeNODE(node2.childNodes[0].data)
				elif node.tagName == "inserts":
					# Copy a service from a scanned provider into the
					# favourites lineup at the target LCN.
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "insert":
							provider = ''
							source = ''
							target = ''
							for i in list(range(0, node2.attributes.length)):
								if node2.attributes.item(i).name == "provider":
									provider = self.encodeNODE(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "source":
									source = int(node2.attributes.item(i).value)
								elif node2.attributes.item(i).name == "target":
									target = int(node2.attributes.item(i).value)
							if provider and source and target and provider in services and source in services[provider]["video"]:
								customised["video"][target] = services[provider]["video"][source]
				elif node.tagName == "bouquets":
					for node2 in node.childNodes:
						if node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "main":
							node2.normalize()
							if len(node2.childNodes) == 1 and node2.childNodes[0].nodeType == node2.TEXT_NODE and int(self.encodeNODE(node2.childNodes[0].data)) == 0:
								bouquets["main"] = 0
						elif node2.nodeType == node2.ELEMENT_NODE and node2.tagName == "sections":
							node2.normalize()
							if len(node2.childNodes) == 1 and node2.childNodes[0].nodeType == node2.TEXT_NODE and int(self.encodeNODE(node2.childNodes[0].data)) == 0:
								bouquets["sections"] = 0
				elif node.tagName == "placement":
					node.normalize()
					if len(node.childNodes) == 1 and node.childNodes[0].nodeType == node.TEXT_NODE:
						# The file uses 1-based positions; clamp to [0, len].
						placement = min(int(node.childNodes[0].data) - 1, len(bouquetsOrder))
						if placement < 0:
							placement = 0
				elif node.tagName == "hacks":
					node.normalize()
					for i in list(range(0, len(node.childNodes))):
						if node.childNodes[i].nodeType == node.CDATA_SECTION_NODE:
							hacks = self.encodeNODE(node.childNodes[i].data).strip()
					if len(hacks) > 0:
						# NOTE(review): executes arbitrary Python taken from the
						# favourites file's CDATA section; the file must be trusted.
						exec(hacks)
		if len(customised["video"]) > 0:
			# Register the synthetic provider and write its bouquets.
			providers[provider_key] = {}
			providers[provider_key]["name"] = name
			providers[provider_key]["bouquets"] = area_key
			providers[provider_key]["protocol"] = 'nolcn'
			providers[provider_key]["swapchannels"] = []
			providers[provider_key]["sections"] = sections
			if config.autobouquetsmaker.addprefix.value:
				prefix = name
			services[provider_key] = customised
			bouquetsOrder.insert(placement, provider_key)
			from .providerconfig import ProviderConfig
			providerConfigs[provider_key] = ProviderConfig("%s::0:" % provider_key)
			if bouquets["main"] == 1:
				providerConfigs[provider_key].setMakeNormalMain()
			if bouquets["sections"] == 1:
				providerConfigs[provider_key].setMakeSections()
			from .bouquetswriter import BouquetsWriter
			BouquetsWriter().buildBouquets(path, providerConfigs[provider_key], services[provider_key], sections, provider_key, swaprules, bouquets_to_hide, prefix)
		else:
			print("[ABM-Tools][favourites] Favourites list is zero length.", file=log)
		return services, providers, providerConfigs, bouquetsOrder
def clearsections(self, services, sections, bouquettype, servicetype):
# bouquettype = HD, FTAHD, FTA, ALL
# servicetype = video, radio
if len(sections) == 1:
return sections
active_sections = {}
for key in list(services[servicetype].keys()):
if (("FTA" not in bouquettype or services[servicetype][key]["free_ca"] == 0) and ("HD" not in bouquettype or services[servicetype][key]["service_type"] in DvbScanner.HD_ALLOWED_TYPES)) or 'ALL' in bouquettype:
section_number = max((x for x in sections if int(x) <= key))
if section_number not in active_sections:
active_sections[section_number] = sections[section_number]
if active_sections:
return active_sections
return sections
| oe-alliance/AutoBouquetsMaker | AutoBouquetsMaker/src/scanner/tools.py | Python | gpl-3.0 | 20,258 |
# Package version string; bump on each release.
__version__ = "1.0.0"
| 5t111111/alt-gtags.vim | altgtags_lib/altgtags/__init__.py | Python | lgpl-2.1 | 23 |
from django.contrib import admin
from django.forms import ModelForm
from django.urls import reverse
from django.utils.html import format_html

from .models import Ballot, LoggedAction, PartySet
class LoggedActionAdminForm(ModelForm):
    """Unmodified ModelForm used by LoggedActionAdmin; a hook for future
    field customisation."""
@admin.register(LoggedAction)
class LoggedActionAdmin(admin.ModelAdmin):
    """Admin for LoggedAction audit records, newest first, with a link to
    the affected person's public page."""

    form = LoggedActionAdminForm
    search_fields = (
        "user__username",
        "popit_person_new_version",
        "person__name",
        "ip_address",
        "source",
    )
    list_filter = ("action_type",)
    list_display = [
        "user",
        "ip_address",
        "action_type",
        "popit_person_new_version",
        "person_link",
        "created",
        "updated",
        "source",
    ]
    ordering = ("-created",)

    def person_link(self, o):
        """Render a link to the logged action's person, or "" when unset."""
        if not o.person:
            return ""
        url = reverse("person-view", kwargs={"person_id": o.person.id})
        # format_html escapes the interpolated values and returns a safe
        # string.  The previous ``person_link.allow_tags = True`` mechanism
        # was removed in Django 2.0 (this file already requires >= 2.0 via
        # django.urls.reverse), so the old .format() version rendered as
        # escaped text — and interpolated person.name without escaping.
        return format_html('<a href="{}">{}</a>', url, o.person.name)
class PartySetAdminForm(ModelForm):
    """Unmodified ModelForm used by PartySetAdmin; a hook for future
    field customisation."""
@admin.register(PartySet)
class PartySetAdmin(admin.ModelAdmin):
    """Admin configuration for PartySet records."""
    form = PartySetAdminForm
@admin.register(Ballot)
class BallotAdmin(admin.ModelAdmin):
    """Admin configuration for Ballot records."""
    # Columns shown in the changelist.
    list_display = ["post", "election", "winner_count"]
    list_filter = ("election__name", "election__current")
    # Raw-id widgets avoid loading every post/election into a <select>.
    raw_id_fields = ("post", "election")
    ordering = ("election", "post__label")
| DemocracyClub/yournextrepresentative | ynr/apps/candidates/admin.py | Python | agpl-3.0 | 1,386 |
"""Root URL configuration for the demo project (legacy ``patterns()`` style)."""
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()  # register ModelAdmins from installed apps
urlpatterns = patterns('',
    # Site root serves a static template.
    url(r'^$', TemplateView.as_view(template_name='index.html')),
    # Examples:
    # url(r'^$', 'demo.views.home', name='home'),
    # url(r'^demo/', include('demo.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^traceability/', include('traceability.urls', namespace='traceability')),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Debug-toolbar URLs are only wired up when DEBUG is on.
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
| vandorjw/django-traceability | demo/demo/urls.py | Python | bsd-3-clause | 1,137 |
#!/usr/bin/python
import sys
import argparse
import sql
import process_xls as p_xls
# Fallback identifiers, unused in this script's main path.
DEFAULT_EXPERIMENT_ID = 1
""" Change to whatever is needed. """
DEFAULT_DATE_STR = ''
# Target database/table for the imported climate rows.
DB_NAME = 'trost_prod'
TABLE_NAME = 'temps'
# column name in xls: (order, column name in sql, cast function[, lookup function])
columns_d = {
    'Datum': (0, 'datum', str),
    'Regen': (1, 'percipitation', float),
    'Bewaesserung': (2, 'irrigation', float),# lambda x: 0 if x == 'NULL' or x == '' else x),
    'Tmin': (3, 'tmin', float),
    'Tmax': (4, 'tmax', float),
    'StandortID': (5, 'location_id', int),
    'invalid': (6, 'invalid', str),
}
###
def main(argv):
    """Import climate spreadsheets into the ``temps`` SQL table.

    ``argv`` is the command line without the program name: one or more
    xls file paths plus an optional ``--pages`` count.  Every requested
    page of every workbook is read and written out as SQL rows.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('files', nargs='+')
    # type=int is required: values supplied on the command line arrive as
    # strings, and range(args.pages) below would raise TypeError on a str.
    parser.add_argument('--pages', type=int, default=1)
    args = parser.parse_args(argv)
    for fn in args.files:
        for page in range(args.pages):
            data, _headers = p_xls.read_xls_data(fn, page)
            sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, add_id=True)
    return None
if __name__ == '__main__': main(sys.argv[1:])  # CLI entry point; argv without the program name
| ingkebil/trost | scripts/update/import_climate.py | Python | gpl-2.0 | 1,110 |
# -*- coding: utf-8 -*-
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of ws4py nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__author__ = "Sylvain Hellegouarch"
__version__ = "0.1.4"
# Names exported by "from ws4py import *".
__all__ = ['WS_KEY']
# Fixed GUID appended to the client's Sec-WebSocket-Key during the opening
# handshake, as defined by the WebSocket protocol (RFC 6455).
WS_KEY = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
| progrium/WebSocket-for-Python | ws4py/__init__.py | Python | bsd-3-clause | 1,625 |
from phystricks import *
def DeuxCercles():
    """Draw two radius-0.5 circles centred at (0, 0.5) and (0.5, 0), with
    their overlapping lens outlined in red and hatched in light gray, then
    write the figure file."""
    pspict, fig = SinglePicture("DeuxCercles")
    centre_upper = Point(0, 0.5)
    centre_right = Point(0.5, 0)
    circle_upper = Circle(centre_upper, 0.5)
    circle_right = Circle(centre_right, 0.5)
    # The two red arcs bound the intersection lens.
    arc_upper = circle_upper.parametric_curve(-pi / 2, 0)
    arc_right = circle_right.parametric_curve(pi / 2, -pi)
    for arc in (arc_upper, arc_right):
        arc.parameters.color = "red"
    lens = CustomSurface(arc_upper, arc_right)
    lens.parameters.hatched()
    lens.parameters.hatch.color = "lightgray"
    pspict.DrawGraphs(lens, circle_upper, circle_right, arc_upper, arc_right)
    pspict.axes.Dx = 0.5
    pspict.axes.Dy = 0.5
    pspict.DrawDefaultAxes()
    pspict.dilatation(2)
    fig.conclude()
    fig.write_the_file()
| Naereen/mazhe | phystricksDeuxCercles.py | Python | gpl-3.0 | 540 |
from ..broker import Broker
class NetworkBroker(Broker):
controller = "networks"
def show(self, **kwargs):
"""Shows the details for the specified network.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier of a network.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return network: The network identified by the specified id.
:rtype network: Network
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available networks. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier of a network.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier of a network.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, description, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Network. Valid values are id, name, description, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return networks: An array of the Network objects that match the specified input criteria.
:rtype networks: Array of Network
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available networks matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param description: The description of a network.
:type description: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: The description of a network.
:type description: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier of a network.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier of a network.
:type id: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param name: The name of a network.
:type name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: The name of a network.
:type name: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, description, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Network. Valid values are id, name, description, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against networks, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: created_at, description, id, name, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return networks: An array of the Network objects that match the specified input criteria.
:rtype networks: Array of Network
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available networks matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: created_at, description, id, name, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: The description of a network. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier of a network. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_name: The operator to apply to the field name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name: The name of a network. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_name: If op_name is specified, the field named in this input will be compared to the value in name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name must be specified if op_name is specified.
:type val_f_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_name: If op_name is specified, this value will be compared to the value in name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name must be specified if op_name is specified.
:type val_c_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, description, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Network. Valid values are id, name, description, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return networks: An array of the Network objects that match the specified input criteria.
:rtype networks: Array of Network
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/network_broker.py | Python | apache-2.0 | 23,895 |
import re
from scrubadub.detectors.base import RegexDetector
from scrubadub.filth import TaxReferenceNumberFilth
class TaxReferenceNumberDetector(RegexDetector):
    """Detect UK PAYE temporary reference numbers (TRNs) via regex matching.

    This is simple pattern matching only; no checksum validation is applied,
    so false positives are possible.
    """

    name = 'tax_reference_number'
    filth_cls = TaxReferenceNumberFilth

    # Two digits, an optional space, a single letter, then five digits that
    # may each be surrounded by whitespace.
    # NOTE(review): the original comment described this as a NINO pattern,
    # but the digits-first layout matched here is the TRN format.
    regex = re.compile(r'''\d{2}\s?[a-zA-Z]{1}(?:\s*\d\s*){5}''', re.IGNORECASE)

    @classmethod
    def supported_locale(cls, locale: str) -> bool:
        """Returns true if this ``Detector`` supports the given locale.

        :param locale: The locale of the documents in the format: 2 letter
            lower-case language code followed by an underscore and the two
            letter upper-case country code, eg "en_GB" or "de_CH".
        :type locale: str
        :return: ``True`` if the locale is supported, otherwise ``False``
        :rtype: bool
        """
        _language, region = cls.locale_split(locale)
        return region == 'GB'
| deanmalmgren/scrubadub | scrubadub/detectors/en_GB/tax_reference_number.py | Python | mit | 1,121 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to plain array data.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.platform import tf_logging as logging
# scipy is an optional dependency: `issparse` is used below to detect scipy
# sparse-matrix inputs that must be converted to dense arrays before feeding.
# When scipy is unavailable, the sparse-to-dense conversion paths are skipped.
try:
  from scipy.sparse import issparse  # pylint: disable=g-import-not-at-top
except ImportError:
  issparse = None
def model_iteration(model,
                    inputs,
                    targets=None,
                    sample_weights=None,
                    batch_size=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    val_inputs=None,
                    val_targets=None,
                    val_sample_weights=None,
                    shuffle=True,
                    initial_epoch=0,
                    steps_per_epoch=None,
                    validation_steps=None,
                    validation_freq=1,
                    mode=ModeKeys.TRAIN,
                    validation_in_fit=False,
                    prepared_feed_values_from_dataset=False,
                    steps_name='steps',
                    **kwargs):
  """Loop function for arrays of data with modes TRAIN/TEST/PREDICT.

  Arguments:
      model: Keras Model instance.
      inputs: Either a list or dictionary of arrays, or a dataset instance.
      targets: List/dictionary of input arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      val_inputs: Either a list or dictionary of arrays, or a dataset instance.
      val_targets: List/dictionary of target arrays.
      val_sample_weights: Optional list of sample weight arrays.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      initial_epoch: Epoch at which to start training (useful for resuming a
        previous training run)
      steps_per_epoch: Total number of steps (batches of samples) before
        declaring one epoch finished and starting the next epoch. Ignored with
        the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with the default value of
        `None`.
      validation_freq: Only relevant if validation data is provided. Integer or
        `collections.Container` instance (e.g. list, tuple, etc.). If an
        integer, specifies how many training epochs to run before a new
        validation run is performed, e.g. `validation_freq=2` runs
        validation every 2 epochs. If a Container, specifies the epochs on
        which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
        validation at the end of the 1st, 2nd, and 10th epochs.
      mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
      validation_in_fit: if true, then this method is invoked from within
        training iteration (for validation). In the case where `val_inputs` is
        a dataset, this flag indicates that its iterator and feed values are
        already created so should properly reuse resources.
      prepared_feed_values_from_dataset: if True, `inputs` is a list of feed
        tensors returned from `_prepare_feed_values` call on the validation
        dataset, so do not call it again on `inputs`. Should only be used for
        inline validation (i.e., only if `validation_in_fit` is also True).
      steps_name: The string name of the steps argument, either `steps`,
        `validation_steps`, or `steps_per_epoch`. Only used for error message
        formatting.
      **kwargs: Additional arguments for backwards compatibility.

  Returns:
      - In TRAIN mode: `History` object.
      - In TEST mode: Evaluation metrics.
      - In PREDICT mode: Outputs of the Model called on inputs.

  Raises:
      ValueError: in case of invalid arguments.
  """
  # Backwards compatibility: accept the legacy `steps` kwarg as an alias for
  # `steps_per_epoch`; any other unexpected kwarg is an error.
  if 'steps' in kwargs:
    steps_per_epoch = kwargs.pop('steps')
  if kwargs:
    raise TypeError('Unknown arguments: %s' % (kwargs,))

  # In case we were passed a dataset, we extract symbolic tensors from it.
  reset_dataset_after_each_epoch = False
  input_iterator = None
  is_dataset = isinstance(inputs,
                          (dataset_ops.DatasetV1, dataset_ops.DatasetV2))
  # TODO(fchollet): consider moving `steps_per_epoch` inference to
  # _standardize_user_data and set reset_dataset_after_each_epoch as an
  # attribute on the dataset instance.
  if is_dataset:
    if steps_per_epoch is None:
      # Cardinality unknown up front: iterate until OutOfRangeError and
      # re-initialize the iterator between epochs.
      reset_dataset_after_each_epoch = True
      steps_per_epoch = training_utils.infer_steps_for_dataset(
          inputs, steps_per_epoch, epochs=epochs, steps_name=steps_name)
    input_iterator = _get_iterator(inputs, model._distribution_strategy)

  if mode == ModeKeys.TRAIN:
    _print_train_info(inputs, val_inputs, steps_per_epoch, verbose)

  # Enter DistributionStrategy scope.
  if model._distribution_strategy:
    scope = distributed_training_utils.distributed_scope(
        strategy=model._distribution_strategy,
        learning_phase=(1 if mode == ModeKeys.TRAIN else 0))
    scope.__enter__()

  # Get step function and loop type.
  f = _make_execution_function(model, mode)
  use_steps = is_dataset or steps_per_epoch is not None
  do_validation = val_inputs is not None

  # Convert Eager Tensors to NumPy arrays to support batching/shuffling.
  inputs, targets, sample_weights = training_utils. \
      convert_eager_tensors_to_numpy((inputs, targets, sample_weights))

  # Prepare input data. From here on, dataset inputs are represented by their
  # iterator.
  inputs = input_iterator or inputs
  if validation_in_fit and prepared_feed_values_from_dataset:
    # When invoking validation in training loop, avoid creating iterator and
    # list of feed values for the same validation dataset multiple times (which
    # essentially would call `iterator.get_next()` that slows down execution and
    # leads to OOM errors eventually.
    ins = inputs
  else:
    ins = _prepare_feed_values(model, inputs, targets, sample_weights, mode)
  if not is_dataset:
    num_samples_or_steps = _get_num_samples_or_steps(ins, batch_size,
                                                     steps_per_epoch)
  else:
    num_samples_or_steps = steps_per_epoch

  # Prepare validation data. Hold references to the iterator and the input list
  # to properly reinitialize and reuse in multiple validation passes.
  val_iterator = None
  if isinstance(val_inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
    if validation_steps is None:
      # Because we pass an iterator feed instead of a Dataset to the eval
      # model_iteration() call, it will not trigger the dataset-input path
      # that determines the number of steps required. To avoid this issue,
      # set validation_steps here if validation_steps is None.
      validation_steps = training_utils.infer_steps_for_dataset(
          val_inputs,
          validation_steps,
          epochs=epochs,
          steps_name='validation_steps')
    val_iterator = _get_iterator(val_inputs, model._distribution_strategy)
    val_inputs = _prepare_feed_values(
        model, val_iterator, val_targets, val_sample_weights, ModeKeys.TEST)

  # Configure callbacks.
  count_mode = 'steps' if use_steps else 'samples'
  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      batch_size=batch_size,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      samples=num_samples_or_steps,
      verbose=0,  # Handle ProgBarLogger separately in this loop.
      mode=mode)
  # TODO(omalleyt): Handle ProgBar as part of Callbacks once hooks are ready.
  progbar = training_utils.get_progbar(model, count_mode)
  progbar.params = callbacks.params
  progbar.params['verbose'] = verbose

  # Find beforehand arrays that need sparse-to-dense conversion.
  if issparse is not None and not use_steps:
    indices_for_conversion_to_dense = []
    feed = _get_model_feed(model, mode)
    for i, (input_data, feed_tensor) in enumerate(zip(ins, feed)):
      if issparse(input_data) and not K.is_sparse(feed_tensor):
        indices_for_conversion_to_dense.append(i)

  # Select aggregation method: PREDICT concatenates per-batch outputs,
  # TRAIN/TEST average per-batch metrics.
  if mode == ModeKeys.PREDICT:
    aggregator = training_utils.OutputsAggregator(use_steps,
                                                  num_samples_or_steps)
  else:
    aggregator = training_utils.MetricsAggregator(use_steps,
                                                  num_samples_or_steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  callbacks.model.stop_training = False
  callbacks._call_begin_hook(mode)
  progbar.on_train_begin()

  # Main epoch loop.
  for epoch in range(initial_epoch, epochs):
    if callbacks.model.stop_training:
      break

    # Setup work for each epoch
    epoch_logs = {}
    model.reset_metrics()
    if mode == ModeKeys.TRAIN:
      callbacks.on_epoch_begin(epoch, epoch_logs)
    progbar.on_epoch_begin(epoch, epoch_logs)

    if use_steps:
      # Step-wise loop.
      if steps_per_epoch is None:
        # Loop over dataset until `OutOfRangeError` is raised.
        target_steps = np.inf
      else:
        # Loop over dataset for the specified number of steps.
        target_steps = steps_per_epoch

      step = 0
      while step < target_steps:
        batch_logs = {'batch': step, 'size': 1}
        callbacks._call_batch_hook(mode, 'begin', step, batch_logs)
        progbar.on_batch_begin(step, batch_logs)

        # Get outputs.
        try:
          # `ins` can be callable in DistributionStrategy + eager case.
          actual_inputs = ins() if callable(ins) else ins
          batch_outs = f(actual_inputs)
        except errors.OutOfRangeError:
          if is_dataset:
            # The dataset passed by the user ran out of batches.
            # Now we know the cardinality of the dataset.
            # If steps_per_epoch was specified, then running out of data is
            # unexpected, so we stop training and inform the user.
            if steps_per_epoch:
              callbacks.model.stop_training = True
              logging.warning(
                  'Your dataset ran out of data; interrupting training. '
                  'Make sure that your dataset can generate at least '
                  '`%s * epochs` batches (in this case, %d batches). '
                  'You may need to use the repeat() function when '
                  'building your dataset.'
                  % (steps_name, steps_per_epoch * epochs))
            elif step > 0:
              # Cardinality discovered at runtime: remember it so later
              # epochs and the progress bar use the true step count.
              steps_per_epoch = step
              aggregator.num_samples_or_steps = steps_per_epoch
              if mode == ModeKeys.TRAIN:
                progbar.params['steps'] = steps_per_epoch
                progbar.progbar.target = steps_per_epoch
          else:
            # We ran out of batches while the user passed an iterator (legacy).
            callbacks.model.stop_training = True
            logging.warning(
                'Your dataset iterator ran out of data; '
                'interrupting training. Make sure that your iterator '
                'can generate at least `%s * epochs` '
                'batches (in this case, %d batches). You may need to'
                'use the repeat() function when building your '
                'dataset.' % (steps_name, steps_per_epoch * epochs))
          break

        if not isinstance(batch_outs, list):
          batch_outs = [batch_outs]

        if model._distribution_strategy:
          batch_outs = distributed_training_utils._per_device_aggregate_batch(
              batch_outs, model, mode)

        # Aggregate results.
        if step == 0:
          aggregator.create(batch_outs)
        aggregator.aggregate(batch_outs)

        # Callbacks batch end.
        batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
        callbacks._call_batch_hook(mode, 'end', step, batch_logs)
        progbar.on_batch_end(step, batch_logs)
        step += 1

        if callbacks.model.stop_training:
          break
    else:
      # Sample-wise loop.
      index_array = np.arange(num_samples_or_steps)
      if shuffle == 'batch':
        index_array = training_utils.batch_shuffle(index_array, batch_size)
      elif shuffle:
        np.random.shuffle(index_array)
      batches = make_batches(num_samples_or_steps, batch_size)

      for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]

        # Slice into a batch.
        try:
          if ins and isinstance(ins[-1], int):
            # Do not slice the training phase flag.
            ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
          else:
            ins_batch = slice_arrays(ins, batch_ids)
        except TypeError:
          raise TypeError('TypeError while preparing batch. '
                          'If using HDF5 input data, '
                          'pass shuffle="batch".')

        # Sparse to dense conversion.
        if issparse is not None:
          for i in indices_for_conversion_to_dense:
            ins_batch[i] = ins_batch[i].toarray()

        # Callbacks batch_begin.
        batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
        callbacks._call_batch_hook(mode, 'begin', batch_index, batch_logs)
        progbar.on_batch_begin(batch_index, batch_logs)

        # Get outputs.
        batch_outs = f(ins_batch)
        if not isinstance(batch_outs, list):
          batch_outs = [batch_outs]

        # Aggregate results.
        if batch_index == 0:
          aggregator.create(batch_outs)
        aggregator.aggregate(batch_outs, batch_start, batch_end)

        # Callbacks batch end.
        batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
        callbacks._call_batch_hook(mode, 'end', batch_index, batch_logs)
        progbar.on_batch_end(batch_index, batch_logs)

        if callbacks.model.stop_training:
          break

    aggregator.finalize()
    results = aggregator.results
    epoch_logs = cbks.make_logs(model, epoch_logs, results, mode)
    if len(results) == 1:
      results = results[0]

    # Run the test loop every `validation_freq` epochs during training.
    if (do_validation and
        training_utils.should_run_validation(validation_freq, epoch) and
        not callbacks.model.stop_training):

      if model._compile_distribution:
        # Since we create a new clone from the original model we need to copy
        # the weights back to the original model before we can run validation.
        distributed_training_utils._copy_weights_to_original_model(
            model, ModeKeys.TRAIN)

      # Recursive call in TEST mode; validation_in_fit=True reuses resources.
      val_results = model_iteration(
          model,
          val_inputs,
          targets=val_targets,
          sample_weights=val_sample_weights,
          batch_size=batch_size,
          steps_per_epoch=validation_steps,
          callbacks=callbacks,
          verbose=0,
          mode=ModeKeys.TEST,
          validation_in_fit=True,
          prepared_feed_values_from_dataset=(val_iterator is not None),
          steps_name='validation_steps')
      if not isinstance(val_results, list):
        val_results = [val_results]
      epoch_logs = cbks.make_logs(
          model, epoch_logs, val_results, mode, prefix='val_')

      if val_iterator and epoch < epochs - 1:
        _reinitialize_iterator(val_iterator, model._distribution_strategy)

    if mode == ModeKeys.TRAIN:
      # Epochs only apply to `fit`.
      callbacks.on_epoch_end(epoch, epoch_logs)
    progbar.on_epoch_end(epoch, epoch_logs)

    # Reinitialize dataset iterator for the next epoch.
    if reset_dataset_after_each_epoch and epoch < epochs - 1:
      _reinitialize_iterator(input_iterator, model._distribution_strategy)

  callbacks._call_end_hook(mode)

  if model._distribution_strategy:
    if model._compile_distribution:
      # TODO(priyag, psv): Copy back metrics to the original model as well?
      distributed_training_utils._copy_weights_to_original_model(model, mode)
    scope.__exit__(None, None, None)

  if mode == ModeKeys.TRAIN:
    return model.history
  return results
def _get_model_feed(model, mode):
  """Return the model's feed tensors for `mode`.

  PREDICT feeds only inputs; TRAIN/TEST additionally feed targets and
  sample weights.
  """
  if mode == ModeKeys.PREDICT:
    return model._feed_inputs
  return (model._feed_inputs + model._feed_targets +
          model._feed_sample_weights)
def _print_train_info(inputs, val_inputs, steps_per_epoch, verbose):
if (val_inputs and steps_per_epoch is None and verbose and inputs and
hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
print('Train on %d samples, validate on %d samples' %
(inputs[0].shape[0], val_inputs[0].shape[0]))
def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch):
"""Returns total number of samples (when training in batch mode) or steps."""
if steps_per_epoch:
return steps_per_epoch
return training_utils.check_num_samples(ins, batch_size, steps_per_epoch,
'steps_per_epoch')
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
  """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: List or dict of model inputs.
    targets: Optional list of model targets.
    sample_weights: Optional list of sample weight arrays.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.

  Returns:
    Feed values for the model in the given mode.
  """
  if model._distribution_strategy:
    if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
      inputs = distributed_training_utils.get_iterator(
          inputs, model._distribution_strategy)

    def distributed_feed_fn():
      return distributed_training_utils._prepare_feed_values(
          model, inputs, targets, sample_weights, mode)

    # Under eager execution, feed values must be fetched once per step, so the
    # callable itself is returned; in graph mode it is invoked once here.
    # Distribution Strategy shares this code path for both execution modes.
    return (distributed_feed_fn
            if context.executing_eagerly() else distributed_feed_fn())

  if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
                         iterator_ops.Iterator)):
    inputs, targets, sample_weights = model._standardize_user_data(
        inputs,
        extract_tensors_from_dataset=True)

  # Flatten inputs + targets + sample weights into a single feed list.
  ins = (training_utils.ModelInputs(inputs).as_list() +
         list(targets or []) + list(sample_weights or []))
  if mode == ModeKeys.TRAIN and not isinstance(K.symbolic_learning_phase(),
                                               int):
    ins += [True]  # Add learning phase value.
  return ins
def _get_iterator(inputs, distribution_strategy=None):
  """Create an iterator over `inputs`, distribution-aware when needed."""
  if not distribution_strategy:
    return training_utils.get_iterator(inputs)
  return distributed_training_utils.get_iterator(inputs, distribution_strategy)
def _reinitialize_iterator(iterator, distribution_strategy=None):
  """Reset `iterator` so the next epoch reads from the beginning."""
  if not distribution_strategy:
    training_utils.initialize_iterator(iterator)
  else:
    distributed_training_utils.initialize_iterator(
        iterator, distribution_strategy)
def _make_execution_function(model, mode):
"""Makes function to run one step of model execution."""
if model._distribution_strategy:
return distributed_training_utils._make_execution_function(model, mode)
return model._make_execution_function(mode)
# For backwards compatibility for internal users of these loops.
# These are `model_iteration` with the mode pre-bound; evaluation and
# prediction never shuffle their inputs.
fit_loop = functools.partial(model_iteration, mode=ModeKeys.TRAIN)
test_loop = functools.partial(
    model_iteration, mode=ModeKeys.TEST, shuffle=False)
predict_loop = functools.partial(
    model_iteration, mode=ModeKeys.PREDICT, shuffle=False)
| kevin-coder/tensorflow-fork | tensorflow/python/keras/engine/training_arrays.py | Python | apache-2.0 | 21,792 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import errorreporting_v1beta1
from google.cloud.errorreporting_v1beta1.proto import common_pb2
from google.cloud.errorreporting_v1beta1.proto import error_stats_service_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records every request on the owning channel stub's ``requests`` list and
    replays canned responses popped from its ``responses`` list.
    """

    def __init__(self, method, channel_stub):
        # Fully-qualified gRPC method name this callable represents.
        self.method = method
        # Owning channel stub; holds the shared request log and responses.
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Record the call and return (or raise) the next canned response."""
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            response = self.channel_stub.responses.pop()

        # Canned exceptions are raised instead of returned.
        if isinstance(response, Exception):
            raise response

        # BUG FIX: the original `if response: return response` silently
        # dropped falsy responses (e.g. 0, '', b'') and returned None.
        # Return the popped response unconditionally; None when no canned
        # response remains.
        return response
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Holds a queue of canned ``responses`` and a log of issued ``requests``
    shared by all callables created from this channel.
    """

    def __init__(self, responses=None):
        # BUG FIX: the original signature used a mutable default argument
        # (`responses=[]`), so every instance created without arguments
        # shared the same list object — and MultiCallableStub pops from it,
        # letting instances interfere with each other. A caller-supplied
        # list is still kept by reference so tests can inspect/extend it.
        self.responses = responses if responses is not None else []
        self.requests = []

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """Return a stub callable that records its requests on this channel."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Sentinel exception raised by channel stubs to simulate RPC failures."""
class TestErrorStatsServiceClient(object):
    """Unit tests for ErrorStatsServiceClient run against a stubbed channel.

    Each test patches grpc channel creation so the client talks to
    ChannelStub, then verifies both the returned value and the exact
    request proto that was sent.
    """
    def test_list_group_stats(self):
        # Setup Expected Response
        next_page_token = ''
        error_group_stats_element = {}
        error_group_stats = [error_group_stats_element]
        expected_response = {
            'next_page_token': next_page_token,
            'error_group_stats': error_group_stats
        }
        expected_response = error_stats_service_pb2.ListGroupStatsResponse(
            **expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = errorreporting_v1beta1.ErrorStatsServiceClient()
        # Setup Request
        project_name = client.project_path('[PROJECT]')
        time_range = {}
        paged_list_response = client.list_group_stats(project_name, time_range)
        # Consuming the pager triggers the (stubbed) RPC.
        resources = list(paged_list_response)
        assert len(resources) == 1
        assert expected_response.error_group_stats[0] == resources[0]
        assert len(channel.requests) == 1
        expected_request = error_stats_service_pb2.ListGroupStatsRequest(
            project_name=project_name, time_range=time_range)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_list_group_stats_exception(self):
        """Errors queued on the channel surface when the pager is consumed."""
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = errorreporting_v1beta1.ErrorStatsServiceClient()
        # Setup request
        project_name = client.project_path('[PROJECT]')
        time_range = {}
        paged_list_response = client.list_group_stats(project_name, time_range)
        with pytest.raises(CustomException):
            list(paged_list_response)
    def test_list_events(self):
        # Setup Expected Response
        next_page_token = ''
        error_events_element = {}
        error_events = [error_events_element]
        expected_response = {
            'next_page_token': next_page_token,
            'error_events': error_events
        }
        expected_response = error_stats_service_pb2.ListEventsResponse(
            **expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = errorreporting_v1beta1.ErrorStatsServiceClient()
        # Setup Request
        project_name = client.project_path('[PROJECT]')
        group_id = 'groupId506361563'
        paged_list_response = client.list_events(project_name, group_id)
        resources = list(paged_list_response)
        assert len(resources) == 1
        assert expected_response.error_events[0] == resources[0]
        assert len(channel.requests) == 1
        expected_request = error_stats_service_pb2.ListEventsRequest(
            project_name=project_name, group_id=group_id)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_list_events_exception(self):
        """Errors queued on the channel surface when the pager is consumed."""
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = errorreporting_v1beta1.ErrorStatsServiceClient()
        # Setup request
        project_name = client.project_path('[PROJECT]')
        group_id = 'groupId506361563'
        paged_list_response = client.list_events(project_name, group_id)
        with pytest.raises(CustomException):
            list(paged_list_response)
    def test_delete_events(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = error_stats_service_pb2.DeleteEventsResponse(
            **expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = errorreporting_v1beta1.ErrorStatsServiceClient()
        # Setup Request
        project_name = client.project_path('[PROJECT]')
        response = client.delete_events(project_name)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = error_stats_service_pb2.DeleteEventsRequest(
            project_name=project_name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_delete_events_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = errorreporting_v1beta1.ErrorStatsServiceClient()
        # Setup request
        project_name = client.project_path('[PROJECT]')
        with pytest.raises(CustomException):
            client.delete_events(project_name)
| jonparrott/google-cloud-python | error_reporting/tests/unit/gapic/v1beta1/test_error_stats_service_client_v1beta1.py | Python | apache-2.0 | 7,368 |
from __future__ import absolute_import
import sys
import pytest
import myhdl
from myhdl import *
from myhdl import ConversionError
from myhdl.conversion._misc import _error
from myhdl.conversion import analyze, verify
import myhdl
from myhdl import *
"""
This set of tests exercises a peculiar scenario where an
expanded interface Signal is flagged as having multiple
drivers. This appears to be a name collision in the name
expansion and was introduced in 08519b4.
"""
class Intf1(object):
    """Interface bundle: two boolean flags and an 8-bit modular counter."""
    def __init__(self):
        self.sig1 = Signal(bool(0))
        self.sig2 = Signal(bool(0))
        self.sig3 = Signal(modbv(0)[8:])
class Intf2(object):
    """Like Intf1 but with a nested Intf1, to exercise deeper name expansion."""
    def __init__(self):
        self.sig1 = Signal(bool(0))
        self.sig2 = Signal(bool(0))
        self.sig3 = Signal(modbv(0)[8:])
        self.intf = Intf1()
@block
def mod1(clock, reset, intf1, intf2):
    """First pipeline stage: register intf1's flags and drive intf2."""
    sig1 = Signal(bool(0))
    sig2 = Signal(bool(0))
    @always_seq(clock.posedge, reset)
    def proc():
        # Complementary local flags derived from intf1.sig1.
        if intf1.sig1:
            sig1.next = True
            sig2.next = False
        else:
            sig1.next = False
            sig2.next = True
        intf2.sig1.next = sig1
        intf2.sig2.next = sig2 or intf1.sig2
        # Bitwise complement of the 8-bit counter.
        intf2.sig3.next = ~intf1.sig3
    return proc
@block
def mod2(clock, reset, intf1, intf2):
    """Second pipeline stage; the if/else below used to trip the converter."""
    @always_seq(clock.posedge, reset)
    def proc():
        # remove the if/else and leave just the line in the
        # if clause the error does not occur, include the if/else
        # and the error occurs
        if intf1.sig3 > 0: # remove no error
            intf2.sig1.next = not intf1.sig1
            intf2.sig2.next = not intf1.sig2
            intf2.sig3.next = intf1.sig3 + intf2.sig3
        else: # remove no error
            intf2.sig3.next = 0 # remove no error
    return proc
@block
def m_top(clock, reset, sdi, sdo):
    """Top level: shift sdi through mod1 and mod2, reduce intf3 to sdo."""
    intf1 = Intf1()
    intf2 = Intf2()
    intf3 = Intf1()
    g1 = mod1(clock, reset, intf1, intf2)
    g2 = mod2(clock, reset, intf2, intf3)
    @always_seq(clock.posedge, reset)
    def assigns():
        intf1.sig1.next = sdi
        intf1.sig2.next = not sdi
        # Shift register: drop the MSB, shift sdi in at the LSB.
        intf1.sig3.next = concat(intf1.sig3[7:1], sdi)
        sdo.next = intf3.sig1 | intf3.sig2 | intf3.sig3[2]
    return g1, g2, assigns
@pytest.mark.verify_convert
@block
def test_top():
    """ yet another interface test.
    This test is used to expose a particular bug that was discovered
    during the development of interface conversion.  The structure
    used in this example caused an invalid multiple-driver error.
    """
    clock = Signal(bool(0))
    # NOTE(review): `async` became a reserved word in Python 3.7; recent
    # myhdl versions spell this argument `isasync` -- confirm the targeted
    # Python/myhdl versions before running under Python 3.7+.
    reset = ResetSignal(0, active=1, async=False)
    sdi = Signal(bool(0))
    sdo = Signal(bool(0))
    tbdut = m_top(clock, reset, sdi, sdo)
    @instance
    def tbclk():
        clock.next = False
        while True:
            yield delay(3)
            clock.next = not clock
    # there is an issue when using bools with variables and
    # VHDL conversion, this might be an expected limitation?
    #expected = (False, False, False, True, True, True,
    #            False, True, False, True)
    expected = (0, 0, 0, 1, 1, 1, 0, 1, 0, 1)
    ra = reset.active
    @instance
    def tbstim():
        sdi.next = False
        reset.next = ra
        yield delay(13)
        reset.next = not ra
        yield clock.posedge
        # Toggle sdi each cycle and check sdo against the expected pattern.
        for ii in range(10):
            print("sdi: %d, sdo: %d" % (sdi, sdo))
            expected_bit = expected[ii]
            assert sdo == expected_bit
            sdi.next = not sdi
            yield clock.posedge
        raise StopSimulation
    return tbclk, tbstim, tbdut
if __name__ == '__main__':
    # Usage: python test_interfaces4.py <simulator>
    print(sys.argv[1])
    verify.simulator = analyze.simulator = sys.argv[1]
    # Bug fix: the original called four test_one_*/test_conversion helpers
    # that are not defined anywhere in this module (guaranteed NameError);
    # run the testbench that this module actually defines.
    print("*** verify testbench conversion and execution")
    test_top()
| juhasch/myhdl | myhdl/test/conversion/general/test_interfaces4.py | Python | lgpl-2.1 | 4,065 |
import numpy as np
import sklearn.cluster as skl_cluster
from sklearn.metrics import silhouette_score
from Orange.data import Table, DiscreteVariable, Domain, Instance
from Orange.projection import SklProjector, Projection
from Orange.distance import Euclidean
__all__ = ["KMeans"]
class KMeans(SklProjector):
    """k-means clustering projector wrapping scikit-learn's KMeans."""
    __wraps__ = skl_cluster.KMeans
    def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
                 tol=0.0001, random_state=None, preprocessors=None):
        super().__init__(preprocessors=preprocessors)
        # Capture constructor arguments for the wrapped sklearn estimator.
        # NOTE(review): vars() also includes 'self' and 'preprocessors';
        # presumably SklProjector filters those before use -- confirm.
        self.params = vars()
    def fit(self, X, Y=None):
        """Fit k-means on X and return a KMeansModel annotated with scores."""
        proj = skl_cluster.KMeans(**self.params)
        if isinstance(X, Table):
            proj = proj.fit(X.X, Y)
            # Mean silhouette over all samples; higher = tighter clusters.
            proj.silhouette = silhouette_score(X.X, proj.labels_)
        else:
            proj = proj.fit(X, Y)
            proj.silhouette = silhouette_score(X, proj.labels_)
        # Normalize inertia by sample count so datasets are comparable.
        proj.inertia = proj.inertia_ / len(X)
        cluster_dist = Euclidean(proj.cluster_centers_).X
        # Mean pairwise centroid distance (strict upper triangle).
        proj.inter_cluster = np.mean(cluster_dist[np.triu_indices_from(cluster_dist, 1)])
        return KMeansModel(proj, self.preprocessors)
class KMeansModel(Projection):
    """Fitted k-means model: maps data to a single 'Cluster id' column."""

    def __init__(self, proj, preprocessors=None):
        super().__init__(proj=proj)
        self.k = self.proj.get_params()["n_clusters"]
        self.centroids = self.proj.cluster_centers_

    def __call__(self, data):
        """Assign cluster ids to a Table, an Instance, or a raw array."""
        if isinstance(data, Table):
            if data.domain is not self.pre_domain:
                # Bug fix: this read `self.pkmre_domain` (typo), which
                # raised AttributeError whenever the domains differed.
                data = Table(self.pre_domain, data)
            c = DiscreteVariable(name='Cluster id', values=range(self.k))
            domain = Domain([c])
            return Table(
                domain,
                self.proj.predict(data.X).astype(int).reshape((len(data), 1)))
        elif isinstance(data, Instance):
            if data.domain is not self.pre_domain:
                data = Instance(self.pre_domain, data)
            c = DiscreteVariable(name='Cluster id', values=range(self.k))
            domain = Domain([c])
            return Table(
                domain,
                np.atleast_2d(self.proj.predict(data._x)).astype(int))
        else:
            # Plain numpy input: return an (n, 1) column of cluster ids.
            return self.proj.predict(data).reshape((len(data), 1))
| qusp/orange3 | Orange/clustering/kmeans.py | Python | bsd-2-clause | 2,246 |
__author__ = 'allentran'
import json
import os
import re
import datetime
import unidecode
from spacy.en import English
import requests
import pandas as pd
import numpy as np
import allen_utils
logger = allen_utils.get_logger(__name__)
class Interval(object):
    """Closed date interval [start, end]; both endpoints inclusive."""

    def __init__(self, start, end):
        assert isinstance(start, datetime.date) and isinstance(end, datetime.date)
        self.start = start
        self.end = end

    def contains(self, new_date):
        """Return True if *new_date* falls within this interval."""
        assert isinstance(new_date, datetime.date)
        return self.start <= new_date <= self.end


# Tenure intervals of successive Fed chairman regimes, keyed by regime index.
fed_regimes = {
    0: Interval(datetime.date(1951, 4, 2), datetime.date(1970, 1, 31)),
    1: Interval(datetime.date(1970, 2, 1), datetime.date(1978, 3, 7)),
    2: Interval(datetime.date(1978, 3, 8), datetime.date(1979, 8, 6)),
    3: Interval(datetime.date(1979, 8, 7), datetime.date(1987, 8, 11)),
    4: Interval(datetime.date(1987, 8, 12), datetime.date(2006, 1, 31)),
    5: Interval(datetime.date(2006, 2, 1), datetime.date(2020, 1, 31)),
}


def find_regime(date):
    """Return the regime index whose interval contains *date*.

    Raises ValueError when no regime matches.
    """
    # .items() works under both Python 2 and 3 (iteritems is py2-only).
    for regime, interval in fed_regimes.items():
        if interval.contains(date):
            return regime
    # Bug fix: the message and the date were passed as two arguments to
    # ValueError (logging style) so the date was never interpolated.
    raise ValueError("Could not find regime for date, %s" % date)
class PairedDocAndRates(object):
    """A Fed document (sentences of word ids) paired with future fed funds rates."""
    def __init__(self, date, sentences, is_minutes):
        self.date = date
        self.sentences = sentences  # list of sentences, each a list of word indexes
        self.is_minutes = is_minutes  # True for minutes, False otherwise
        self.rates = None  # populated by match_rates()
        self.regime = find_regime(date)
    def match_rates(self, rates_df, days = [30, 90, 180]):
        # NOTE(review): the mutable default `days` is shared across calls;
        # harmless here because it is never mutated, but worth confirming.
        def get_closest_rate(days_to_add):
            # Rate observation closest in time to date + days_to_add, or
            # None when that date lies beyond the end of the series.
            future_date = self.date + datetime.timedelta(days=days_to_add)
            diff = abs(future_date - rates_df['date'])
            if (last_available_date - future_date).total_seconds() >= 0:
                closest_index = diff.argmin()
                return float(rates_df.iloc[closest_index]['value'])
            else:
                return None
        future_rates = {}
        last_available_date = rates_df['date'].iloc[-1]
        current_rate = get_closest_rate(0)
        # NOTE(review): a legitimate 0.0 rate is falsy and is dropped by
        # these truthiness checks -- confirm that is intended.
        if current_rate:
            future_rates['0'] = current_rate
        for add_days in days:
            future_rate = get_closest_rate(add_days)
            if future_rate:
                future_rates[str(add_days)] = future_rate
        self.rates = future_rates
    def to_dict(self):
        # Serialize to plain types for JSON output.
        return dict(
            date = self.date.strftime('%Y-%m-%d'),
            sentences = self.sentences,
            rates = self.rates,
            is_minutes = self.is_minutes,
            regime = self.regime
        )
class Vocab(object):
    """Word-frequency accumulator mapping frequent words to integer ids."""

    def __init__(self):
        self.vocab = {}
        # Placeholder tokens always occupy the first id slots, in this order.
        self.special_words = [
            '$CARDINAL$',
            '$DATE$',
            '$UNKNOWN$'
        ]

    def update_count(self, word):
        """Increment the occurrence count for *word*."""
        self.vocab[word] = self.vocab.get(word, 0) + 1

    def to_dict(self, min_count=5):
        """Return a word -> index map for words seen at least *min_count* times.

        Special placeholder tokens always come first, at indexes 0..2.
        """
        position_dict = {word: idx for idx, word in enumerate(self.special_words)}
        counter = len(self.special_words)
        # .items() works under both Python 2 and 3 (iteritems is py2-only).
        for word, word_count in self.vocab.items():
            if word_count >= min_count:
                position_dict[word] = counter
                counter += 1
        return position_dict
class DataTransformer(object):
    """Pipeline: fetch FRED rates, build a vocabulary, and encode Fed docs.

    Documents under *data_dir* (filenames containing an 8-digit date) are
    tokenized with spaCy, mapped to word indexes, paired with future fed
    funds rates, and written out as JSON.
    """

    def __init__(self, data_dir, min_sentence_length):
        self.url = 'https://api.stlouisfed.org/fred/series/observations'
        self.data_dir = data_dir
        self.min_sentence_length = min_sentence_length
        # spaCy entity labels collapsed to placeholder tokens.
        self.replace_entities = {
            'DATE': '$DATE$',
            'CARDINAL': '$CARDINAL$'
        }
        self.nlp = English()
        # custom token replacement
        self.regexes = [
            (re.compile(r'\d{4}'), '$DATE$'),
            (re.compile(r'\d+[\.,]*\d+'), '$CARDINAL$')
        ]
        self.vocab = Vocab()
        self.word_positions = None
        self.rates = None
        self.docs = None

    def get_rates(self, api_key):
        """Download the FEDFUNDS series from FRED into self.rates."""
        params = dict(
            api_key=api_key,
            file_type='json',
            series_id='FEDFUNDS'
        )
        r = requests.get(self.url, params=params)
        if r.status_code == 200:
            self.rates = pd.DataFrame(r.json()['observations'])
            self.rates['date'] = self.rates['date'].apply(lambda s: datetime.datetime.strptime(s, '%Y-%m-%d').date())
            # Bug fix: DataFrame.sort returns a sorted copy; the original
            # discarded the result, leaving self.rates unsorted, which
            # breaks match_rates' assumption that .iloc[-1] is the latest.
            self.rates = self.rates.sort('date')

    def build_vocab(self):
        """Count document frequencies for tokens in every doc under data_dir."""
        def process_doc(doc_path):
            # Count each distinct token at most once per document.
            with open(doc_path, 'r') as f:
                text = unidecode.unidecode(unicode(f.read().decode('iso-8859-1')))
            text = ' '.join(text.split()).strip()
            if len(text) > 0:
                doc = self.nlp(unicode(text.lower()))
                doc_words = set()
                for sent in doc.sents:
                    if len(sent) > self.min_sentence_length:
                        # NOTE(review): this iterates the whole doc for each
                        # qualifying sentence (`for token in doc`, not `sent`);
                        # the doc_words guard keeps counts correct but it is
                        # likely meant to be `for token in sent` -- confirm.
                        for token in doc:
                            if token.text not in doc_words:
                                self.vocab.update_count(token.text)
                                doc_words.add(token.text)
        file_re = re.compile(r'\d{8}')
        for root, dirs, filenames in os.walk(self.data_dir):
            for filename in filenames:
                if file_re.search(filename):
                    filepath = os.path.join(root, filename)
                    process_doc(filepath)
                    logger.info("Built vocab from: %s", filepath)
        self.word_positions = self.vocab.to_dict()

    def strip_text(self, text):
        """Replace spaCy-recognized DATE/CARDINAL entities with placeholders."""
        doc = self.nlp(unicode(text).lower())
        # spacy entity replacement
        ents_dict = {ent.text: self.replace_entities[ent.label_] for ent in doc.ents if ent.label_ in self.replace_entities.keys()}
        for ent in ents_dict:
            text = text.replace(ent, ents_dict[ent])
        return text

    def get_docs(self, min_sentence_length=8):
        """Parse every document into word-index sentences paired with rates."""
        def parse_doc(doc_path):
            with open(doc_path, 'r') as f:
                text = unidecode.unidecode(unicode(f.read().decode('iso-8859-1')))
            text = ' '.join(text.split()).strip()
            if len(text) > 0:
                # Document date comes from the 8-digit run in the file path.
                date = datetime.datetime.strptime(date_re.search(doc_path).group(0), '%Y%m%d').date()
                stripped_text = self.strip_text(text)
                doc = self.nlp(unicode(stripped_text))
                sentences = list(doc.sents)
                doc_sents = []
                # The first sentence (boilerplate) is skipped.
                for sent in sentences[1:]:
                    if len(sent) > min_sentence_length:
                        sentence_as_idxes = []
                        for token in sent:
                            skip = False
                            for regex, replacement_token in self.regexes:
                                match = regex.match(token.text)
                                if match:
                                    sentence_as_idxes.append(self.word_positions[replacement_token])
                                    skip = True
                            if not skip:
                                try:
                                    sentence_as_idxes.append(self.word_positions[token.text])
                                except KeyError:
                                    sentence_as_idxes.append(self.word_positions['$UNKNOWN$'])
                        doc_sents.append(sentence_as_idxes)
                paired_doc = PairedDocAndRates(date, doc_sents, doc_path.find('minutes') > -1)
                paired_doc.match_rates(self.rates)
                return paired_doc
        date_re = re.compile(r'\d{8}')
        file_re = re.compile(r'\d{8}')
        docs = []
        for root, dirs, filenames in os.walk(self.data_dir):
            for filename in filenames:
                if file_re.search(filename):
                    filepath = os.path.join(root, filename)
                    parsed_doc = parse_doc(filepath)
                    if parsed_doc:
                        logger.info("Parsed %s", filepath)
                        docs.append(parsed_doc)
        self.docs = docs

    def save_output(self):
        """Write the paired documents and the dictionary as pretty JSON."""
        with open(os.path.join(self.data_dir, 'paired_data.json'), 'w') as f:
            json.dump([doc.to_dict() for doc in self.docs], f, indent=2, sort_keys=True)
        with open(os.path.join(self.data_dir, 'dictionary.json'), 'w') as f:
            json.dump(self.vocab.to_dict(), f, indent=2, sort_keys=True)
if __name__ == "__main__":
    # NOTE(review): a FRED API key is hardcoded in source control; it should
    # be moved to an environment variable or config file and this key revoked.
    data_transformer = DataTransformer('data', min_sentence_length=8)
    data_transformer.build_vocab()
    data_transformer.get_rates('51c09c6b8aa464671aa8ac96c76a8416')
    data_transformer.get_docs()
    data_transformer.save_output()
| allentran/fed-rates-bot | fed_bot/model/data.py | Python | mit | 8,747 |
import json
import random
import colorbrewer
import itertools
import numpy as np
from astropy.io import fits
from collections import Counter
# Determine the number of unique morphological classes from GZ2 and the number
# of galaxies in each respective class
def collate_classes():
    """Return a Counter of galaxies per unique GZ2 morphological class."""
    fitsfile = 'gz2class.fits'
    # Use the variable instead of repeating the literal (the original
    # defined `fitsfile` and then ignored it).
    with fits.open(fitsfile) as p:
        data = p[1].data
        gz2class_cnt = Counter(data['gz2class'])
    return gz2class_cnt
# Determine HTML-compliant hex numbers for the RGB triplets returned by colorbrewer
_NUMERALS = '0123456789abcdefABCDEF'
_HEXDEC = {v: int(v, 16) for v in (x+y for x in _NUMERALS for y in _NUMERALS)}
LOWERCASE, UPPERCASE = 'x', 'X'
def triplet(rgb,lettercase=UPPERCASE):
return format((rgb[0]<<16 | rgb[1]<<8 | rgb[2]), '06'+lettercase)
# Find the URL for the SkyServer image of an example galaxy in each morphological class
def unique_gz2(cnt):
    """Pick one random example galaxy per GZ2 class; return {class: cutout URL}."""
    fitsfile = 'gz2class.fits'
    with fits.open(fitsfile) as p:
        data = p[1].data
    image_dict = {}
    # Loop over unique classes
    for c in cnt:
        matched = (data['gz2class'] == c)
        # Bug fix: randint(0, n) - 1 could yield -1, which wraps to the
        # last matched galaxy and double-weights it; sample over [0, n-1].
        ind = random.randint(0, np.sum(matched) - 1)
        # Global image parameters
        imgsize = 424
        scale = 0.5
        # Specific parameters for this galaxy
        ra = data[matched][ind]['ra']
        dec = data[matched][ind]['dec']
        size = data[matched][ind]['PETROR90_R']
        imgsizex = imgsizey = imgsize
        # Scale the cutout by the galaxy's r-band Petrosian radius.
        imgscale = size * 0.02 * scale
        info = {'ra':ra, 'dec':dec, 'scale':imgscale,
                'imgsizex':imgsizex, 'imgsizey':imgsizey}
        urlformat = 'http://casjobs.sdss.org/ImgCutoutDR7/getjpeg.aspx?ra=%(ra).6f&dec=%(dec).6f&scale=%(scale).6f&width=%(imgsizex)i&height=%(imgsizey)i'
        url = (urlformat % info)
        # Add URL as a key:value pair to the dictionary
        image_dict[c] = url
    return image_dict
# Make the JSON data file that will be embedded in the HTML page for d3 visualization
def make_json(cnt, image_dict):
    """Build the JIT/d3 tree for all classes, write gz2.json, return the root."""
    gallist = []
    palette = colorbrewer.PuRd[8]
    # .items() works under both Python 2 and 3 (iteritems is py2-only).
    for gclass, ngal in cnt.items():
        # Pick color based on length of string (somewhat akin to "complexity" of morph.)
        # NOTE(review): assumes class names are at most 8 characters,
        # otherwise the palette lookup raises IndexError -- confirm.
        color = triplet(palette[len(str(gclass)) - 1])
        # Add data in dictionary format
        tempdata = {'playcount': str(ngal),
                    '$color': '#%s' % color,
                    "image": image_dict[gclass],
                    '$area': ngal}
        tempgal = {'children': [],
                   'data': tempdata,
                   'id': 'gal_%s' % gclass,
                   'name': '%s' % gclass}
        gallist.append(tempgal)
    # Set up the root node
    allgals = {'children': gallist, 'data': {}, 'id': 'root', 'name': 'Galaxy Zoo 2'}
    # Dump all galaxies to one big JSON; `with` guarantees the file is closed.
    with open('gz2.json', 'w') as f:
        json.dump(allgals, f)
    return allgals
# Write the Javascript file that will be run by the HTML page
def write_js(j):
    """Assemble gz2.js: half1.js + 'var json = <data>' + half2.js.

    Bug fix: the original opened three files without context managers, so
    they leaked on any exception; `with` closes them deterministically.
    """
    with open('half1.js', 'r') as head, open('half2.js', 'r') as tail, \
            open('gz2.js', 'w') as out:
        out.write(head.read())
        out.write('var json = ')
        json.dump(j, out)
        out.write(tail.read())
# Run all tasks in the module if called from command line
if __name__ == '__main__':
    # Full pipeline: count classes, pick example images, build JSON, emit JS.
    cnt = collate_classes()
    image_dict = unique_gz2(cnt)
    allgals = make_json(cnt,image_dict)
    write_js(allgals)
| willettk/gzviz | prep_json.py | Python | gpl-2.0 | 3,453 |
def init(id):
    """Engine entry point: start accelerating the object's velocity component."""
    go = getObject(id)
    # NOTE(review): `== None` kept as-is; engine proxy objects may rely on
    # equality rather than identity -- confirm before switching to `is None`.
    if go == None:
        return
    vc = go.getVelocityComponent()
    if vc == None:
        return
    # Empty string presumably selects a default acceleration profile -- TODO confirm.
    vc.accelerate("")
    return
def tick(id):
    """Engine entry point: called every frame; no per-tick behaviour needed."""
    return
def shatter(selfID, otherID):
    """Engine collision callback: push the object away and show a message."""
    go = getObject(selfID)
    if go == None:
        return
    psc = go.getPhysicalStateComponent()
    if psc == None:
        return
    # Impulse upward and forward; True presumably flags local coordinates -- TODO confirm.
    psc.applyCentralForce(Vec3(0.0, 1000.0, 5000.0), True)
    addPrint("ScriptCollision_" + str(selfID), "RPG/Blanko", 0.5, 0.48, "A crash? Registered in a python script? Nice!", Alignment.Center, 3000000)
    setSize("ScriptCollision_" + str(selfID), 1.0, 0.3)
    return
| ClockworkOrigins/i6engine | media/scripts/python/MoveScript.py | Python | lgpl-2.1 | 574 |
# Print the version split into three components
import sys

verfile = sys.argv[1]
with open(verfile) as f:
    version = f.read()

# Collect the leading digit-run of every dot-separated component.
# Bug fix: the original kept only the FIRST character of each component
# (`a[0]`), truncating multi-digit parts such as '10' to '1'.
parts = []
for component in version.split('.'):
    digits = ''
    for ch in component:
        if ch not in '0123456789':
            break
        digits += ch
    if digits:
        parts.append(digits)

# If no revision component is present, '0' is added
if len(parts) == 2:
    parts.append('0')

# Space-separated, no trailing newline -- matches the py2 `print i,` output
# while staying runnable on both Python 2 and 3.
sys.stdout.write(' '.join(parts))
| cpcloud/PyTables | mswindows/get_pytables_version.py | Python | bsd-3-clause | 300 |
#!/usr/bin/python
def logPassing(jobName, passing):
    """Print the pass/fail status of *jobName*.

    ``passing`` is True (passing), False (failing), or None (unknown).
    """
    if passing is None:
        # Bug fix: the original `if passing / elif not passing` pair covers
        # every value, so its "no completion status" branch was unreachable
        # (None fell through to "failing"); test for None explicitly first.
        print(jobName + " has no completion status.")
    elif passing:
        print(jobName + " is passing!")
    else:
        print(jobName + " is failing!")
def logBuilding(jobName, building):
    """Print whether *jobName* currently has a building task.

    ``building`` is True (building), False (idle), or None (unknown).
    """
    if building is None:
        # Bug fix: `if building / elif not building` covers every value, so
        # this "could not be found" branch was unreachable (None was reported
        # as "no building tasks"); test for None explicitly first.
        print("Job's building status could not be found for " + jobName + ".")
    elif building:
        print(jobName + " has a building task!")
    else:
        print(jobName + " has no building tasks.")
#!/usr/bin/python
from setuptools import setup
setup(name='pygments-github-lexers',
version='0.0.5',
description='Pygments Github custom lexers.',
keywords='pygments github lexer',
license='BSD',
author='Liluo',
author_email='i@liluo.org',
url='https://github.com/liluo/pygments-github-lexers',
packages=['pygments_github_lexers'],
install_requires=['pygments>=2.0.2'],
entry_points='''[pygments.lexers]
Dasm16Lexer=pygments_github_lexers:Dasm16Lexer
PuppetLexer=pygments_github_lexers:PuppetLexer
AugeasLexer=pygments_github_lexers:AugeasLexer
TOMLLexer=pygments_github_lexers:TOMLLexer
SlashLexer=pygments_github_lexers:SlashLexer''',
classifiers=[
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],)
| liluo/pygments-github-lexers | setup.py | Python | bsd-2-clause | 1,241 |
#!/usr/bin/python2.7
# CTF/wargame helper (Python 2): connect to the level service over SSL,
# check the server certificate's common name, then send the password.
from socket import socket
import ssl
import sys
s = socket()
# NOTE(review): ssl_version=2 selects a legacy protocol constant and
# ciphers 'ALL:eNULL' permits NULL (no) encryption -- presumably deliberate
# for this exercise; never use these settings in production code.
c = ssl.wrap_socket(s, cert_reqs=ssl.CERT_REQUIRED, ca_certs='/tmp/lev5/cert.pem', ciphers='ALL:eNULL', ssl_version=2)
#c = ssl.wrap_socket(s, ciphers='ALL:eNULL', ssl_version=2)
# maybe this is port 2112 in the future ? see meeting !
c.connect(('0.0.0.0', 21123))
cert = c.getpeercert()
# The sixth RDN of the subject is expected to hold the common name.
k,v = cert['subject'][5][0]
if k == 'commonName' and v == 'abraxas.dildosfromspace.com':
    print "yay!"
else:
    print "Failed!"
    sys.exit(0)
c.write('Password\n')
print c.recv()
c.close()
| godoppl/project | otw/abraxas/level5/kill.py | Python | gpl-3.0 | 570 |
#python
# Round-trip test: write a triangulated cube through STLMeshWriter, read it
# back with STLMeshReader, and require a valid mesh similar to the reference.
import k3d
import testing
setup = testing.setup_mesh_writer_test(["PolyCube", "TriangulateFaces", "STLMeshWriter"], "STLMeshReader", "mesh.sink.STLMeshWriter.stl")
# Select the whole mesh so TriangulateFaces operates on every face.
setup.modifier.mesh_selection = k3d.geometry.selection.create(1)
testing.require_valid_mesh(setup.document, setup.reader.get_property("output_mesh"))
testing.require_similar_mesh(setup.document, setup.reader.get_property("output_mesh"), "mesh.sink.STLMeshWriter", 1)
| barche/k3d | tests/mesh/mesh.sink.STLMeshWriter.py | Python | gpl-2.0 | 442 |
#!/usr/bin/python3
# Thanks to the GNOME theme nerds for the original source of this script
import os
import sys
import xml.sax
import subprocess
# Tool binaries and the directory holding the source SVG sheets.
INKSCAPE = '/usr/bin/inkscape'
OPTIPNG = '/usr/bin/optipng'
SRC = os.path.join('.', 'src/gtk3')
# Shared inkscape shell process; created lazily on first render.
inkscape_process = None
def optimize_png(png_file):
    """Shrink *png_file* in place with optipng, when the tool is installed."""
    if not os.path.exists(OPTIPNG):
        return
    subprocess.Popen([OPTIPNG, '-quiet', '-o7', png_file]).wait()
def wait_for_prompt(process, command=None):
    """Optionally send *command*, then block until the shell prints its prompt.

    The prompt is a bare b'>' at the start of the stream, or b'\\n>' after
    any other output; scan with a two-byte sliding window.
    """
    if command is not None:
        process.stdin.write((command + '\n').encode('utf-8'))
    first = process.stdout.read(1)
    if first == b'>':
        return
    window = first + process.stdout.read(1)
    while window != b'\n>':
        window = window[1:] + process.stdout.read(1)
def start_inkscape():
    """Launch a persistent inkscape shell and wait for its first prompt."""
    process = subprocess.Popen(
        [INKSCAPE, '--shell'],
        bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    wait_for_prompt(process)
    return process
def inkscape_render_rect(icon_file, rect, output_file):
    """Export the area with id *rect* from *icon_file* as a PNG, then optimize it."""
    global inkscape_process
    # Reuse one inkscape shell across all renders to avoid startup cost.
    if inkscape_process is None:
        inkscape_process = start_inkscape()
    wait_for_prompt(inkscape_process,
                    '%s -i %s -e %s' %
                    (icon_file, rect, output_file))
    optimize_png(output_file)
class ContentHandler(xml.sax.ContentHandler):
    """SAX handler that renders every icon rect found in a 'Baseplate' layer.

    Each Baseplate layer is expected to carry <text> elements labelled
    'context' and 'icon-name' plus <rect> elements marking the icon
    bounding boxes to export.
    """
    # Parser states tracked on the stack/inside lists below.
    ROOT = 0
    SVG = 1
    LAYER = 2
    OTHER = 3
    TEXT = 4
    def __init__(self, path, force=False, filter=None):
        self.stack = [self.ROOT]
        self.inside = [self.ROOT]
        self.path = path
        self.rects = []
        self.state = self.ROOT
        self.chars = ""
        self.force = force  # re-render even if the PNG looks up to date
        self.filter = filter  # optional list of icon names to render
    def endDocument(self):
        pass
    def startElement(self, name, attrs):
        # Push a state for every element; collect rects/labels inside layers.
        if self.inside[-1] == self.ROOT:
            if name == "svg":
                self.stack.append(self.SVG)
                self.inside.append(self.SVG)
                return
        elif self.inside[-1] == self.SVG:
            if (name == "g" and ('inkscape:groupmode' in attrs) and ('inkscape:label' in attrs)
               and attrs['inkscape:groupmode'] == 'layer' and attrs['inkscape:label'].startswith('Baseplate')):
                self.stack.append(self.LAYER)
                self.inside.append(self.LAYER)
                self.context = None
                self.icon_name = None
                self.rects = []
                return
        elif self.inside[-1] == self.LAYER:
            if name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'context':
                self.stack.append(self.TEXT)
                self.inside.append(self.TEXT)
                self.text = 'context'
                self.chars = ""
                return
            elif name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'icon-name':
                self.stack.append(self.TEXT)
                self.inside.append(self.TEXT)
                self.text = 'icon-name'
                self.chars = ""
                return
            elif name == "rect":
                self.rects.append(attrs)
        self.stack.append(self.OTHER)
    def endElement(self, name):
        stacked = self.stack.pop()
        if self.inside[-1] == stacked:
            self.inside.pop()
        if stacked == self.TEXT and self.text is not None:
            assert self.text in ['context', 'icon-name']
            if self.text == 'context':
                self.context = self.chars
            elif self.text == 'icon-name':
                self.icon_name = self.chars
            self.text = None
        elif stacked == self.LAYER:
            # A Baseplate layer just closed: render all of its rects.
            assert self.icon_name
            assert self.context
            if self.filter is not None and not self.icon_name in self.filter:
                return
            print (self.context, self.icon_name)
            for rect in self.rects:
                width = rect['width']
                height = rect['height']
                id = rect['id']
                dir = os.path.join("Paper", "gtk-3.0", self.context)
                outfile = os.path.join(dir, self.icon_name+'.png')
                if not os.path.exists(dir):
                    os.makedirs(dir)
                # Do a time based check!
                if self.force or not os.path.exists(outfile):
                    inkscape_render_rect(self.path, id, outfile)
                    sys.stdout.write('.')
                else:
                    # Only re-render when the SVG is newer than the PNG.
                    stat_in = os.stat(self.path)
                    stat_out = os.stat(outfile)
                    if stat_in.st_mtime > stat_out.st_mtime:
                        inkscape_render_rect(self.path, id, outfile)
                        sys.stdout.write('.')
                    else:
                        sys.stdout.write('-')
                sys.stdout.flush()
            sys.stdout.write('\n')
            sys.stdout.flush()
    def characters(self, chars):
        self.chars += chars.strip()
if len(sys.argv) == 1:
    # No arguments: render every SVG sheet under SRC into the theme tree.
    if not os.path.exists('Paper'):
        os.mkdir('Paper')
    print ('Rendering from SVGs in', SRC)
    for file in os.listdir(SRC):
        if file[-4:] == '.svg':
            file = os.path.join(SRC, file)
            handler = ContentHandler(file)
            xml.sax.parse(open(file), handler)
else:
    # One sheet name (without .svg) plus optional icon-name filters:
    # force-render just those icons from that sheet.
    file = os.path.join(SRC, sys.argv[1] + '.svg')
    if len(sys.argv) > 2:
        icons = sys.argv[2:]
    else:
        icons = None
    if os.path.exists(os.path.join(file)):
        handler = ContentHandler(file, True, filter=icons)
        xml.sax.parse(open(file), handler)
    else:
        print ("Error: No such file", file)
        sys.exit(1)
| bysshe/paper-gtk-theme | render-gtk3-assets.py | Python | gpl-3.0 | 5,784 |
import basic
import likelihood
import mcmc
| profxj/xastropy | xastropy/stats/__init__.py | Python | bsd-3-clause | 43 |
###########################################################
#
# Copyright (c) 2014, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['GalleryWdg']
import urllib
from pyasm.biz import Snapshot, File
from pyasm.search import Search
from pyasm.web import HtmlElement, DivWdg, Table
from pyasm.widget import TextWdg, IconWdg
from tactic.ui.common import BaseRefreshWdg
class GalleryWdg(BaseRefreshWdg):
    def init(self):
        # Path of the current gallery item; starts empty.
        self.curr_path = None
def get_display(self):
self.sobject_data = {}
top = self.top
top.add_style
top.add_class("spt_gallery_top")
inner = DivWdg()
top.add(inner)
# make the whole Gallery unselectable
inner.add_class('unselectable')
inner.add_style("position: fixed")
inner.add_style("top: 0px")
inner.add_style("left: 0px")
inner.add_style("width: 100%")
inner.add_style("bottom: 0px")
inner.add_style("padding-bottom: 40px")
#inner.add_style("background: rgba(0,0,0,0.5)")
inner.add_style("background: rgba(0,0,0,1)")
inner.add_style("z-index: 2000")
height = self.kwargs.get("height")
# default to top.
align = self.kwargs.get("align")
if not align:
align = "top"
paths = self.get_paths(file_type='main')
# icon type may be too small
thumb_paths = self.get_paths(file_type='web')
descriptions = []
for path in paths:
sobject = self.sobject_data.get(path)
if not sobject:
descriptions.append("")
else:
description = sobject.get("description")
if not description:
description = ""
descriptions.append(description)
inner.add_behavior( {
'type': 'load',
'descriptions': descriptions,
'cbjs_action': '''
spt.gallery = {};
// 1250 is defined also in the css styles
spt.gallery.portrait = window.innerWidth < 1250;
spt.gallery.portrait = false
spt.gallery.top = bvr.src_el;
spt.gallery.content = spt.gallery.top.getElement(".spt_gallery_content");
spt.gallery.shelf = spt.gallery.top.getElement(".spt_gallery_shelf");
spt.gallery.content.setStyle('opacity','0.1')
spt.gallery.desc_el = spt.gallery.top.getElement(".spt_gallery_description");
//window.addEvent('domready', function() {
setTimeout(function() {
// set the img h or w directly
var items = bvr.src_el.getElements('.spt_gallery_item img');
// fade in
spt.gallery.content.set('tween', {duration: 250}).fade('in');
}, 50)
spt.gallery.descriptions = bvr.descriptions;
spt.gallery.index = 0;
spt.gallery.last_index = 0;
spt.gallery.total = bvr.descriptions.length;
spt.gallery.left_arrow = bvr.src_el.getElement('.spt_left_arrow');
spt.gallery.right_arrow = bvr.src_el.getElement('.spt_right_arrow');
spt.gallery.videos = {};
spt.gallery.init = function() {
}
spt.gallery.stack = [];
spt.gallery.push_stack = function(key) {
spt.gallery.stack.push(key);
}
spt.gallery.show_next = function(src_el) {
if (!src_el)
src_el = spt.gallery.right_arrow;
if (spt.gallery.index >= spt.gallery.total-2) {
spt.hide(src_el);
}
if (spt.gallery.index == spt.gallery.total-1) {
return;
}
spt.gallery.index += 1;
spt.gallery.show_index(spt.gallery.index);
}
spt.gallery.show_prev = function(src_el) {
if (!src_el)
src_el = spt.gallery.left_arrow;
if (spt.gallery.index <= 1) {
spt.hide(src_el);
}
if (spt.gallery.index == 0) {
return;
}
spt.gallery.index -= 1;
spt.gallery.show_index(spt.gallery.index);
}
spt.gallery.show_index = function(index) {
let last_index = spt.gallery.last_index;
// stop all videos
var videos = spt.gallery.top.getElements(".video-js");
for (var i = 0; i < videos.length; i++) {
try {
var video = videos[i];
var video_id = video.get("id");
var video_obj = videojs(video_id, {"nativeControlsForTouch": false});
video_obj.pause();
}
catch(e) {
}
}
// can't tween percentage with this library???
var width = window.innerWidth;
var margin = - width * index;
var content = spt.gallery.content;
//content.setStyle("margin-left", margin + "px");
new Fx.Tween(content,{duration: 250}).start("margin-left", margin);
spt.gallery.index = index;
var total = spt.gallery.total;
if (index == 0) {
spt.hide(spt.gallery.left_arrow);
spt.show(spt.gallery.right_arrow);
}
else if (index == total - 1) {
spt.show(spt.gallery.left_arrow);
spt.hide(spt.gallery.right_arrow);
}
else {
spt.show(spt.gallery.left_arrow);
spt.show(spt.gallery.right_arrow);
}
// move the shelf
let shelf_top = spt.gallery.shelf;
let items = shelf_top.getElements(".spt_gallery_shelf_item");
let last_item = items[last_index];
last_item.setStyle("border", "solid 3px transparent");
last_item.setStyle("opacity", 0.5)
let item = items[index];
item.setStyle("border", "solid 3px red");
item.setStyle("opacity", 1.0);
let offset = (index * 110) + 55;
//offset = "calc(50% - "+offset+"px)";
offset = width/2 - offset;
new Fx.Tween(shelf_top,{duration: 250}).start("margin-left", offset);
spt.gallery.last_index = index;
var description = spt.gallery.descriptions[index];
if (!description) {
description = (index+1)+" of "+total;
}
else {
description = (index+1)+" of "+total+" - " + description;
}
spt.gallery.set_description(description);
}
spt.gallery.close = function() {
var content = spt.gallery.content;
var gallery_top = content.getParent(".spt_gallery_top");
var top = gallery_top.getParent(".spt_top");
spt.behavior.destroy_element(gallery_top);
// header is sometimes not in view after closing, if a header exists
// make sure it is scrolled into view
if (top) {
var index_header = top.getElement(".spt_index_header");
if (index_header) {
index_header.scrollIntoView();
}
}
}
spt.gallery.set_description = function(desc) {
var desc_el = spt.gallery.desc_el;
desc_el.innerHTML = desc;
}
'''
} )
scroll = DivWdg(css='spt_gallery_scroll')
inner.add(scroll)
scroll.set_box_shadow()
if height:
scroll.add_style("height: %s" % height)
scroll.add_style("overflow-x: hidden")
scroll.add_style("overflow-y: hidden")
scroll.add_style("background: #000")
#scroll.add_style("position: absolute")
scroll.add_style("margin-left: auto")
scroll.add_style("margin-right: auto")
content = DivWdg()
top.add_attr('tabindex','-1')
scroll.add(content)
content.add_class("spt_gallery_content")
# make the items vertically align to bottom (flex-end)
# on a regular monitor, align to top (flex-start) is better
if align == 'bottom':
align_items = 'flex-end'
else:
align_items = 'flex-start'
content.add_styles("display: flex; flex-flow: row nowrap; align-items: %s;"%align_items)
content.add_style("height: calc(100% - 80px)")
content.add_style("width: max-content")
top.add_behavior( {
'type': 'load',
'cbjs_action': '''
bvr.src_el.focus();
'''
} )
top.add_behavior( {
'type': 'mouseenter',
'cbjs_action': '''
bvr.src_el.focus();
'''
} )
top.add_behavior( {
'type': 'mouseleave',
'cbjs_action': '''
bvr.src_el.blur();
'''
} )
top.add_behavior( {
'type': 'keydown',
'cbjs_action': '''
var key = evt.key;
if (key == "left") {
spt.gallery.push_stack(key);
spt.gallery.show_prev();
}
else if (key == "right") {
spt.gallery.push_stack(key);
spt.gallery.show_next();
}
else if (key == "esc" || key == "enter") {
spt.gallery.close();
}
'''
} )
curr_index = 0
for i, path in enumerate(paths):
path_div = DivWdg(css='spt_gallery_item')
content.add(path_div)
#path_div.add_style("float: left")
path_div.add_style("display: inline-block")
path_div.add_style("vertical-align: middle")
if path == self.curr_path:
curr_index = i
try:
thumb_path = thumb_paths[i]
except IndexError:
print("Cannot find the thumb_path [%s] "%i )
thumb_path = ''
path_div.add_style("width: 100vw")
path_div.add_style("height: 100%")
path_div.add_style("overflow-x: hidden")
path_div.add_style("overflow-y: hidden")
from tactic.ui.widget import EmbedWdg
embed = EmbedWdg(src=path, click=False, thumb_path=thumb_path, index=i, controls="true", layout="fit")
path_div.add(embed)
content.add_behavior({
'type': 'load',
'index': curr_index,
'cbjs_action': '''
if (!bvr.index) bvr.index = 0;
spt.gallery.show_index(bvr.index);
'''
} )
# Add a top shelf
top_shelf = DivWdg()
#inner.add(top_shelf)
top_shelf.add_style("position: fixed")
top_shelf.add_style("height: 30px")
top_shelf.add_style("width: 100vw")
top_shelf.add_style("top: 0px")
top_shelf.add_style("left: 0px")
top_shelf.add_style("background: rgba(0,0,0,0.3)")
top_shelf.add_style("display: flex")
top_shelf.add_style("color: #DDD")
from pyasm.widget import CheckboxWdg
checkbox = CheckboxWdg()
top_shelf.add(checkbox)
checkbox.add_style("width: 20px")
checkbox.add_style("height: 20px")
checkbox.add_style("margin: 7px 10px")
select_icon = IconWdg("Select Asset", icon="FA_REMOVE", size="2rem")
top_shelf.add(select_icon)
select_icon.add_style("margin-left: auto")
select_icon.add_style("margin-right: 10px")
shelf = DivWdg()
inner.add(shelf)
shelf.add_style("width: 100%")
shelf.add_style("height: 100px")
shelf.add_style("overflow: hidden")
shelf.add_style("padding-top: 3px")
inner_shelf = DivWdg()
shelf.add(inner_shelf)
inner_shelf.add_class("spt_gallery_shelf")
inner_shelf.add_style("display: flex")
inner_shelf.add_style("height: 100%")
for i, path in enumerate(paths):
thumb = DivWdg()
inner_shelf.add(thumb)
thumb.add_class("spt_gallery_shelf_item")
thumb.add_style('''background-image: url("%s")''' % path)
thumb.add_style("background-size", "cover")
thumb.add_style("background-position", "center")
thumb.add_style("height: 94px")
thumb.add_style("width: 100px")
thumb.add_style("min-width: 100px")
thumb.add_style("margin: 0px 5px")
thumb.add_style("border: solid 3px transparent")
thumb.add_style("box-sizing: border-box")
thumb.add_style("opacity: 0.5")
thumb.add_attr("spt_index", i)
shelf.add_relay_behavior( {
'type': 'click',
'bvr_match_class': "spt_gallery_shelf_item",
'cbjs_action': '''
let index = parseInt(bvr.src_el.getAttribute("spt_index"));
spt.gallery.show_index(index);
/*
let shelf_top = bvr.src_el.getParent(".spt_gallery_shelf");
let items = shelf_top.getElements(".spt_gallery_shelf_item");
items.forEach( (item) => {
item.setStyle("border", "solid 3px transparent");
item.setStyle("opacity", "0.7")
} );
let index = parseInt(bvr.src_el.getAttribute("spt_index"));
bvr.src_el.setStyle("border", "solid 3px #DDD");
bvr.src_el.setStyle("opacity", 1.0);
let offset = (index * 110) + 220;
//offset = "calc(50% - "+offset+"px)";
offset = screen.width/2 - offset;
new Fx.Tween(shelf_top,{duration: 250}).start("margin-left", offset);
spt.gallery.show_index(index);
*/
'''
} )
#icon = IconWdg(title="Close", icon="/context/icons/glyphs/close.png", width="40px")
icon = IconWdg(title="Close", icon="FA_REMOVE", size="3rem")
inner.add(icon)
icon.add_style("position: absolute")
icon.add_style("cursor: pointer")
icon.add_style("top: 20px")
icon.add_style("right: 15px")
icon.add_behavior( {
'type': 'click_up' ,
'cbjs_action': '''
spt.gallery.close();
'''
} )
icon.add_style("background", "rgba(48,48,48,0.7)")
icon.add_style("border-radius", "5px")
icon = IconWdg(title="Previous", icon="FAS_CHEVRON_LEFT", size="3rem")
inner.add(icon)
icon.add_class('spt_left_arrow')
icon.add_style("cursor: pointer")
icon.add_style("position: absolute")
icon.add_style("top: 40%")
icon.add_style("left: 20px")
#icon.add_style("opacity: 0.5")
icon.add_behavior( {
'type': 'click_up' ,
'cbjs_action': '''
var arrow = bvr.src_el;
spt.gallery.show_prev(arrow);
'''
} )
icon.add_style("background", "rgba(48,48,48,0.7)")
icon.add_style("border-radius", "5px")
icon = IconWdg(title="Next", icon="/context/icons/glyphs/chevron_right.png")
icon = IconWdg(title="Previous", icon="FAS_CHEVRON_RIGHT", size="3rem")
inner.add(icon)
icon.add_class('spt_right_arrow')
icon.add_style("position: absolute")
icon.add_style("cursor: pointer")
icon.add_style("top: 40%")
icon.add_style("right: 20px")
#icon.add_style("opacity: 0.5")
icon.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var arrow = bvr.src_el;
spt.gallery.show_next(arrow);
'''
} )
icon.add_style("background", "rgba(48,48,48,0.7)")
icon.add_style("border-radius", "5px")
desc_div = DivWdg()
desc_div.add_class("spt_gallery_description")
desc_div.add_style("height: 20px")
desc_div.add_style("width: 100vw")
desc_div.add_style("text-align: center")
desc_div.add_style("background: rgba(0,0,0,1)")
desc_div.add_style("color: #bbb")
desc_div.add_style("font-weight: bold")
desc_div.add_style("font-size: 16px")
desc_div.add_style("margin-left: -50vw")
desc_div.add_style("z-index: 1000")
desc_div.add("")
desc_outer_div = DivWdg()
inner.add(desc_outer_div)
desc_outer_div.add_style("position: fixed")
desc_outer_div.add(desc_div)
desc_outer_div.add_style("bottom: 0px")
desc_outer_div.add_style("left: 50%")
return top
def get_paths(self, file_type='main'):
    """Collect the web paths displayed by the gallery.

    Sources, in priority order:
      1. an explicit "paths" kwarg,
      2. snapshots of the sobjects named by the "search_keys" kwarg,
      3. a hard-coded test list (fallback for development).

    Side effects: populates self.sobject_data[path] and, when the "search_key"
    kwarg matches, sets self.curr_path to the clicked item's path.
    """
    # this is the selected one
    search_key = self.kwargs.get("search_key")
    search_keys = self.kwargs.get("search_keys")
    paths = self.kwargs.get("paths")
    if not paths:
        paths = []
    if search_keys:
        sobjects = Search.get_by_search_keys(search_keys, keep_order=True)
        # return_dict=True defaults to return the first of each snapshot list
        # and so works well with is_latest=True
        if sobjects and sobjects[0].get_base_search_type() == "sthpw/snapshot":
            # The search keys already point at snapshots: map key -> snapshot.
            sobj_snapshot_dict = {}
            for sobject in sobjects:
                tmp_search_key = sobject.get_search_key()
                sobj_snapshot_dict[tmp_search_key] = sobject
            snapshots = sobjects
        else:
            # Otherwise resolve the latest snapshot for each sobject.
            sobj_snapshot_dict = Snapshot.get_by_sobjects(sobjects, is_latest=True, return_dict=True)
            snapshots = sobj_snapshot_dict.values()
        file_dict = Snapshot.get_files_dict_by_snapshots(snapshots, file_type=file_type)
        for sobject in sobjects:
            path = ''
            snapshot = sobj_snapshot_dict.get(sobject.get_search_key())
            # it is supposed to get one (latest), just a precaution
            if isinstance(snapshot, list):
                snapshot = snapshot[0]
            if not snapshot:
                continue
            file_list = file_dict.get(snapshot.get_code())
            if not file_list:
                # Keep positions aligned with the sobjects list.
                paths.append("")
                continue
            # NOTE: there should only be one file
            tmp_paths = []
            for file_object in file_list:
                path = file_object.get_web_path()
                # If the file type is not supported by web browsers, get the web version
                # as a fallback.
                # TODO: Note that this will disable
                # the retrieval of a sequence of files as in ####.tif case, where
                # the asset is a sequence of files. So it will only display the web
                # version of the first file in the asset list.
                extension = File.get_extension(path)
                accepted_exts = ['mp4', 'mov', 'jpg', 'png', 'ogg', 'webm']
                if file_type == 'main' and extension not in accepted_exts:
                    path = snapshot.get_web_path_by_type(type="web")
                # If the asset is a sequence of files, retrieve all the file paths.
                # NOTE: In this case, web versions do not exist for all of the files.
                # The web version is generated only for the first one in the sequence.
                if path.find("#") != -1:
                    expanded_paths = snapshot.get_expanded_web_paths()
                    path = "|".join(expanded_paths)
                tmp_paths.append(path)
            # Multiple files for one snapshot are joined with "|".
            path = "|".join(tmp_paths)
            self.sobject_data[path] = sobject
            paths.append(path)
            # set the current path the user clicks on
            if not self.curr_path and sobject.get_search_key() == search_key and file_type == 'main':
                self.curr_path = path
    elif paths:
        # Caller supplied paths directly; use them verbatim.
        return paths
    else:
        # TEST
        paths = [
            '/assets/test/store/The%20Boxter_v001.jpg',
            '/assets/test/store/Another%20one_v001.jpg',
            '/assets/test/store/Whatever_v001.jpg'
        ]
    """
    for index,path in enumerate(paths):
        path = urllib.pathname2url(path)
        paths[index] = path
    """
    return paths
| Southpaw-TACTIC/TACTIC | src/tactic/ui/widget/gallery_wdg.py | Python | epl-1.0 | 20,684 |
# coding=utf-8
"""
vckube
Kubernetes Vagrant Provisioning and management script
-
Active8 (08-03-15)
author: erik@a8.nl
license: GNU-GPL2
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from vckube import main
# Script entry point: delegate directly to vckube's main().
if __name__ == "__main__":
    main()
| erikdejonge/vckube | main.py | Python | gpl-2.0 | 426 |
import sys
import jsonpickle
import os
import datetime
import inspect
# Use the stdlib json backend for all jsonpickle (de)serialization.
jsonpickle.set_preferred_backend('json')

# Scalar "leaf" types that conversion passes through untouched.
# Python 2 additionally has the distinct `unicode` and `long` types.
if sys.version_info.major < 3:
    types = (bool, str, unicode, int, long, float, datetime.datetime)
else:
    types = (bool, str, int, float, datetime.datetime)
def pretty_print():
    """Switch jsonpickle's json encoder to indented, human-readable output."""
    jsonpickle.set_encoder_options('json', indent=2)
def ugly_print():
    """Switch jsonpickle's json encoder back to compact (no-indent) output."""
    jsonpickle.set_encoder_options('json', indent=None)
class List:
    """Schema marker: a field holding a list whose elements are *item_type*."""

    def __init__(self, item_type):
        # Type used by to_object() to convert each list element.
        self.item_type = item_type
class Field:
    """Schema marker describing one attribute of a typed object.

    field_type -- optional type used by to_object() to convert the raw value.
    default    -- value used when the key is absent from the source structure.
    """

    def __init__(self, field_type=None, default=None):
        self.field_type = field_type
        self.default = default
class Struct(object):
    """Attribute-access wrapper around a plain dict.

    Nested dicts and lists are converted recursively via to_object(), so
    `struct.a.b` works for nested structures.
    """

    def __init__(self, adict):
        for key, item in adict.items():
            if isinstance(item, (dict, list)):
                self.__dict__[key] = to_object(item)
            else:
                self.__dict__[key] = item
def get_field_value(field, field_meta, structure):
    """Pull *field* out of dict *structure*, honoring the Field metadata.

    Missing keys fall back to field_meta.default (which is None when unset).
    Present values are converted via to_object() when a field_type is declared.
    """
    if field not in structure:
        # Absent key: the declared default (None when no default was given).
        return field_meta.default
    value = structure[field]
    if field_meta.field_type:
        return to_object(value, field_meta.field_type)
    return value
def to_object(structure, obj_type=None):
    """Convert a parsed-JSON *structure* into objects.

    Without *obj_type*: scalars pass through, lists convert element-wise and
    dicts become Struct instances. With *obj_type*: a List marker converts each
    element to its item_type; otherwise an obj_type instance is built and every
    Field-annotated attribute is filled via get_field_value().
    """
    if obj_type is None:
        if structure is None or isinstance(structure, types):
            return structure
        if isinstance(structure, list):
            return [to_object(item) for item in structure]
        return Struct(structure)
    if isinstance(obj_type, List):
        return [to_object(item, obj_type.item_type) for item in structure]
    obj = obj_type()
    for field, field_meta in inspect.getmembers(obj):
        if isinstance(field_meta, Field):
            setattr(obj, field, get_field_value(field, field_meta, structure))
    return obj
def limit(dictionary, keys):
    """Remove, in place, every entry of *dictionary* whose key is not in *keys*."""
    unwanted = set(dictionary) - set(keys)
    for key in unwanted:
        dictionary.pop(key, None)
def to_dict(value):
    """Recursively convert an object graph into plain dicts/lists/scalars.

    Lists convert element-wise, dicts are shallow-copied and converted
    value-wise, scalars (and None) pass through, and arbitrary objects are
    reduced to their __dict__ — restricted to the names listed in an optional
    __public__ attribute — with members converted recursively.
    """
    if isinstance(value, list):
        return [to_dict(item) for item in value]
    if isinstance(value, dict):
        # Work on a copy so the caller's dict is never mutated.
        # Fix: the loop variable used to be named `value`, shadowing the
        # function parameter inside this branch; renamed to `item`.
        cloned = value.copy()
        for key, item in cloned.items():
            cloned[key] = to_dict(item)
        return cloned
    if value is None or isinstance(value, types):
        return value
    # Arbitrary object: serialize its attribute dict.
    result = value.__dict__.copy()
    if hasattr(value, '__public__'):
        # Only expose attributes the object explicitly declares public.
        limit(result, value.__public__)
    for member, mvalue in result.items():
        result[member] = to_dict(mvalue)
    return result
def from_json(json_text, obj_type=None):
    """Decode *json_text* and convert it to objects (typed when *obj_type* given)."""
    return to_object(jsonpickle.decode(json_text), obj_type)
def to_json(value):
    """Serialize *value* to JSON without jsonpickle's py/object type tags."""
    return jsonpickle.encode(value, unpicklable=False)
def reformat(json_text):
    """Round-trip *json_text* through decode/encode (e.g. to re-indent it)."""
    return to_json(from_json(json_text))
def read_json(filename, obj_type=None):
    """Load *filename* and deserialize its JSON content.

    Returns None for a missing file or one containing only whitespace.
    """
    if not os.path.isfile(filename):
        return None
    with open(filename, 'r') as f:
        content = f.read()
    if content.strip(' \t\n\r') == '':
        return None
    return from_json(content, obj_type)
def write_json(filename, obj):
    """Serialize *obj* to *filename*; falsy objects produce an empty file."""
    serialized = to_json(obj) if obj else ''
    with open(filename, 'w') as out:
        out.write(serialized)
| syncloud/lib | syncloudlib/json/convertible.py | Python | gpl-3.0 | 3,287 |
"""
Shim to maintain backwards compatibility with old IPython.kernel imports.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from IPython.utils.shimmodule import ShimModule, ShimWarning
# Warn once at import time: this whole package is a deprecation shim.
# Fix: the two string fragments concatenated without a separating space,
# producing "...4.0.You should..."; a space has been added.
warn("The `IPython.kernel` package has been deprecated since IPython 4.0. "
     "You should import from ipykernel or jupyter_client instead.", ShimWarning)

# zmq subdir is gone
sys.modules['IPython.kernel.zmq.session'] = ShimModule(
    src='IPython.kernel.zmq.session', mirror='jupyter_client.session')
sys.modules['IPython.kernel.zmq'] = ShimModule(
    src='IPython.kernel.zmq', mirror='ipykernel')

# Redirect the remaining old subpackages to their new homes so that
# `import IPython.kernel.comm` (etc.) keeps working.
for pkg in ('comm', 'inprocess'):
    src = 'IPython.kernel.%s' % pkg
    sys.modules[src] = ShimModule(src=src, mirror='ipykernel.%s' % pkg)
for pkg in ('ioloop', 'blocking'):
    src = 'IPython.kernel.%s' % pkg
    sys.modules[src] = ShimModule(src=src, mirror='jupyter_client.%s' % pkg)

# required for `from IPython.kernel import PKG`
from ipykernel import comm, inprocess
from jupyter_client import ioloop, blocking

# public API
from ipykernel.connect import *
from jupyter_client import *
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/kernel/__init__.py | Python | bsd-2-clause | 1,185 |
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.tests.testutils import *
class TestKeys(unittest.TestCase):
    """Unit tests for Pony ORM primary-key / unique-key entity declarations.

    Each test builds a throwaway in-memory SQLite Database and declares
    entities, then either inspects the private key metadata (_pk_attrs_,
    _keys_, _simple_keys_, _composite_keys_) or — via the @raises_exception
    decorator — asserts that an invalid declaration fails with an exact
    error message.
    """

    def test_keys1(self):
        # Simple (non-composite) primary key.
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = Required(str)

        self.assertEqual(Entity1._pk_attrs_, (Entity1.a,))
        self.assertEqual(Entity1._pk_is_composite_, False)
        self.assertEqual(Entity1._pk_, Entity1.a)
        self.assertEqual(Entity1._keys_, [])
        self.assertEqual(Entity1._simple_keys_, [])
        self.assertEqual(Entity1._composite_keys_, [])

    def test_keys2(self):
        # Composite primary key declared via PrimaryKey(a, b).
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = Required(int)
            b = Required(str)
            PrimaryKey(a, b)

        self.assertEqual(Entity1._pk_attrs_, (Entity1.a, Entity1.b))
        self.assertEqual(Entity1._pk_is_composite_, True)
        self.assertEqual(Entity1._pk_, (Entity1.a, Entity1.b))
        self.assertEqual(Entity1._keys_, [])
        self.assertEqual(Entity1._simple_keys_, [])
        self.assertEqual(Entity1._composite_keys_, [])

    @raises_exception(ERDiagramError, 'Only one primary key can be defined in each entity class')
    def test_keys3(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = PrimaryKey(int)

    @raises_exception(ERDiagramError, 'Only one primary key can be defined in each entity class')
    def test_keys4(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = Required(int)
            c = Required(int)
            PrimaryKey(b, c)

    def test_unique1(self):
        # unique=True on a Required attribute registers a simple key.
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = Required(int, unique=True)

        self.assertEqual(Entity1._keys_, [(Entity1.b,)])
        self.assertEqual(Entity1._simple_keys_, [Entity1.b])
        self.assertEqual(Entity1._composite_keys_, [])

    def test_unique2(self):
        # unique=True also works on Optional attributes.
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = Optional(int, unique=True)

        self.assertEqual(Entity1._keys_, [(Entity1.b,)])
        self.assertEqual(Entity1._simple_keys_, [Entity1.b])
        self.assertEqual(Entity1._composite_keys_, [])

    def test_unique2_1(self):
        # composite_key() may mix Optional and Required attributes.
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = Optional(int)
            c = Required(int)
            composite_key(b, c)

        self.assertEqual(Entity1._keys_, [(Entity1.b, Entity1.c)])
        self.assertEqual(Entity1._simple_keys_, [])
        self.assertEqual(Entity1._composite_keys_, [(Entity1.b, Entity1.c)])

    @raises_exception(TypeError, 'composite_key() must receive at least two attributes as arguments')
    def test_unique3(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            composite_key()

    @raises_exception(TypeError, 'composite_key() arguments must be attributes. Got: 123')
    def test_unique4(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            composite_key(123, 456)

    @raises_exception(TypeError, "composite_key() arguments must be attributes. Got: %r" % int)
    def test_unique5(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            composite_key(int, a)

    @raises_exception(TypeError, 'Set attribute Entity1.b cannot be part of unique index')
    def test_unique6(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = Required(int)
            b = Set('Entity2')
            composite_key(a, b)

    @raises_exception(TypeError, "'unique' option cannot be set for attribute Entity1.b because it is collection")
    def test_unique7(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = Set('Entity2', unique=True)

    @raises_exception(TypeError, 'Optional attribute Entity1.b cannot be part of primary key')
    def test_unique8(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = Required(int)
            b = Optional(int)
            PrimaryKey(a, b)

    # float attributes are rejected in any kind of key.

    @raises_exception(TypeError, 'PrimaryKey attribute Entity1.a cannot be of type float')
    def test_float_pk(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(float)

    @raises_exception(TypeError, 'Attribute Entity1.b of type float cannot be part of primary key')
    def test_float_composite_pk(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = Required(int)
            b = Required(float)
            PrimaryKey(a, b)

    @raises_exception(TypeError, 'Attribute Entity1.b of type float cannot be part of unique index')
    def test_float_composite_key(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = Required(int)
            b = Required(float)
            composite_key(a, b)

    @raises_exception(TypeError, 'Unique attribute Entity1.a cannot be of type float')
    def test_float_unique(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = Required(float, unique=True)

    # volatile attributes are rejected in any kind of key.

    @raises_exception(TypeError, 'PrimaryKey attribute Entity1.a cannot be volatile')
    def test_volatile_pk(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int, volatile=True)

    @raises_exception(TypeError, 'Set attribute Entity1.b cannot be volatile')
    def test_volatile_set(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = PrimaryKey(int)
            b = Set('Entity2', volatile=True)

    @raises_exception(TypeError, 'Volatile attribute Entity1.b cannot be part of primary key')
    def test_volatile_composite_pk(self):
        db = Database('sqlite', ':memory:')

        class Entity1(db.Entity):
            a = Required(int)
            b = Required(int, volatile=True)
            PrimaryKey(a, b)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| compiteing/flask-ponypermission | venv/lib/python2.7/site-packages/pony/orm/tests/test_diagram_keys.py | Python | mit | 6,778 |
from GetListEmailAddresses import *
from AddEmailToList import *
from DeleteEmailFromList import *
| egetzel/wecrow | truehand2014/temboo/Library/SendGrid/NewsletterAPI/ListsEmail/__init__.py | Python | apache-2.0 | 99 |
#Contains a basic class for viewing a mesh, wrapping around glcanvas. Includes
#Basic functionality for setting up lighting/cameras and handling mouse events
#for changing viewing parameters
from OpenGL.GL import *
from OpenGL.arrays import vbo
import wx
from wx import glcanvas
from Primitives3D import *
from PolyMesh import *
from LaplacianMesh import *
from Cameras3D import *
# Default window geometry for the demo frame in the __main__ block below.
DEFAULT_SIZE = wx.Size(1200, 800)
DEFAULT_POS = wx.Point(10, 10)
def saveImageGL(mvcanvas, filename):
    """Save the current GL framebuffer contents to *filename* as a PNG."""
    viewport = glGetIntegerv(GL_VIEWPORT)
    width, height = viewport[2], viewport[3]
    image = wx.EmptyImage(width, height)
    # glReadPixels returns rows bottom-up, so mirror vertically afterwards.
    image.SetData(glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE))
    image = image.Mirror(False)
    image.SaveFile(filename, wx.BITMAP_TYPE_PNG)
def saveImage(canvas, filename):
    """Screen-grab *canvas* via a screen-DC blit and save it as a PNG."""
    screen_dc = wx.ScreenDC()
    width, height = canvas.size.Get()
    bitmap = wx.EmptyBitmap(width, height)
    mem_dc = wx.MemoryDCFromDC(screen_dc)
    mem_dc.SelectObject(bitmap)
    # NOTE(review): the (70, 0) source offset looks like a hard-coded window
    # chrome offset — confirm against the application's actual frame.
    mem_dc.Blit(0, 0, width, height, screen_dc, 70, 0)
    mem_dc.SelectObject(wx.NullBitmap)
    bitmap.SaveFile(filename, wx.BITMAP_TYPE_PNG)
class BasicMeshCanvas(glcanvas.GLCanvas):
    """wx GLCanvas that renders a PolyMesh with mouse-driven camera control.

    Left drag orbits, middle drag translates, right drag zooms. Display flags
    (displayMeshFaces, displayMeshEdges, ...) toggle what renderGL draws.

    NOTE(review): this class uses names not imported at the top of this file —
    gluPerspective (lives in OpenGL.GLU, only OpenGL.GL is imported here),
    np, math and M_PI. They presumably arrive via the star imports of
    Primitives3D / PolyMesh / Cameras3D — confirm.
    """

    def __init__(self, parent):
        attribs = (glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DOUBLEBUFFER, glcanvas.WX_GL_DEPTH_SIZE, 24)
        glcanvas.GLCanvas.__init__(self, parent, -1, attribList=attribs)
        self.context = glcanvas.GLContext(self)
        self.parent = parent
        # Camera state variables
        self.size = self.GetClientSize()
        # self.camera = MouseSphericalCamera(self.size.x, self.size.y)
        self.camera = MousePolarCamera(self.size.width, self.size.height)
        # Main state variables
        self.MousePos = [0, 0]
        self.bbox = BBox3D()
        # Face mesh variables and manipulation variables
        self.mesh = None
        self.meshCentroid = None
        self.displayMeshFaces = True
        self.displayMeshEdges = False
        self.displayBoundary = False
        self.displayMeshVertices = True
        self.displayVertexNormals = False
        self.displayFaceNormals = False
        self.useLighting = True
        self.useTexture = False
        # GL state is initialized lazily in the first paint event.
        self.GLinitialized = False
        # GL-related events
        wx.EVT_ERASE_BACKGROUND(self, self.processEraseBackgroundEvent)
        wx.EVT_SIZE(self, self.processSizeEvent)
        wx.EVT_PAINT(self, self.processPaintEvent)
        # Mouse Events
        wx.EVT_LEFT_DOWN(self, self.MouseDown)
        wx.EVT_LEFT_UP(self, self.MouseUp)
        wx.EVT_RIGHT_DOWN(self, self.MouseDown)
        wx.EVT_RIGHT_UP(self, self.MouseUp)
        wx.EVT_MIDDLE_DOWN(self, self.MouseDown)
        wx.EVT_MIDDLE_UP(self, self.MouseUp)
        wx.EVT_MOTION(self, self.MouseMotion)

    def initMeshBBox(self):
        """Cache the mesh bounding box and center the camera on it (front view)."""
        if self.mesh:
            self.bbox = self.mesh.getBBox()
            print "Mesh BBox: %s\n"%self.bbox
            self.camera.centerOnBBox(self.bbox, theta = -math.pi/2, phi = math.pi/2)

    # Canned viewpoints; each recenters the camera and repaints.
    def viewFromFront(self, evt):
        self.camera.centerOnBBox(self.bbox, theta = -math.pi/2, phi = math.pi/2)
        self.Refresh()

    def viewFromTop(self, evt):
        self.camera.centerOnBBox(self.bbox, theta = -math.pi/2, phi = 0)
        self.Refresh()

    def viewFromSide(self, evt):
        self.camera.centerOnBBox(self.bbox, theta = -math.pi, phi = math.pi/2)
        self.Refresh()

    def processEraseBackgroundEvent(self, event): pass #avoid flashing on MSW.

    def processSizeEvent(self, event):
        """Resize the GL viewport; note this rebuilds (and so resets) the camera."""
        self.size = self.GetClientSize()
        glViewport(0, 0, self.size.width, self.size.height)
        # Update camera parameters based on new size
        self.camera = MousePolarCamera(self.size.width, self.size.height)
        self.camera.centerOnBBox(self.bbox, math.pi/2, math.pi/2)

    def processPaintEvent(self, event):
        """Paint handler: lazily initialize GL state, then draw the scene."""
        dc = wx.PaintDC(self)
        self.SetCurrent(self.context)
        if not self.GLinitialized:
            self.initGL()
            self.GLinitialized = True
        self.repaint()

    def drawMeshStandard(self):
        """Render the mesh with a headlight (light at the camera position)."""
        glEnable(GL_LIGHTING)
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, [0.8, 0.8, 0.8, 1.0])
        glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, [0.2, 0.2, 0.2, 1.0])
        glMaterialfv(GL_FRONT_AND_BACK, GL_SHININESS, 64)
        # Set up modelview matrix
        self.camera.gotoCameraFrame()
        glLightfv(GL_LIGHT0, GL_POSITION, np.array([0, 0, 0, 1]))
        self.mesh.renderGL(self.displayMeshEdges, self.displayMeshVertices, self.displayMeshFaces, self.displayVertexNormals, self.displayFaceNormals, self.useLighting, self.useTexture, self.displayBoundary)

    def setupPerspectiveMatrix(self, nearDist = -1, farDist = -1):
        """Load a perspective projection; -1 sentinels derive near/far from the bbox.

        NOTE(review): a caller-supplied farDist is overwritten whenever
        nearDist is left at -1 — confirm that is intended.
        """
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        if nearDist == -1:
            farDist = self.camera.eye - self.bbox.getCenter()
            farDist = np.sqrt(farDist.dot(farDist)) + self.bbox.getDiagLength()
            nearDist = farDist/50.0
        gluPerspective(180.0*self.camera.yfov/M_PI, float(self.size.x)/self.size.y, nearDist, farDist)

    def repaint(self):
        """Clear, draw the mesh (if any), and swap buffers."""
        self.setupPerspectiveMatrix()
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        if self.mesh:
            self.drawMeshStandard()
        self.SwapBuffers()

    def initGL(self):
        """One-time GL state setup: two lights, normalization, depth test."""
        glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [0.2, 0.2, 0.2, 1.0])
        glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE)
        glLightfv(GL_LIGHT0, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])
        glEnable(GL_LIGHT0)
        glLightfv(GL_LIGHT1, GL_DIFFUSE, [0.5, 0.5, 0.5, 1.0])
        glEnable(GL_LIGHT1)
        glEnable(GL_NORMALIZE)
        glEnable(GL_LIGHTING)
        glEnable(GL_DEPTH_TEST)

    def handleMouseStuff(self, x, y):
        """Record the mouse position in GL coordinates (y axis flipped)."""
        # Invert y from what the window manager says
        y = self.size.height - y
        self.MousePos = [x, y]

    def MouseDown(self, evt):
        state = wx.GetMouseState()
        x, y = evt.GetPosition()
        self.CaptureMouse()
        self.handleMouseStuff(x, y)
        self.Refresh()

    def MouseUp(self, evt):
        x, y = evt.GetPosition()
        self.handleMouseStuff(x, y)
        self.ReleaseMouse()
        self.Refresh()

    def MouseMotion(self, evt):
        """Drag handler: middle=translate, right=zoom, left=orbit."""
        state = wx.GetMouseState()
        x, y = evt.GetPosition()
        [lastX, lastY] = self.MousePos
        self.handleMouseStuff(x, y)
        dX = self.MousePos[0] - lastX
        dY = self.MousePos[1] - lastY
        if evt.Dragging():
            # Translate/rotate shape
            if evt.MiddleIsDown():
                self.camera.translate(dX, dY)
            elif evt.RightIsDown():
                self.camera.zoom(-dY)  # Want to zoom in as the mouse goes up
            elif evt.LeftIsDown():
                self.camera.orbitLeftRight(dX)
                self.camera.orbitUpDown(dY)
        self.Refresh()
# Demo entry point: show a dodecahedron in a BasicMeshCanvas window.
if __name__ == '__main__':
    app = wx.PySimpleApp()
    frame = wx.Frame(None, wx.ID_ANY, "Basic Mesh Canvas", DEFAULT_POS, DEFAULT_SIZE)
    g = BasicMeshCanvas(frame)
    g.mesh = getDodecahedronMesh()
    g.initMeshBBox()
    frame.canvas = g
    frame.Show()
    app.MainLoop()
    app.Destroy()
| ctralie/S3DGLPy | MeshCanvas.py | Python | apache-2.0 | 7,133 |
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
## _
## | |
## __| | __ ___ ___ ___
## / _` |/ _` \ \ /\ / / '_ |
## | (_| | (_| |\ V V /| | | |
## \__,_|\__,_| \_/\_/ |_| |_| - Compiler Toolchain
##
##
## This file is distributed under the MIT License (MIT).
## See LICENSE.txt for details.
##
##===------------------------------------------------------------------------------------------===##
"""Generate input for StagerMerger tests"""
import os
import dawn4py
from dawn4py.serialization import SIR
from dawn4py.serialization import utils as serial_utils
from google.protobuf.json_format import MessageToJson, Parse
backend = "c++-naive-ico"
def sparse_temporary():
    """Build the "DontDemoteSparse" IIR test input and write it to ../input/.

    Constructs a SIR stencil with a temporary sparse field (sparseF) that is
    first filled inside a neighbor loop and then consumed in a reduction, so
    the optimizer must NOT demote it. The lowered IIR is serialized to JSON.
    """
    outputfile = "DontDemoteSparse"
    interval = serial_utils.make_interval(SIR.Interval.Start, SIR.Interval.End, 0, 0)
    body_ast = serial_utils.make_ast(
        [
            # loop over e->c->e neighbors: sparseF = 1.
            serial_utils.make_loop_stmt(
                serial_utils.make_assignment_stmt(
                    serial_utils.make_field_access_expr("sparseF"),
                    serial_utils.make_literal_access_expr("1.", SIR.BuiltinType.Double),
                    "="),
                [SIR.LocationType.Value("Edge"), SIR.LocationType.Value("Cell"), SIR.LocationType.Value("Edge")]),
            # outF = sum over e->c->e of inF (dense) * sparseF (sparse)
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("outF"),
                serial_utils.make_reduction_over_neighbor_expr(
                    "+",
                    serial_utils.make_binary_operator(
                        serial_utils.make_unstructured_field_access_expr("inF", horizontal_offset=serial_utils.make_unstructured_offset(False)),
                        "*",
                        serial_utils.make_unstructured_field_access_expr("sparseF", horizontal_offset=serial_utils.make_unstructured_offset(True))),
                    serial_utils.make_literal_access_expr("0.", SIR.BuiltinType.Double),
                    [SIR.LocationType.Value("Edge"), SIR.LocationType.Value("Cell"), SIR.LocationType.Value("Edge")]),
                "="),
        ]
    )
    vertical_region_stmt = serial_utils.make_vertical_region_decl_stmt(
        body_ast, interval, SIR.VerticalRegion.Forward
    )
    sir = serial_utils.make_sir(
        outputfile,
        SIR.GridType.Value("Unstructured"),
        [
            serial_utils.make_stencil(
                "generated",
                serial_utils.make_ast([vertical_region_stmt]),
                [
                    serial_utils.make_field(
                        "inF",
                        serial_utils.make_field_dimensions_unstructured(
                            [SIR.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "outF",
                        serial_utils.make_field_dimensions_unstructured(
                            [SIR.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "sparseF",
                        serial_utils.make_field_dimensions_unstructured(
                            [SIR.LocationType.Value("Edge"), SIR.LocationType.Value("Cell"), SIR.LocationType.Value("Edge")], 1
                        ),
                        is_temporary=True
                    ),
                ],
            ),
        ],
    )
    # Lower to IIR with no optimizer groups, dump as JSON and move into place.
    sim = dawn4py.lower_and_optimize(sir, groups=[])
    with open(outputfile, mode="w") as f:
        f.write(MessageToJson(sim["generated"]))
    os.rename(outputfile, "../input/" + outputfile + ".iir")
# Generate the test input when run as a script.
if __name__ == "__main__":
    sparse_temporary()
| MeteoSwiss-APN/dawn | dawn/test/unit-test/dawn/Optimizer/samples/TemporaryTypeDontDemoteSparse.py | Python | mit | 3,918 |
from mesh import Mesh1D
from fem import DofHandler
from function import Function
from fem import QuadFE
from assembler import Kernel
from assembler import Form
from fem import Basis
from assembler import Assembler
from solver import LinearSystem
from plot import Plot
import numpy as np
import matplotlib.pyplot as plt
def qfn(x):
    """Synthetic diffusion coefficient.

    q(x) = 1 + 0.2*cos(2*pi*x) + 0.1*cos(3*pi*x) + 0.1*cos(4*pi*x)

    Works elementwise for both scalars and numpy arrays.
    """
    harmonics = ((0.2, 2), (0.1, 3), (0.1, 4))
    q = 1
    for amplitude, wavenumber in harmonics:
        q = q + amplitude * np.cos(wavenumber * np.pi * x)
    return q
"""
Test 02
Parameter idenfication of continuous diffusion parameter
"""
#
# Define Computational Mesh
#
mesh = Mesh1D(resolution=(200,))
mesh.mark_region('left', lambda x: np.abs(x)<1e-9)
mesh.mark_region('right', lambda x: np.abs(x-1)<1e-9)
#
# Elements
#
Q0 = QuadFE(1, 'DQ0')
Q1 = QuadFE(1, 'Q1')
#
# Exact diffusion coefficient
#
qe = Function(qfn, 'explicit', dim=1)
one = Function(1, 'constant')
k1 = 1e-9
k2 = 1000
#
# Basis functions
#
u = Basis(Q1, 'u')
ux = Basis(Q1, 'ux')
q = Basis(Q1, 'q')
#
# Forms
#
a_qe = Form(kernel=Kernel(qe), trial=ux, test=ux)
a_one = Form(kernel=Kernel(one), trial=ux, test=ux)
L = Form(kernel=Kernel(one), test=u)
#
# Problems
#
problems = [[a_qe,L], [a_one]]
#
# Assembly
#
assembler = Assembler(problems, mesh)
assembler.assemble()
# =============================================================================
# Linear system for generating observations
# =============================================================================
system = LinearSystem(assembler,0)
f = system.get_rhs()
#
# Incorporate constraints
#
system.add_dirichlet_constraint('left',0)
system.add_dirichlet_constraint('right',0)
#
# Compute model output
#
system.solve_system()
ue = system.get_solution(as_function=True)
ue_x = ue.derivative((1,0))
plot = Plot()
plot.line(qe, mesh=mesh)
# ==============================================================================
# Linear system for generating the inverse laplacian C
# ==============================================================================
C = LinearSystem(assembler, 1)
#
# Incorporate constraints
#
C.add_dirichlet_constraint('left',0)
C.add_dirichlet_constraint('right',0)
#
# Compute penalty coefficients gamma and r
#
r = 1e-9
gamma = 1
beta = 1e-9
k_max = 20
n_max = 20
#
# Initial guesses
#
u_iter = [ue] # State
q_iter = [Function(1,'constant')] # Parameter
p_iter = [Function(0,'constant')] # Lagrange multiplier
# NOTE(review): this augmented-Lagrangian loop is unfinished work in progress:
# qo, u0, q0 and p0 are never assigned in this excerpt, the inner state solve
# is a bare "pass", and the multiplier update is commented out. Running it
# as-is raises NameError at the "Record updates" step -- confirm intent
# upstream before enabling.
for n in range(n_max):
    #
    # Augmented Lagrangian Subproblem
    #
    for k in range(k_max):
        #
        # Solve q = argmin L(q,u_{k-1}, lmd)
        #

        # Assemble Bu (mixed form coupling the current state gradient to q)
        u = u_iter[-1]
        bu = Form(kernel=Kernel([u],['ux']), trial=ux, test=q)
        assembler = Assembler([bu], mesh)
        assembler.assemble()
        Bu = assembler.af[0]['bilinear'].get_matrix()

        # Compute B

        # Incorporate boundary conditions

        #
        # Solve u_n^k = argmin L(q_n^k, u, lm_[n-1])
        #
        aq = Form(kernel=Kernel(qo), test=ux, trial=ux)
        pass
    #
    # Update p
    #
    #p0 = p0 + r*eqn
    #
    # Record updates
    #
    u_iter.append(u0)
    q_iter.append(q0)
    p_iter.append(p0)
#
# Define elements (piecewise-constant parameter space, linear state space)
#
Q0 = QuadFE(1, 'DQ0')
Q1 = QuadFE(1, 'Q1')
#
# Forms
#
# NOTE(review): qfn and np are defined earlier in the full script -- verify.
q = Function(qfn, 'nodal', mesh=mesh, element=Q0)
#q = Function(1, 'constant')
zero = Function(0, 'constant')
one = Function(1, 'constant')
#
# Trial and test functions
#
u = Basis(Q1,'u')
ux = Basis(Q1,'ux')
#
# Diffusion form with conductivity q and unit right hand side
#
a = Form(kernel=Kernel(q), trial=ux, test=ux)
L = Form(kernel=Kernel(one), test=u)
problem = [a,L]
assembler = Assembler(problem, mesh)
assembler.assemble()
system = LinearSystem(assembler)
# Boundary functions (indicator functions for the interval endpoints)
bm_left = lambda x: np.abs(x)<1e-9
bm_rght = lambda x: np.abs(x-1)<1e-9
# Mark boundary regions
mesh.mark_region('left', bm_left, on_boundary=True)
mesh.mark_region('right',bm_rght, on_boundary=True)
# Add Dirichlet constraints and solve the constrained system
system.add_dirichlet_constraint('left',0)
system.add_dirichlet_constraint('right',0)
system.set_constraint_matrix()
system.incorporate_constraints()
system.solve()
system.resolve_constraints()
u = system.sol(as_function=True)
plot = Plot()
plot.line(u)
#
# Define the exact parameter as a piecewise constant function
#
#qex_vec = np.array([0.5,2,0.5,2,0.5])
#qex_fn = Function(qex_vec, 'nodal', mesh=mesh, element=Q0, subforest_flag=0)
#
# Right hand side
#
#f = Function(1, 'constant')
| hvanwyk/quadmesh | experiments/multiscale_inverse/test02.py | Python | mit | 4,488 |
import pytest
from domainics.util import ContentTree
def test_dag():
    """Exercise ContentTree: insertion, upward walks, children and removal.

    Tree under test (point -> value):
        a(100):A
        +-- b(101):B
        |   +-- d(103):D -> e(104):E, h(107):H
        |   +-- g(106):G
        +-- c(102):C -> f(105):F
    """
    tree = ContentTree()

    a, b, c, d, e, f, g, h = range(100, 108)

    # (point, value, parent) -- parent None marks the root.
    layout = [
        (a, 'A', None),
        (b, 'B', a),
        (c, 'C', a),
        (d, 'D', b),
        (e, 'E', d),
        (h, 'H', d),
        (g, 'G', b),
        (f, 'F', c),
    ]
    for point, value, parent in layout:
        if parent is None:
            tree.set(point, value)
        else:
            tree.set(point, value, parent)

    # walking upwards yields the ancestors' values, nearest first.
    assert [tree[p] for p in tree.upwards(e)] == ['D', 'B', 'A']
    assert [tree[p] for p in tree.upwards(f)] == ['C', 'A']

    # a point with children cannot be removed.
    with pytest.raises(ValueError):
        tree.unset(d)  # d still owns e and h

    assert set(tree.children(d)) == {e, h}

    tree.unset(e)
    assert tree[e] is None
    assert set(tree.children(d)) == {h}

    # once its subtree is gone, d itself can go.
    tree.unset(h)
    tree.unset(d)

    assert tree.parent(f) == c
| lcgong/domainics | test/pillar/test_contenttree.py | Python | apache-2.0 | 941 |
import os
import re
import sys
import zlib
import time
import socket
import httplib
import cPickle
import threading
import BaseHTTPServer
import httplib
import logging
import blocks
import pedrpc
import pgraph
import sex
import primitives
########################################################################################################################
class target:
    '''
    Target descriptor container.

    Holds the host/port of the system under test plus optional handles to the
    PED-RPC monitoring agents (network monitor, process monitor, VM control)
    and their option dictionaries. The monitor handles and options are meant
    to be assigned manually after instantiation.
    '''

    def __init__ (self, host, port, **kwargs):
        '''
        @type  host: String
        @param host: Hostname or IP address of target system
        @type  port: Integer
        @param port: Port of target service
        '''

        self.host      = host
        self.port      = port

        # set these manually once target is instantiated.
        self.netmon            = None
        self.procmon           = None
        self.vmcontrol         = None
        self.netmon_options    = {}
        self.procmon_options   = {}
        self.vmcontrol_options = {}

    def pedrpc_connect (self):
        '''
        Pass specified target parameters to the PED-RPC server.

        Blocks until each configured monitor (procmon, then netmon) answers
        alive(), then pushes every entry of the matching options dictionary
        through its set_<option>() accessor.
        '''

        self._sync_monitor(self.procmon, self.procmon_options)
        self._sync_monitor(self.netmon,  self.netmon_options)

    def _sync_monitor (self, monitor, options):
        '''
        Wait for a single PED-RPC monitor to come alive, then push its
        options to it. No-op when monitor is None.

        @type  monitor: PED-RPC proxy or None
        @param monitor: Monitor endpoint to synchronize
        @type  options: Dictionary
        @param options: Option name -> value mapping to push via set_<name>()
        '''

        if not monitor:
            return

        # poll until the RPC endpoint responds; connection errors simply mean
        # it is not up yet, so keep retrying (deliberate best-effort loop).
        while 1:
            try:
                if monitor.alive():
                    break
            except Exception:
                pass

            time.sleep(1)

        # connection established, push each option through its setter.
        # BUG FIX: the original built and eval()'d source strings such as
        # "self.procmon.set_%s(...)"; getattr() makes the same call without
        # executing generated code.
        for key in options.keys():
            getattr(monitor, "set_%s" % key)(options[key])
########################################################################################################################
class connection (pgraph.edge.edge):
    def __init__ (self, src, dst, callback=None):
        '''
        Extends pgraph.edge with a callback option. This allows us to register a function to call between node
        transmissions to implement functionality such as challenge response systems. The callback method must follow
        this prototype::

            def callback(session, node, edge, sock)

        Where node is the node about to be sent, edge is the last edge along the current fuzz path to "node", session
        is a pointer to the session instance which is useful for snagging data such as sesson.last_recv which contains
        the data returned from the last socket transmission and sock is the live socket. A callback is also useful in
        situations where, for example, the size of the next packet is specified in the first packet.

        @type  src:      Integer
        @param src:      Edge source ID
        @type  dst:      Integer
        @param dst:      Edge destination ID
        @type  callback: Function
        @param callback: (Optional, def=None) Callback function to pass received data to between node xmits
        '''

        # run the parent classes initialization routine first.
        pgraph.edge.edge.__init__(self, src, dst)

        # session.transmit() invokes this callback before rendering; if it
        # returns data, that data is sent in place of node.render().
        self.callback = callback
########################################################################################################################
class session (pgraph.graph):
def __init__(
    self,
    session_filename=None,
    skip=0,
    sleep_time=1.0,
    log_level=logging.INFO,
    logfile=None,
    logfile_level=logging.DEBUG,
    proto="tcp",
    bind=None,
    restart_interval=0,
    timeout=5.0,
    web_port=26000,
    crash_threshold=3,
    restart_sleep_time=300
):
    '''
    Extends pgraph.graph and provides a container for architecting protocol dialogs.

    @type  session_filename: String
    @kwarg session_filename: (Optional, def=None) Filename to serialize persistant data to
    @type  skip:             Integer
    @kwarg skip:             (Optional, def=0) Number of test cases to skip
    @type  sleep_time:       Float
    @kwarg sleep_time:       (Optional, def=1.0) Time to sleep in between tests
    @type  log_level:        Integer
    @kwarg log_level:        (Optional, def=logger.INFO) Set the log level
    @type  logfile:          String
    @kwarg logfile:          (Optional, def=None) Name of log file
    @type  logfile_level:    Integer
    @kwarg logfile_level:    (Optional, def=logger.INFO) Set the log level for the logfile
    @type  proto:            String
    @kwarg proto:            (Optional, def="tcp") Communication protocol ("tcp", "udp", "ssl")
    @type  bind:             Tuple (host, port)
    @kwarg bind:             (Optional, def=random) Socket bind address and port
    @type  timeout:          Float
    @kwarg timeout:          (Optional, def=5.0) Seconds to wait for a send/recv prior to timing out
    @type  restart_interval: Integer
    @kwarg restart_interval  (Optional, def=0) Restart the target after n test cases, disable by setting to 0
    @type  crash_threshold:  Integer
    @kwarg crash_threshold   (Optional, def=3) Maximum number of crashes allowed before a node is exhaust
    @type  restart_sleep_time: Integer
    @kwarg restart_sleep_time: Optional, def=300) Time in seconds to sleep when target can't be restarted
    @type  web_port:         Integer
    @kwarg web_port:         (Optional, def=26000) Port for monitoring fuzzing campaign via a web browser
    '''

    # run the parent classes initialization routine first.
    pgraph.graph.__init__(self)

    self.session_filename   = session_filename
    self.skip               = skip
    self.sleep_time         = sleep_time
    self.proto              = proto.lower()
    self.bind               = bind
    self.ssl                = False
    self.restart_interval   = restart_interval
    self.timeout            = timeout
    self.web_port           = web_port
    self.crash_threshold    = crash_threshold
    self.restart_sleep_time = restart_sleep_time

    # Initialize logger: one named logger with an optional file handler plus
    # an always-on console handler.
    self.logger = logging.getLogger("Sulley_logger")
    self.logger.setLevel(log_level)
    formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] -> %(message)s')

    if logfile != None:
        filehandler = logging.FileHandler(logfile)
        filehandler.setLevel(logfile_level)
        filehandler.setFormatter(formatter)
        self.logger.addHandler(filehandler)

    consolehandler = logging.StreamHandler()
    consolehandler.setFormatter(formatter)
    consolehandler.setLevel(log_level)
    self.logger.addHandler(consolehandler)

    # run bookkeeping, updated as mutations are generated and transmitted.
    self.total_num_mutations = 0
    self.total_mutant_index  = 0
    self.fuzz_node           = None
    self.targets             = []
    self.netmon_results      = {}
    self.procmon_results     = {}
    self.protmon_results     = {}
    self.pause_flag          = False
    self.crashing_primitives = {}

    # map the human readable protocol name onto the socket type constant.
    if self.proto == "tcp":
        self.proto = socket.SOCK_STREAM

    elif self.proto == "ssl":
        self.proto = socket.SOCK_STREAM
        self.ssl   = True

    elif self.proto == "udp":
        self.proto = socket.SOCK_DGRAM

    else:
        raise sex.SullyRuntimeError("INVALID PROTOCOL SPECIFIED: %s" % self.proto)

    # import settings if they exist.
    self.import_file()

    # create a root node. we do this because we need to start fuzzing from a single point and the user may want
    # to specify a number of initial requests.
    self.root       = pgraph.node()
    self.root.name  = "__ROOT_NODE__"
    self.root.label = self.root.name
    self.last_recv  = None

    self.add_node(self.root)
####################################################################################################################
def add_node (self, node):
    '''
    Add a pgraph node to the graph. We overload this routine to automatically generate and assign an ID whenever a
    node is added.

    @type  node: pGRAPH Node
    @param node: Node to add to session graph

    @rtype:  session
    @return: self, to allow chained calls
    '''

    # sequential IDs: the next ID is simply the current node count.
    node.number = len(self.nodes)
    node.id     = len(self.nodes)

    # dict.has_key() is deprecated (and removed in Python 3); the "in"
    # membership test is the equivalent, portable form.
    if node.id not in self.nodes:
        self.nodes[node.id] = node

    return self
####################################################################################################################
def add_target (self, target):
    '''
    Add a target to the session. Multiple targets can be added for parallel fuzzing.

    @type  target: session.target
    @param target: Target to add to session
    '''

    # pass specified target parameters to the PED-RPC server.
    # note: blocks until the configured monitors respond alive().
    target.pedrpc_connect()

    # add target to internal list.
    self.targets.append(target)
####################################################################################################################
def connect (self, src, dst=None, callback=None):
    '''
    Create a connection between the two requests (nodes) and register an optional callback to process in between
    transmissions of the source and destination request. Leverage this functionality to handle situations such as
    challenge response systems. The session class maintains a top level node that all initial requests must be
    connected to. Example::

        sess = sessions.session()
        sess.connect(sess.root, s_get("HTTP"))

    If given only a single parameter, sess.connect() will default to attaching the supplied node to the root node.
    This is a convenient alias and is identical to the second line from the above example::

        sess.connect(s_get("HTTP"))

    If you register callback method, it must follow this prototype::

        def callback(session, node, edge, sock)

    Where node is the node about to be sent, edge is the last edge along the current fuzz path to "node", session
    is a pointer to the session instance which is useful for snagging data such as sesson.last_recv which contains
    the data returned from the last socket transmission and sock is the live socket. A callback is also useful in
    situations where, for example, the size of the next packet is specified in the first packet. As another
    example, if you need to fill in the dynamic IP address of the target register a callback that snags the IP
    from sock.getpeername()[0].

    @type  src:      String or Request (Node)
    @param src:      Source request name or request node
    @type  dst:      String or Request (Node)
    @param dst:      Destination request name or request node
    @type  callback: Function
    @param callback: (Optional, def=None) Callback function to pass received data to between node xmits

    @rtype:  pgraph.edge
    @return: The edge between the src and dst.
    '''

    # if only a source was provided, then make it the destination and set the source to the root node.
    if not dst:
        dst = src
        src = self.root

    # if source or destination is a name, resolve the actual node.
    # isinstance() replaces the original exact type(x) is str check -- same
    # result for plain strings, and also accepts str subclasses.
    if isinstance(src, str):
        src = self.find_node("name", src)

    if isinstance(dst, str):
        dst = self.find_node("name", dst)

    # if source or destination is not in the graph, add it.
    if src != self.root and not self.find_node("name", src.name):
        self.add_node(src)

    if not self.find_node("name", dst.name):
        self.add_node(dst)

    # create an edge between the two nodes and add it to the graph.
    edge = connection(src.id, dst.id, callback)
    self.add_edge(edge)

    return edge
####################################################################################################################
def export_file (self):
    '''
    Dump various object values to disk: run counters, monitor results and the
    pause flag, compressed with zlib around a version-2 pickle.

    @see: import_file()
    '''

    # serialization not configured, nothing to do.
    if not self.session_filename:
        return

    data = {}
    data["session_filename"]    = self.session_filename
    data["skip"]                = self.total_mutant_index
    data["sleep_time"]          = self.sleep_time
    data["restart_sleep_time"]  = self.restart_sleep_time
    data["proto"]               = self.proto
    data["restart_interval"]    = self.restart_interval
    data["timeout"]             = self.timeout
    data["web_port"]            = self.web_port
    data["crash_threshold"]     = self.crash_threshold
    data["total_num_mutations"] = self.total_num_mutations
    data["total_mutant_index"]  = self.total_mutant_index
    data["netmon_results"]      = self.netmon_results
    data["procmon_results"]     = self.procmon_results
    data["protmon_results"]     = self.protmon_results
    data["pause_flag"]          = self.pause_flag

    # use a context manager so the handle is closed even if pickling or
    # compression raises (the original leaked the handle on error).
    with open(self.session_filename, "wb+") as fh:
        fh.write(zlib.compress(cPickle.dumps(data, protocol=2)))
####################################################################################################################
def fuzz (self, this_node=None, path=[]):
    '''
    Call this routine to get the ball rolling. No arguments are necessary as they are both utilized internally
    during the recursive traversal of the session graph.

    NOTE(review): "path=[]" is a mutable default argument shared across
    top-level calls; the routine pops what it pushes so it normally
    self-heals, but an exception mid-walk can leave stale edges behind --
    confirm before relying on repeated fuzz() calls on one instance.

    @type  this_node: request (node)
    @param this_node: (Optional, def=None) Current node that is being fuzzed.
    @type  path:      List
    @param path:      (Optional, def=[]) Nodes along the path to the current one being fuzzed.
    '''

    # if no node is specified, then we start from the root node and initialize the session.
    if not this_node:
        # we can't fuzz if we don't have at least one target and one request.
        if not self.targets:
            raise sex.SullyRuntimeError("NO TARGETS SPECIFIED IN SESSION")

        if not self.edges_from(self.root.id):
            raise sex.SullyRuntimeError("NO REQUESTS SPECIFIED IN SESSION")

        this_node = self.root

        # best-effort: a failure to bind the web interface aborts the run.
        try:    self.server_init()
        except: return

    # TODO: complete parallel fuzzing, will likely have to thread out each target
    target = self.targets[0]

    # step through every edge from the current node.
    for edge in self.edges_from(this_node.id):
        # the destination node is the one actually being fuzzed.
        self.fuzz_node = self.nodes[edge.dst]
        num_mutations  = self.fuzz_node.num_mutations()

        # keep track of the path as we fuzz through it, don't count the root node.
        # we keep track of edges as opposed to nodes because if there is more then one path through a set of
        # given nodes we don't want any ambiguity.
        path.append(edge)

        current_path  = " -> ".join([self.nodes[e.src].name for e in path[1:]])
        current_path += " -> %s" % self.fuzz_node.name

        self.logger.info("current fuzz path: %s" % current_path)

        self.logger.info("fuzzed %d of %d total cases" % (self.total_mutant_index, self.total_num_mutations))

        done_with_fuzz_node = False
        crash_count         = 0

        # loop through all possible mutations of the fuzz node.
        while not done_with_fuzz_node:
            # if we need to pause, do so.
            self.pause()

            # if we have exhausted the mutations of the fuzz node, break out of the while(1).
            # note: when mutate() returns False, the node has been reverted to the default (valid) state.
            if not self.fuzz_node.mutate():
                self.logger.error("all possible mutations for current fuzz node exhausted")
                done_with_fuzz_node = True
                continue

            # make a record in the session that a mutation was made.
            self.total_mutant_index += 1

            # if we've hit the restart interval, restart the target.
            if self.restart_interval and self.total_mutant_index % self.restart_interval == 0:
                self.logger.error("restart interval of %d reached" % self.restart_interval)
                self.restart_target(target)

            # exception error handling routine, print log message and restart target.
            def error_handler (e, msg, target, sock=None):
                if sock:
                    sock.close()

                msg += "\nException caught: %s" % repr(e)
                msg += "\nRestarting target and trying again"

                self.logger.critical(msg)
                self.restart_target(target)

            # if we don't need to skip the current test case.
            if self.total_mutant_index > self.skip:
                self.logger.info("fuzzing %d of %d" % (self.fuzz_node.mutant_index, num_mutations))

                # attempt to complete a fuzz transmission. keep trying until we are successful, whenever a failure
                # occurs, restart the target.
                while 1:
                    # instruct the debugger/sniffer that we are about to send a new fuzz.
                    if target.procmon:
                        try:
                            target.procmon.pre_send(self.total_mutant_index)
                        except Exception, e:
                            error_handler(e, "failed on procmon.pre_send()", target)
                            continue

                    if target.netmon:
                        try:
                            target.netmon.pre_send(self.total_mutant_index)
                        except Exception, e:
                            error_handler(e, "failed on netmon.pre_send()", target)
                            continue

                    try:
                        # establish a connection to the target.
                        (family, socktype, proto, canonname, sockaddr)=socket.getaddrinfo(target.host, target.port)[0]
                        sock = socket.socket(family, self.proto)
                    except Exception, e:
                        error_handler(e, "failed creating socket", target)
                        continue

                    if self.bind:
                        try:
                            sock.bind(self.bind)
                        except Exception, e:
                            error_handler(e, "failed binding on socket", target, sock)
                            continue

                    try:
                        sock.settimeout(self.timeout)

                        # Connect is needed only for TCP stream
                        if self.proto == socket.SOCK_STREAM:
                            sock.connect((target.host, target.port))
                    except Exception, e:
                        error_handler(e, "failed connecting on socket", target, sock)
                        continue

                    # if SSL is requested, then enable it.
                    if self.ssl:
                        try:
                            ssl  = socket.ssl(sock)
                            sock = httplib.FakeSocket(sock, ssl)
                        except Exception, e:
                            error_handler(e, "failed ssl setup", target, sock)
                            continue

                    # if the user registered a pre-send function, pass it the sock and let it do the deed.
                    try:
                        self.pre_send(sock)
                    except Exception, e:
                        error_handler(e, "pre_send() failed", target, sock)
                        continue

                    # send out valid requests for each node in the current path up to the node we are fuzzing.
                    try:
                        for e in path[:-1]:
                            node = self.nodes[e.dst]
                            self.transmit(sock, node, e, target)
                    except Exception, e:
                        error_handler(e, "failed transmitting a node up the path", target, sock)
                        continue

                    # now send the current node we are fuzzing.
                    try:
                        self.transmit(sock, self.fuzz_node, edge, target)
                    except Exception, e:
                        error_handler(e, "failed transmitting fuzz node", target, sock)
                        continue

                    # if we reach this point the send was successful for break out of the while(1).
                    break

                # if the user registered a post-send function, pass it the sock and let it do the deed.
                # we do this outside the try/except loop because if our fuzz causes a crash then the post_send()
                # will likely fail and we don't want to sit in an endless loop.
                try:
                    self.post_send(sock)
                except Exception, e:
                    error_handler(e, "post_send() failed", target, sock)

                # done with the socket.
                sock.close()

                # delay in between test cases.
                self.logger.info("sleeping for %f seconds" % self.sleep_time)
                time.sleep(self.sleep_time)

                # poll the PED-RPC endpoints (netmon, procmon etc...) for the target.
                self.poll_pedrpc(target)

                # serialize the current session state to disk.
                self.export_file()

        # recursively fuzz the remainder of the nodes in the session graph.
        self.fuzz(self.fuzz_node, path)

    # finished with the last node on the path, pop it off the path stack.
    if path:
        path.pop()

    # loop to keep the main thread running and be able to receive signals
    if self.signal_module:
        # wait for a signal only if fuzzing is finished (this function is recursive)
        # if fuzzing is not finished, web interface thread will catch it
        if self.total_mutant_index == self.total_num_mutations:
            import signal
            try:
                while True:
                    signal.pause()
            except AttributeError:
                # signal.pause() is missing for Windows; wait 1ms and loop instead
                while True:
                    time.sleep(0.001)
####################################################################################################################
def import_file (self):
    '''
    Load various object values from disk, restoring counters and monitor
    results from a previous run so fuzzing resumes where it left off.

    @see: export_file()
    '''

    try:
        # context manager guarantees the handle is closed even when the
        # decompress/unpickle step raises.
        with open(self.session_filename, "rb") as fh:
            data = cPickle.loads(zlib.decompress(fh.read()))
    except Exception:
        # deliberate best-effort: a missing filename (None), absent file or
        # unreadable/stale payload simply means we start a fresh session.
        # (narrowed from a bare except so SystemExit/KeyboardInterrupt pass.)
        return

    # update the skip variable to pick up fuzzing from last test case.
    self.skip                = data["total_mutant_index"]

    self.session_filename    = data["session_filename"]
    self.sleep_time          = data["sleep_time"]
    self.restart_sleep_time  = data["restart_sleep_time"]
    self.proto               = data["proto"]
    self.restart_interval    = data["restart_interval"]
    self.timeout             = data["timeout"]
    self.web_port            = data["web_port"]
    self.crash_threshold     = data["crash_threshold"]
    self.total_num_mutations = data["total_num_mutations"]
    self.total_mutant_index  = data["total_mutant_index"]
    self.netmon_results      = data["netmon_results"]
    self.procmon_results     = data["procmon_results"]
    self.protmon_results     = data["protmon_results"]
    self.pause_flag          = data["pause_flag"]
####################################################################################################################
#def log (self, msg, level=1):
'''
If the supplied message falls under the current log level, print the specified message to screen.
@type msg: String
@param msg: Message to log
'''
#
#if self.log_level >= level:
#print "[%s] %s" % (time.strftime("%I:%M.%S"), msg)
####################################################################################################################
def num_mutations (self, this_node=None, path=None):
    '''
    Number of total mutations in the graph. The logic of this routine is identical to that of fuzz(). See fuzz()
    for inline comments. The member variable self.total_num_mutations is updated appropriately by this routine.

    @type  this_node: request (node)
    @param this_node: (Optional, def=None) Current node that is being fuzzed.
    @type  path:      List
    @param path:      (Optional, def=None) Nodes along the path to the current one being fuzzed.

    @rtype:  Integer
    @return: Total number of mutations in this session.
    '''

    # BUG FIX: the original signature used the mutable default "path=[]",
    # which Python creates once and shares across every top-level call.
    # Allocate a fresh list per invocation instead; recursive calls still
    # pass the list through explicitly, so traversal behavior is unchanged.
    if path is None:
        path = []

    if not this_node:
        this_node                = self.root
        self.total_num_mutations = 0

    for edge in self.edges_from(this_node.id):
        next_node = self.nodes[edge.dst]
        self.total_num_mutations += next_node.num_mutations()

        if edge.src != self.root.id:
            path.append(edge)

        self.num_mutations(next_node, path)

    # finished with the last node on the path, pop it off the path stack.
    if path:
        path.pop()

    return self.total_num_mutations
####################################################################################################################
def pause (self):
    '''
    Block the calling (fuzzing) thread for as long as the pause flag is
    raised; return as soon as it is lowered.
    '''

    # coarse 1-second spin; another thread (the web interface) clears the
    # flag.
    while self.pause_flag:
        time.sleep(1)
####################################################################################################################
def poll_pedrpc (self, target):
    '''
    Poll the PED-RPC endpoints (netmon, procmon etc...) for the target.

    Records sniffer byte counts, detects access violations reported by the
    process monitor, exhausts a primitive once it crosses the crash
    threshold, and restarts the target (exiting the whole run if the restart
    fails).

    @type  target: session.target
    @param target: Session target whose PED-RPC services we are polling
    '''

    # kill the pcap thread and see how many bytes the sniffer recorded.
    if target.netmon:
        bytes = target.netmon.post_send()
        self.logger.info("netmon captured %d bytes for test case #%d" % (bytes, self.total_mutant_index))
        self.netmon_results[self.total_mutant_index] = bytes

    # check if our fuzz crashed the target. procmon.post_send() returns False if the target access violated.
    if target.procmon and not target.procmon.post_send():
        self.logger.info("procmon detected access violation on test case #%d" % self.total_mutant_index)

        # retrieve the primitive that caused the crash and increment it's individual crash count.
        self.crashing_primitives[self.fuzz_node.mutant] = self.crashing_primitives.get(self.fuzz_node.mutant, 0) + 1

        # notify with as much information as possible.
        if self.fuzz_node.mutant.name:
            msg = "primitive name: %s, " % self.fuzz_node.mutant.name
        else:
            msg = "primitive lacks a name, "

        msg += "type: %s, default value: %s" % (self.fuzz_node.mutant.s_type, self.fuzz_node.mutant.original_value)
        self.logger.info(msg)

        # print crash synopsis
        self.procmon_results[self.total_mutant_index] = target.procmon.get_crash_synopsis()
        self.logger.info(self.procmon_results[self.total_mutant_index].split("\n")[0])

        # if the user-supplied crash threshold is reached, exhaust this node.
        if self.crashing_primitives[self.fuzz_node.mutant] >= self.crash_threshold:
            # as long as we're not a group and not a repeat.
            if not isinstance(self.fuzz_node.mutant, primitives.group):
                if not isinstance(self.fuzz_node.mutant, blocks.repeat):
                    skipped = self.fuzz_node.mutant.exhaust()
                    self.logger.warning("crash threshold reached for this primitive, exhausting %d mutants." % skipped)
                    self.total_mutant_index  += skipped
                    self.fuzz_node.mutant_index += skipped

        # start the target back up.
        # If it returns False, stop the test
        if self.restart_target(target, stop_first=False) == False:
            self.logger.critical("Restarting the target failed, exiting.")
            self.export_file()
            try:
                # the web interface thread, started by server_init().
                self.thread.join()
            except:
                self.logger.debug("No server launched")

            sys.exit(0)
####################################################################################################################
def post_send (self, sock):
    '''
    Overload or replace this routine to specify actions to run after to each fuzz request. The order of events is
    as follows::

        pre_send() - req - callback ... req - callback - post_send()

    When fuzzing RPC for example, register this method to tear down the RPC request.

    @see: pre_send()

    @type  sock: Socket
    @param sock: Connected socket to target
    '''

    # default to doing nothing; this is a subclass/user hook.
    pass
####################################################################################################################
def pre_send (self, sock):
    '''
    Overload or replace this routine to specify actions to run prior to each fuzz request. The order of events is
    as follows::

        pre_send() - req - callback ... req - callback - post_send()

    When fuzzing RPC for example, register this method to establish the RPC bind.

    @see: post_send()

    @type  sock: Socket
    @param sock: Connected socket to target
    '''

    # default to doing nothing; this is a subclass/user hook.
    pass
####################################################################################################################
def restart_target (self, target, stop_first=True):
    '''
    Restart the fuzz target. If a VMControl is available revert the snapshot, if a process monitor is available
    restart the target process. Otherwise, do nothing.

    @type  target:     session.target
    @param target:     Target we are restarting
    @type  stop_first: Boolean
    @param stop_first: (Optional, def=True) Ask procmon to stop the target process before starting it again

    @rtype:  Boolean or None
    @return: False when the restart could not be performed, None otherwise
    '''

    # vm restarting is the preferred method so try that first.
    if target.vmcontrol:
        self.logger.warning("restarting target virtual machine")
        target.vmcontrol.restart_target()

    # if we have a connected process monitor, restart the target process.
    elif target.procmon:
        self.logger.warning("restarting target process")
        if stop_first:
            target.procmon.stop_target()

        if not target.procmon.start_target():
            return False

        # give the process a few seconds to settle in.
        time.sleep(3)

    # otherwise all we can do is wait a while for the target to recover on its own.
    else:
        self.logger.error("no vmcontrol or procmon channel available ... sleeping for %d seconds" % self.restart_sleep_time)
        time.sleep(self.restart_sleep_time)
        # TODO: should be good to relaunch test for crash before returning False
        return False

    # pass specified target parameters to the PED-RPC server to re-establish connections.
    target.pedrpc_connect()
####################################################################################################################
def server_init (self):
    '''
    Called by fuzz() on first run (not on recursive re-entry) to initialize variables, web interface, etc...

    Resets the mutation counters, installs a SIGINT handler (where the signal
    module exists) that serializes session state before exiting, and spawns
    the web interface thread.
    '''

    self.total_mutant_index  = 0
    self.total_num_mutations = self.num_mutations()

    # web interface thread doesn't catch KeyboardInterrupt
    # add a signal handler, and exit on SIGINT
    # TODO: should wait for the end of the ongoing test case, and stop gracefully netmon and procmon
    # TODO: doesn't work on OS where the signal module isn't available
    try:
        import signal
        self.signal_module = True
    except:
        self.signal_module = False

    if self.signal_module:
        def exit_abruptly(signal, frame):
            '''Save current settings (just in case) and exit'''
            self.export_file()
            self.logger.critical("SIGINT received ... exiting")
            try:
                self.thread.join()
            except:
                self.logger.debug( "No server launched")

            sys.exit(0)

        signal.signal(signal.SIGINT, exit_abruptly)

    # spawn the web interface.
    # note: web_interface_thread is defined later in this module.
    self.thread = web_interface_thread(self)
    self.thread.start()
####################################################################################################################
def transmit (self, sock, node, edge, target):
'''
Render and transmit a node, process callbacks accordingly.
@type sock: Socket
@param sock: Socket to transmit node on
@type node: Request (Node)
@param node: Request/Node to transmit
@type edge: Connection (pgraph.edge)
@param edge: Edge along the current fuzz path from "node" to next node.
@type target: session.target
@param target: Target we are transmitting to
'''
data = None
# if the edge has a callback, process it. the callback has the option to render the node, modify it and return.
if edge.callback:
data = edge.callback(self, node, edge, sock)
self.logger.info("xmitting: [%d.%d]" % (node.id, self.total_mutant_index))
# if no data was returned by the callback, render the node here.
if not data:
data = node.render()
# if data length is > 65507 and proto is UDP, truncate it.
# TODO: this logic does not prevent duplicate test cases, need to address this in the future.
if self.proto == socket.SOCK_DGRAM:
# max UDP packet size.
# TODO: anyone know how to determine this value smarter?
# - See http://stackoverflow.com/questions/25841/maximum-buffer-length-for-sendto to fix this
MAX_UDP = 65507
if os.name != "nt" and os.uname()[0] == "Darwin":
MAX_UDP = 9216
if len(data) > MAX_UDP:
self.logger.debug("Too much data for UDP, truncating to %d bytes" % MAX_UDP)
data = data[:MAX_UDP]
try:
if self.proto == socket.SOCK_STREAM:
sock.send(data)
else:
sock.sendto(data, (self.targets[0].host, self.targets[0].port))
self.logger.debug("Packet sent : " + repr(data))
except Exception, inst:
self.logger.error("Socket error, send: %s" % inst)
if self.proto == (socket.SOCK_STREAM or socket.SOCK_DGRAM):
# TODO: might have a need to increase this at some point. (possibly make it a class parameter)
try:
self.last_recv = sock.recv(10000)
except Exception, e:
self.last_recv = ""
else:
self.last_recv = ""
if len(self.last_recv) > 0:
self.logger.debug("received: [%d] %s" % (len(self.last_recv), repr(self.last_recv)))
else:
self.logger.warning("Nothing received on socket.")
# Increment individual crash count
self.crashing_primitives[self.fuzz_node.mutant] = self.crashing_primitives.get(self.fuzz_node.mutant,0) +1
# Note crash information
self.protmon_results[self.total_mutant_index] = data ;
#print self.protmon_results
########################################################################################################################
class web_interface_handler (BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
    BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
    # NOTE(review): BaseHTTPRequestHandler.__init__ fully handles the request
    # before returning, so this assignment only runs after the request has
    # already been serviced; the handler presumably relies on "session" being
    # installed as a class attribute before dispatch -- confirm against the
    # web_interface_thread setup (outside this excerpt).
    self.session = None
def commify (self, number):
    '''
    Return *number* rendered as a string with thousands separated by commas,
    e.g. 1234567 -> "1,234,567".
    '''
    text    = str(number)
    grouper = re.compile(r"^(-?\d+)(\d{3})")

    # repeatedly splice a comma before the trailing block of three digits
    # until no further substitution is made.
    while True:
        text, substituted = grouper.subn(r"\1,\2", text)
        if not substituted:
            break

    return text
def do_GET (self):
    # GET, HEAD and POST are all routed through the same dispatcher.
    self.do_everything()

def do_HEAD (self):
    self.do_everything()

def do_POST (self):
    self.do_everything()
def do_everything (self):
    # single dispatcher for every HTTP verb: pause/resume are toggled purely
    # by URL substring, then the requested view is rendered.
    if "pause" in self.path:
        self.session.pause_flag = True

    if "resume" in self.path:
        self.session.pause_flag = False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    if "view_crash" in self.path:
        response = self.view_crash(self.path)
    elif "view_pcap" in self.path:
        response = self.view_pcap(self.path)
    else:
        response = self.view_index()

    self.wfile.write(response)
def log_error (self, *args, **kwargs):
    # suppress BaseHTTPRequestHandler's default stderr error logging.
    pass

def log_message (self, *args, **kwargs):
    # suppress per-request access logging as well.
    pass

def version_string (self):
    # value reported in the HTTP "Server" response header.
    return "Sulley Fuzz Session"
def view_crash (self, path):
    # path ends in the test case number (".../view_crash/<n>"); render the
    # crash synopsis stored by session.poll_pedrpc() for that case.
    test_number = int(path.split("/")[-1])
    return "<html><pre>%s</pre></html>" % self.session.procmon_results[test_number]
def view_pcap (self, path):
    # placeholder: pcap viewing is not implemented, just echo the path back.
    return path
def view_index (self):
response = """
<html>
<head>
<meta http-equiv="refresh" content="5">
<title>Sulley Fuzz Control</title>
<style>
a:link {color: #FF8200; text-decoration: none;}
a:visited {color: #FF8200; text-decoration: none;}
a:hover {color: #C5C5C5; text-decoration: none;}
body
{
background-color: #000000;
font-family: Arial, Helvetica, sans-serif;
font-size: 12px;
color: #FFFFFF;
}
td
{
font-family: Arial, Helvetica, sans-serif;
font-size: 12px;
color: #A0B0B0;
}
.fixed
{
font-family: Courier New;
font-size: 12px;
color: #A0B0B0;
}
.input
{
font-family: Arial, Helvetica, sans-serif;
font-size: 11px;
color: #FFFFFF;
background-color: #333333;
border: thin none;
height: 20px;
}
</style>
</head>
<body>
<center>
<table border=0 cellpadding=5 cellspacing=0 width=750><tr><td>
<!-- begin bounding table -->
<table border=0 cellpadding=5 cellspacing=0 width="100%%">
<tr bgcolor="#333333">
<td><div style="font-size: 20px;">Sulley Fuzz Control</div></td>
<td align=right><div style="font-weight: bold; font-size: 20px;">%(status)s</div></td>
</tr>
<tr bgcolor="#111111">
<td colspan=2 align="center">
<table border=0 cellpadding=0 cellspacing=5>
<tr bgcolor="#111111">
<td><b>Total:</b></td>
<td>%(total_mutant_index)s</td>
<td>of</td>
<td>%(total_num_mutations)s</td>
<td class="fixed">%(progress_total_bar)s</td>
<td>%(progress_total)s</td>
</tr>
<tr bgcolor="#111111">
<td><b>%(current_name)s:</b></td>
<td>%(current_mutant_index)s</td>
<td>of</td>
<td>%(current_num_mutations)s</td>
<td class="fixed">%(progress_current_bar)s</td>
<td>%(progress_current)s</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>
<form method=get action="/pause">
<input class="input" type="submit" value="Pause">
</form>
</td>
<td align=right>
<form method=get action="/resume">
<input class="input" type="submit" value="Resume">
</form>
</td>
</tr>
</table>
<!-- begin procmon results -->
<table border=0 cellpadding=5 cellspacing=0 width="100%%">
<tr bgcolor="#333333">
<td nowrap>Test Case #</td>
<td>Crash Synopsis</td>
<td nowrap>Captured Bytes</td>
</tr>
"""
keys = self.session.procmon_results.keys()
keys.sort()
for key in keys:
val = self.session.procmon_results[key]
bytes = " "
if self.session.netmon_results.has_key(key):
bytes = self.commify(self.session.netmon_results[key])
response += '<tr><td class="fixed"><a href="/view_crash/%d">%06d</a></td><td>%s</td><td align=right>%s</td></tr>' % (key, key, val.split("\n")[0], bytes)
response += """
<!-- end procmon results -->
</table>
<!-- end bounding table -->
</td></tr></table>
</center>
</body>
</html>
"""
# what is the fuzzing status.
if self.session.pause_flag:
status = "<font color=red>PAUSED</font>"
else:
status = "<font color=green>RUNNING</font>"
# if there is a current fuzz node.
if self.session.fuzz_node:
# which node (request) are we currently fuzzing.
if self.session.fuzz_node.name:
current_name = self.session.fuzz_node.name
else:
current_name = "[N/A]"
# render sweet progress bars.
progress_current = float(self.session.fuzz_node.mutant_index) / float(self.session.fuzz_node.num_mutations())
num_bars = int(progress_current * 50)
progress_current_bar = "[" + "=" * num_bars + " " * (50 - num_bars) + "]"
progress_current = "%.3f%%" % (progress_current * 100)
progress_total = float(self.session.total_mutant_index) / float(self.session.total_num_mutations)
num_bars = int(progress_total * 50)
progress_total_bar = "[" + "=" * num_bars + " " * (50 - num_bars) + "]"
progress_total = "%.3f%%" % (progress_total * 100)
response %= \
{
"current_mutant_index" : self.commify(self.session.fuzz_node.mutant_index),
"current_name" : current_name,
"current_num_mutations" : self.commify(self.session.fuzz_node.num_mutations()),
"progress_current" : progress_current,
"progress_current_bar" : progress_current_bar,
"progress_total" : progress_total,
"progress_total_bar" : progress_total_bar,
"status" : status,
"total_mutant_index" : self.commify(self.session.total_mutant_index),
"total_num_mutations" : self.commify(self.session.total_num_mutations),
}
else:
response %= \
{
"current_mutant_index" : "",
"current_name" : "",
"current_num_mutations" : "",
"progress_current" : "",
"progress_current_bar" : "",
"progress_total" : "",
"progress_total_bar" : "",
"status" : "<font color=yellow>UNAVAILABLE</font>",
"total_mutant_index" : "",
"total_num_mutations" : "",
}
return response
########################################################################################################################
class web_interface_server (BaseHTTPServer.HTTPServer):
    '''
    HTTP server hosting the Sulley web monitoring interface.

    http://docs.python.org/lib/module-BaseHTTPServer.html
    '''

    def __init__(self, server_address, RequestHandlerClass, session):
        BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass)

        # expose the fuzz session on the handler *class*: BaseHTTPServer
        # instantiates a fresh handler object per request, so a class
        # attribute is the shared channel all handlers can read.
        self.RequestHandlerClass.session = session
########################################################################################################################
class web_interface_thread (threading.Thread):
    """
    Background thread that runs the web monitoring interface for a fuzz
    session, serving one request at a time until stopped via join().
    """

    def __init__ (self, session):
        threading.Thread.__init__(self, name="SulleyWebServer")

        self._stopevent = threading.Event()   # signals run() to exit its loop
        self.session    = session
        self.server     = None                # created lazily in run()

    def run (self):
        # bind on all interfaces at the session's configured web port.
        self.server = web_interface_server(('', self.session.web_port), web_interface_handler, self.session)

        # handle_request() blocks until a request arrives, so the stop event
        # is only re-checked after each served request (see join below).
        while not self._stopevent.isSet():
            self.server.handle_request()

    def join(self, timeout=None):
        # A little dirty but no other solution afaik
        self._stopevent.set()

        # fire a dummy request at ourselves to unblock handle_request() so
        # the loop in run() observes the stop event and exits.
        # NOTE(review): this override never calls Thread.join(), so the
        # ``timeout`` parameter is ignored and the caller does not actually
        # wait for the thread to finish — confirm this is intended.
        conn = httplib.HTTPConnection("localhost:%d" % self.session.web_port)
        conn.request("GET", "/")
        conn.getresponse()
| OpenRCE/sulley | sulley/sessions.py | Python | gpl-2.0 | 48,671 |
#!/usr/bin/python
# vim: et sw=4 ts=4:
# -*- coding: utf-8 -*-
#
# Piwik - free/libre analytics platform
#
# @link http://piwik.org
# @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
# @version $Id$
#
# For more info see: http://piwik.org/log-analytics/ and http://piwik.org/docs/log-analytics-tool-how-to/
#
# Requires Python 2.6 or greater.
#
import base64
import bz2
import ConfigParser
import datetime
import fnmatch
import gzip
import hashlib
import httplib
import inspect
import itertools
import logging
import optparse
import os
import os.path
import Queue
import re
import sys
import threading
import time
import urllib
import urllib2
import urlparse
import subprocess
import functools
import traceback
import socket
import textwrap
# Prefer the stdlib json module (2.6+); fall back to simplejson on 2.5.
try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        # NOTE(review): on Python >= 2.6 both imports failing leaves ``json``
        # undefined and execution continues silently — stdlib json is assumed
        # to always be importable there.
        if sys.version_info < (2, 6):
            print >> sys.stderr, 'simplejson (http://pypi.python.org/pypi/simplejson/) is required.'
            sys.exit(1)
##
## Constants.
##

# Request-path file extensions treated as static assets; such hits are
# skipped unless --enable-static is passed.
STATIC_EXTENSIONS = set((
    'gif jpg jpeg png bmp ico svg svgz ttf otf eot woff class swf css js xml robots.txt webp'
).split())

# File extensions tracked as Downloads (overridable via
# --download-extensions / --add-download-extensions).
DOWNLOAD_EXTENSIONS = set((
    '7z aac arc arj asf asx avi bin csv deb dmg doc docx exe flv gz gzip hqx '
    'ibooks jar mpg mp2 mp3 mp4 mpeg mov movie msi msp odb odf odg odp '
    'ods odt ogg ogv pdf phps ppt pptx qt qtm ra ram rar rpm sea sit tar tbz '
    'bz2 tbz tgz torrent txt wav wma wmv wpd xls xlsx xml xsd z zip '
    'azw3 epub mobi apk'
).split())

# Case-insensitive substrings of user agents to exclude as bots unless
# --enable-bots is passed (extendable via --useragent-exclude).
# A good source is: http://phpbb-bots.blogspot.com/
EXCLUDED_USER_AGENTS = (
    'adsbot-google',
    'ask jeeves',
    'baidubot',
    'bot-',
    'bot/',
    'ccooter/',   # presumably 'scooter/' (AltaVista crawler) — verify upstream
    'crawl',
    'curl',
    'echoping',
    'exabot',
    'feed',
    'googlebot',
    'ia_archiver',
    'java/',
    'libwww',
    'mediapartners-google',
    'msnbot',
    'netcraftsurvey',
    'panopta',
    'robot',
    'spider',
    'surveybot',
    'twiceler',
    'voilabot',
    'yahoo',
    'yandex',
)

# Defaults for --retry-max-attempts / --retry-delay / --request-timeout.
PIWIK_DEFAULT_MAX_ATTEMPTS = 3
PIWIK_DEFAULT_DELAY_AFTER_FAILURE = 10
DEFAULT_SOCKET_TIMEOUT = 300

# The 1x1 transparent GIF the Piwik tracker returns on success; used to
# verify that tracking requests were accepted.
PIWIK_EXPECTED_IMAGE = base64.b64decode(
    'R0lGODlhAQABAIAAAAAAAAAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='
)

##
## Formats.
##
class BaseFormatException(Exception):
    """Raised when a log format cannot supply a requested field."""
    pass
class BaseFormat(object):
    """
    Common base for all access-log formats.

    Subclasses override check_format_line() (and usually match()/get()) to
    recognize and parse their particular format.
    """

    def __init__(self, name):
        self.name = name
        # default date layout, e.g. "10/Oct/2000:13:55:36"; subclasses may
        # replace it.
        self.date_format = '%d/%b/%Y:%H:%M:%S'
        # compiled pattern, when the format is regex based.
        self.regex = None

    def check_format(self, file):
        """Peek at the first line of ``file`` (rewinding afterwards) and
        report whether this format recognizes it."""
        candidate = file.readline()
        file.seek(0)
        return self.check_format_line(candidate)

    def check_format_line(self, line):
        # the base format recognizes nothing; subclasses override.
        return False
class JsonFormat(BaseFormat):
    """
    Log format for logs written as one JSON object per line (e.g. nginx
    configured with a JSON log_format).
    """

    def __init__(self, name):
        super(JsonFormat, self).__init__(name)
        self.json = None
        # ISO 8601 without the timezone suffix; get('date') strips the
        # suffix into self.json['timezone'] before parsing.
        self.date_format = '%Y-%m-%dT%H:%M:%S'

    def check_format_line(self, line):
        """Return True if ``line`` parses as JSON."""
        try:
            self.json = json.loads(line)
            return True
        except ValueError:
            # json.loads raises ValueError (JSONDecodeError) on bad input;
            # anything else is a real error and should propagate.
            return False

    def match(self, line):
        """Parse ``line``; return self on success, None on failure."""
        try:
            # nginx outputs malformed JSON w/ hex escapes when confronted w/ non-UTF input. we have to
            # workaround this by converting hex escapes in strings to unicode escapes. the conversion is naive,
            # so it does not take into account the string's actual encoding (which we don't have access to).
            line = line.replace('\\x', '\\u00')

            self.json = json.loads(line)
            return self
        except ValueError:
            self.json = None
            return None

    def get(self, key):
        """
        Return the value of ``key`` from the parsed line.

        Raises BaseFormatException when the key (or a key required by the
        normalization below) is missing, mirroring RegexFormat.get().
        """
        try:
            # Some ugly patchs ...
            if key == 'generation_time_milli':
                self.json[key] = int(float(self.json[key]) * 1000)
            # Patch date format ISO 8601: split the timezone suffix off so
            # date_format can parse the remaining timestamp.
            elif key == 'date':
                tz = self.json[key][19:]
                self.json['timezone'] = tz.replace(':', '')
                self.json[key] = self.json[key][:19]

            return self.json[key]
        except KeyError:
            # previously a missing key could leak a raw KeyError out of the
            # normalization steps above; report uniformly instead.
            raise BaseFormatException()

    def get_all(self,):
        return self.json

    def remove_ignored_groups(self, groups):
        for group in groups:
            del self.json[group]
class RegexFormat(BaseFormat):
    """
    Log format described by a regular expression with named capture groups;
    each group name is a log field (date, ip, path, ...).
    """

    def __init__(self, name, regex, date_format=None):
        super(RegexFormat, self).__init__(name)
        # groupdict() of the last successful match, or None.
        self.matched = None
        if regex is not None:
            self.regex = re.compile(regex)
        if date_format is not None:
            self.date_format = date_format

    def check_format_line(self, line):
        # a line is recognized iff the regex matches it.
        return self.match(line)

    def match(self,line):
        """Match ``line`` against the format regex, caching the captured
        groups; return the match object (None on failure)."""
        if not self.regex:
            return None
        found = self.regex.match(line)
        self.matched = found.groupdict() if found else None
        return found

    def get(self, key):
        """Return the captured value of group ``key``; raise
        BaseFormatException when the group is absent."""
        try:
            return self.matched[key]
        except KeyError:
            raise BaseFormatException("Cannot find group '%s'." % key)

    def get_all(self,):
        return self.matched

    def remove_ignored_groups(self, groups):
        for group in groups:
            del self.matched[group]
class W3cExtendedFormat(RegexFormat):
    """
    W3C Extended Log File Format (IIS & friends): a '#Fields:' header line
    names the columns, from which a parsing regex is assembled on the fly.

    NOTE(review): depends on the module-global ``config`` (Configuration
    instance) being initialized before check_format() is called.
    """

    FIELDS_LINE_PREFIX = '#Fields: '

    # per-field regex fragments, keyed by W3C field name.
    fields = {
        # date & time form a single capture group spanning two fields.
        'date': '(?P<date>\d+[-\d+]+',
        'time': '[\d+:]+)[.\d]*?', # TODO should not assume date & time will be together not sure how to fix ATM.
        'cs-uri-stem': '(?P<path>/\S*)',
        'cs-uri-query': '(?P<query_string>\S*)',
        'c-ip': '"?(?P<ip>[\w*.:-]*)"?',
        'cs(User-Agent)': '(?P<user_agent>".*?"|\S*)',
        'cs(Referer)': '(?P<referrer>\S+)',
        'sc-status': '(?P<status>\d+)',
        'sc-bytes': '(?P<length>\S+)',
        'cs-host': '(?P<host>\S+)',
        'cs-username': '(?P<userid>\S+)',
        'time-taken': '(?P<generation_time_secs>[.\d]+)'
    }

    def __init__(self):
        super(W3cExtendedFormat, self).__init__('w3c_extended', None, '%Y-%m-%d %H:%M:%S')

    def check_format(self, file):
        """Build the regex from the file's '#Fields:' header, then test the
        first line against it (rewinding the file afterwards)."""
        self.create_regex(file)

        # if we couldn't create a regex, this file does not follow the W3C extended log file format
        if not self.regex:
            file.seek(0)
            return

        first_line = file.readline()

        file.seek(0)
        return self.check_format_line(first_line)

    def create_regex(self, file):
        """Assemble self.regex from the '#Fields:' line (or the
        --w3c-fields override)."""
        fields_line = None
        if config.options.w3c_fields:
            fields_line = config.options.w3c_fields

        # collect all header lines up until the Fields: line
        # if we're reading from stdin, we can't seek, so don't read any more than the Fields line
        header_lines = []
        while fields_line is None:
            line = file.readline().strip()

            if not line:
                continue

            if not line.startswith('#'):
                # hit a data line before any Fields header: not W3C extended.
                break

            if line.startswith(W3cExtendedFormat.FIELDS_LINE_PREFIX):
                fields_line = line
            else:
                header_lines.append(line)

        if not fields_line:
            return

        # store the header lines for a later check for IIS
        self.header_lines = header_lines

        # Parse the 'Fields: ' line to create the regex to use
        full_regex = []

        expected_fields = type(self).fields.copy() # turn custom field mapping into field => regex mapping

        # if the --w3c-time-taken-millisecs option is used, make sure the time-taken field is interpreted as milliseconds
        if config.options.w3c_time_taken_in_millisecs:
            expected_fields['time-taken'] = '(?P<generation_time_milli>[\d.]+)'

        # apply --w3c-map-field renames: the custom name inherits the regex
        # of the default field it maps to.
        for mapped_field_name, field_name in config.options.custom_w3c_fields.iteritems():
            expected_fields[mapped_field_name] = expected_fields[field_name]
            del expected_fields[field_name]

        # add custom field regexes supplied through --w3c-field-regex option
        for field_name, field_regex in config.options.w3c_field_regexes.iteritems():
            expected_fields[field_name] = field_regex

        # Skip the 'Fields: ' prefix.
        fields_line = fields_line[9:].strip()
        for field in re.split('\s+', fields_line):
            try:
                regex = expected_fields[field]
            except KeyError:
                # unrecognized field: match and discard the column.
                regex = '(?:".*?"|\S+)'
            full_regex.append(regex)
        full_regex = '\s+'.join(full_regex)

        logging.debug("Based on 'Fields:' line, computed regex to be %s", full_regex)

        self.regex = re.compile(full_regex)

    def check_for_iis_option(self):
        # warn when the log looks like IIS (millisecond time-taken) but the
        # user didn't pass --w3c-time-taken-millisecs.
        if not config.options.w3c_time_taken_in_millisecs and self._is_time_taken_milli() and self._is_iis():
            logging.info("WARNING: IIS log file being parsed without --w3c-time-taken-milli option. IIS"
                         " stores millisecond values in the time-taken field. If your logfile does this, the aforementioned"
                         " option must be used in order to get accurate generation times.")

    def _is_iis(self):
        # heuristic: any stored header line mentioning IIS.
        return len([line for line in self.header_lines if 'internet information services' in line.lower() or 'iis' in line.lower()]) > 0

    def _is_time_taken_milli(self):
        # true when time-taken was NOT mapped to the millisecond group.
        return 'generation_time_milli' not in self.regex.pattern
class IisFormat(W3cExtendedFormat):
    """W3C extended variant as written by Microsoft IIS (time-taken is in
    milliseconds)."""

    fields = dict(W3cExtendedFormat.fields)
    fields['time-taken'] = '(?P<generation_time_milli>[.\d]+)'
    # this group is useless for log importing, but capturing it
    # will ensure we always select IIS for the format instead of
    # W3C logs when detecting the format. This way there will be
    # less accidental importing of IIS logs w/o --w3c-time-taken-milli.
    fields['sc-win32-status'] = '(?P<__win32_status>\S+)'

    def __init__(self):
        super(IisFormat, self).__init__()
        self.name = 'iis'
class ShoutcastFormat(W3cExtendedFormat):
    """W3C extended variant for Shoutcast/Icecast streaming server logs:
    c-status carries the HTTP status and x-duration the generation time in
    seconds."""

    fields = dict(W3cExtendedFormat.fields)
    fields['c-status'] = '(?P<status>\d+)'
    fields['x-duration'] = '(?P<generation_time_secs>[.\d]+)'

    def __init__(self):
        super(ShoutcastFormat, self).__init__()
        self.name = 'shoutcast'

    def get(self, key):
        # user agents are URL-quoted in shoutcast logs; decode before use.
        value = super(ShoutcastFormat, self).get(key)
        if key == 'user_agent':
            value = urllib2.unquote(value)
        return value
class AmazonCloudFrontFormat(W3cExtendedFormat):
    """W3C extended variant for Amazon CloudFront access logs (web + RTMP
    distributions)."""

    fields = dict(W3cExtendedFormat.fields)
    fields.update({
        'x-event': '(?P<event_action>\S+)',
        'x-sname': '(?P<event_name>\S+)',
        'cs-uri-stem': '(?:rtmp:/)?(?P<path>/\S*)',
        'c-user-agent': '(?P<user_agent>".*?"|\S+)',

        # following are present to match cloudfront instead of W3C when we know it's cloudfront
        'x-edge-location': '(?P<x_edge_location>".*?"|\S+)',
        'x-edge-result-type': '(?P<x_edge_result_type>".*?"|\S+)',
        'x-edge-request-id': '(?P<x_edge_request_id>".*?"|\S+)',
        'x-host-header': '(?P<x_host_header>".*?"|\S+)'
    })

    def __init__(self):
        super(AmazonCloudFrontFormat, self).__init__()
        self.name = 'amazon_cloudfront'

    def get(self, key):
        # RTMP lines have no event category or status column; synthesize them.
        if key == 'event_category' and 'event_category' not in self.matched:
            return 'cloudfront_rtmp'
        if key == 'status' and 'status' not in self.matched:
            return '200'
        value = super(AmazonCloudFrontFormat, self).get(key)
        if key == 'user_agent':
            # user agents are URL-quoted in cloudfront logs.
            value = urllib2.unquote(value)
        return value
# optional "host[:port] " prefix used by vhost-aware log formats.
_HOST_PREFIX = '(?P<host>[\w\-\.]*)(?::\d+)?\s+'

# Apache/NCSA Common Log Format: ip ident user [date tz] "req" status length.
_COMMON_LOG_FORMAT = (
    '(?P<ip>\S+)\s+\S+\s+(?P<userid>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+'
    '"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+(?P<length>\S+)'
)

# NCSA combined: common + referrer + user agent.
_NCSA_EXTENDED_LOG_FORMAT = (_COMMON_LOG_FORMAT +
    '\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)

# Amazon S3 server access log format.
_S3_LOG_FORMAT = (
    '\S+\s+(?P<host>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+(?P<ip>\S+)\s+'
    '\S+\s+\S+\s+\S+\s+\S+\s+"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+\S+\s+(?P<length>\S+)\s+'
    '\S+\s+\S+\s+\S+\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)

# Icecast2: NCSA combined + trailing listening duration in seconds.
_ICECAST2_LOG_FORMAT = ( _NCSA_EXTENDED_LOG_FORMAT +
    '\s+(?P<session_time>\S+)'
)

# registry of supported formats, keyed by the --log-format-name values.
FORMATS = {
    'common': RegexFormat('common', _COMMON_LOG_FORMAT),
    'common_vhost': RegexFormat('common_vhost', _HOST_PREFIX + _COMMON_LOG_FORMAT),
    'ncsa_extended': RegexFormat('ncsa_extended', _NCSA_EXTENDED_LOG_FORMAT),
    'common_complete': RegexFormat('common_complete', _HOST_PREFIX + _NCSA_EXTENDED_LOG_FORMAT),
    'w3c_extended': W3cExtendedFormat(),
    'amazon_cloudfront': AmazonCloudFrontFormat(),
    'iis': IisFormat(),
    'shoutcast': ShoutcastFormat(),
    's3': RegexFormat('s3', _S3_LOG_FORMAT),
    'icecast2': RegexFormat('icecast2', _ICECAST2_LOG_FORMAT),
    'nginx_json': JsonFormat('nginx_json'),
}
##
## Code.
##
class Configuration(object):
"""
Stores all the configuration options by reading sys.argv and parsing,
if needed, the config.inc.php.
It has 2 attributes: options and filenames.
"""
class Error(Exception):
pass
def _create_parser(self):
    """
    Initialize and return the OptionParser instance.

    Declares every command-line option of the importer; the parsed values
    end up in self.options (see _parse_args).
    """
    option_parser = optparse.OptionParser(
        usage='Usage: %prog [options] log_file [ log_file [...] ]',
        description="Import HTTP access logs to Piwik. "
                    "log_file is the path to a server access log file (uncompressed, .gz, .bz2, or specify - to read from stdin). "
                    " By default, the script will try to produce clean reports and will exclude bots, static files, discard http error and redirects, etc. This is customizable, see below.",
        epilog="About Piwik Server Log Analytics: http://piwik.org/log-analytics/ "
               "  Found a bug? Please create a ticket in http://dev.piwik.org/ "
               " Please send your suggestions or successful user story to hello@piwik.org "
    )

    # Basic auth user
    option_parser.add_option(
        '--auth-user', dest='auth_user',
        help="Basic auth user",
    )
    # Basic auth password
    option_parser.add_option(
        '--auth-password', dest='auth_password',
        help="Basic auth password",
    )
    option_parser.add_option(
        '--debug', '-d', dest='debug', action='count', default=0,
        help="Enable debug output (specify multiple times for more verbose)",
    )
    option_parser.add_option(
        '--debug-tracker', dest='debug_tracker', action='store_true', default=False,
        help="Appends &debug=1 to tracker requests and prints out the result so the tracker can be debugged. If "
        "using the log importer results in errors with the tracker or improperly recorded visits, this option can "
        "be used to find out what the tracker is doing wrong. To see debug tracker output, you must also set the "
        "[Tracker] debug_on_demand INI config to 1 in your Piwik's config.ini.php file."
    )
    option_parser.add_option(
        '--debug-request-limit', dest='debug_request_limit', type='int', default=None,
        help="Debug option that will exit after N requests are parsed. Can be used w/ --debug-tracker to limit the "
        "output of a large log file."
    )
    option_parser.add_option(
        '--url', dest='piwik_url',
        help="REQUIRED Your Piwik server URL, eg. http://example.com/piwik/ or http://analytics.example.net",
    )
    option_parser.add_option(
        '--dry-run', dest='dry_run',
        action='store_true', default=False,
        help="Perform a trial run with no tracking data being inserted into Piwik",
    )
    option_parser.add_option(
        '--show-progress', dest='show_progress',
        action='store_true', default=os.isatty(sys.stdout.fileno()),
        help="Print a progress report X seconds (default: 1, use --show-progress-delay to override)"
    )
    option_parser.add_option(
        '--show-progress-delay', dest='show_progress_delay',
        type='int', default=1,
        help="Change the default progress delay"
    )
    option_parser.add_option(
        '--add-sites-new-hosts', dest='add_sites_new_hosts',
        action='store_true', default=False,
        help="When a hostname is found in the log file, but not matched to any website "
        "in Piwik, automatically create a new website in Piwik with this hostname to "
        "import the logs"
    )
    option_parser.add_option(
        '--idsite', dest='site_id',
        help= ("When specified, "
               "data in the specified log files will be tracked for this Piwik site ID."
               " The script will not auto-detect the website based on the log line hostname (new websites will not be automatically created).")
    )
    option_parser.add_option(
        '--idsite-fallback', dest='site_id_fallback',
        help="Default Piwik site ID to use if the hostname doesn't match any "
        "known Website's URL. New websites will not be automatically created. "
        " Used only if --add-sites-new-hosts or --idsite are not set",
    )
    # default config.ini.php location, relative to this script.
    default_config = os.path.abspath(
        os.path.join(os.path.dirname(__file__),
            '../../config/config.ini.php'),
    )
    option_parser.add_option(
        '--config', dest='config_file', default=default_config,
        help=(
            "This is only used when --login and --password is not used. "
            "Piwik will read the configuration file (default: %default) to "
            "fetch the Super User token_auth from the config file. "
        )
    )
    option_parser.add_option(
        '--login', dest='login',
        help="You can manually specify the Piwik Super User login"
    )
    option_parser.add_option(
        '--password', dest='password',
        help="You can manually specify the Piwik Super User password"
    )
    option_parser.add_option(
        '--token-auth', dest='piwik_token_auth',
        help="Piwik Super User token_auth, 32 characters hexadecimal string, found in Piwik > API",
    )
    option_parser.add_option(
        '--hostname', dest='hostnames', action='append', default=[],
        help="Accepted hostname (requests with other hostnames will be excluded). "
        "Can be specified multiple times"
    )
    option_parser.add_option(
        '--exclude-path', dest='excluded_paths', action='append', default=[],
        help="Any URL path matching this exclude-path will not be imported in Piwik. Can be specified multiple times"
    )
    option_parser.add_option(
        '--exclude-path-from', dest='exclude_path_from',
        help="Each line from this file is a path to exclude (see: --exclude-path)"
    )
    option_parser.add_option(
        '--include-path', dest='included_paths', action='append', default=[],
        help="Paths to include. Can be specified multiple times. If not specified, all paths are included."
    )
    option_parser.add_option(
        '--include-path-from', dest='include_path_from',
        help="Each line from this file is a path to include"
    )
    option_parser.add_option(
        '--useragent-exclude', dest='excluded_useragents',
        action='append', default=[],
        help="User agents to exclude (in addition to the standard excluded "
        "user agents). Can be specified multiple times",
    )
    option_parser.add_option(
        '--enable-static', dest='enable_static',
        action='store_true', default=False,
        help="Track static files (images, css, js, ico, ttf, etc.)"
    )
    option_parser.add_option(
        '--enable-bots', dest='enable_bots',
        action='store_true', default=False,
        help="Track bots. All bot visits will have a Custom Variable set with name='Bot' and value='$Bot_user_agent_here$'"
    )
    option_parser.add_option(
        '--enable-http-errors', dest='enable_http_errors',
        action='store_true', default=False,
        help="Track HTTP errors (status code 4xx or 5xx)"
    )
    option_parser.add_option(
        '--enable-http-redirects', dest='enable_http_redirects',
        action='store_true', default=False,
        help="Track HTTP redirects (status code 3xx except 304)"
    )
    option_parser.add_option(
        '--enable-reverse-dns', dest='reverse_dns',
        action='store_true', default=False,
        help="Enable reverse DNS, used to generate the 'Providers' report in Piwik. "
        "Disabled by default, as it impacts performance"
    )
    option_parser.add_option(
        '--strip-query-string', dest='strip_query_string',
        action='store_true', default=False,
        help="Strip the query string from the URL"
    )
    option_parser.add_option(
        '--query-string-delimiter', dest='query_string_delimiter', default='?',
        help="The query string delimiter (default: %default)"
    )
    option_parser.add_option(
        '--log-format-name', dest='log_format_name', default=None,
        help=("Access log format to detect (supported are: %s). "
              "When not specified, the log format will be autodetected by trying all supported log formats."
              % ', '.join(sorted(FORMATS.iterkeys())))
    )
    # group names recognized by the importer in custom --log-format-regex
    # patterns (referenced by several help strings below).
    available_regex_groups = ['date', 'path', 'query_string', 'ip', 'user_agent', 'referrer', 'status',
                              'length', 'host', 'userid', 'generation_time_milli', 'event_action',
                              'event_name', 'timezone', 'session_time']
    option_parser.add_option(
        '--log-format-regex', dest='log_format_regex', default=None,
        help="Regular expression used to parse log entries. Regexes must contain named groups for different log fields. "
             "Recognized fields include: %s. For an example of a supported Regex, see the source code of this file. "
             "Overrides --log-format-name." % (', '.join(available_regex_groups))
    )
    option_parser.add_option(
        '--log-date-format', dest='log_date_format', default=None,
        help="Format string used to parse dates. You can specify any format that can also be specified to "
             "the strptime python function."
    )
    option_parser.add_option(
        '--log-hostname', dest='log_hostname', default=None,
        help="Force this hostname for a log format that doesn't include it. All hits "
        "will seem to come to this host"
    )
    option_parser.add_option(
        '--skip', dest='skip', default=0, type='int',
        help="Skip the n first lines to start parsing/importing data at a given line for the specified log file",
    )
    option_parser.add_option(
        '--recorders', dest='recorders', default=1, type='int',
        help="Number of simultaneous recorders (default: %default). "
        "It should be set to the number of CPU cores in your server. "
        "You can also experiment with higher values which may increase performance until a certain point",
    )
    option_parser.add_option(
        '--recorder-max-payload-size', dest='recorder_max_payload_size', default=200, type='int',
        help="Maximum number of log entries to record in one tracking request (default: %default). "
    )
    option_parser.add_option(
        '--replay-tracking', dest='replay_tracking',
        action='store_true', default=False,
        help="Replay piwik.php requests found in custom logs (only piwik.php requests expected). \nSee http://piwik.org/faq/how-to/faq_17033/"
    )
    option_parser.add_option(
        '--replay-tracking-expected-tracker-file', dest='replay_tracking_expected_tracker_file', default='piwik.php',
        help="The expected suffix for tracking request paths. Only logs whose paths end with this will be imported. Defaults "
        "to 'piwik.php' so only requests to the piwik.php file will be imported."
    )
    option_parser.add_option(
        '--output', dest='output',
        help="Redirect output (stdout and stderr) to the specified file"
    )
    option_parser.add_option(
        '--encoding', dest='encoding', default='utf8',
        help="Log files encoding (default: %default)"
    )
    option_parser.add_option(
        '--disable-bulk-tracking', dest='use_bulk_tracking',
        default=True, action='store_false',
        help="Disables use of bulk tracking so recorders record one hit at a time."
    )
    option_parser.add_option(
        '--debug-force-one-hit-every-Ns', dest='force_one_action_interval', default=False, type='float',
        help="Debug option that will force each recorder to record one hit every N secs."
    )
    option_parser.add_option(
        '--force-lowercase-path', dest='force_lowercase_path', default=False, action='store_true',
        help="Make URL path lowercase so paths with the same letters but different cases are "
             "treated the same."
    )
    option_parser.add_option(
        '--enable-testmode', dest='enable_testmode', default=False, action='store_true',
        help="If set, it will try to get the token_auth from the piwik_tests directory"
    )
    option_parser.add_option(
        '--download-extensions', dest='download_extensions', default=None,
        help="By default Piwik tracks as Downloads the most popular file extensions. If you set this parameter (format: pdf,doc,...) then files with an extension found in the list will be imported as Downloads, other file extensions downloads will be skipped."
    )
    option_parser.add_option(
        '--add-download-extensions', dest='extra_download_extensions', default=None,
        help="Add extensions that should be treated as downloads. See --download-extensions for more info."
    )
    # key=value style options below use the _set_option_map callback to
    # accumulate a dict on the parser values.
    option_parser.add_option(
        '--w3c-map-field', action='callback', callback=functools.partial(self._set_option_map, 'custom_w3c_fields'), type='string',
        help="Map a custom log entry field in your W3C log to a default one. Use this option to load custom log "
             "files that use the W3C extended log format such as those from the Advanced Logging W3C module. Used "
             "as, eg, --w3c-map-field my-date=date. Recognized default fields include: %s\n\n"
             "Formats that extend the W3C extended log format (like the cloudfront RTMP log format) may define more "
             "fields that can be mapped."
             % (', '.join(W3cExtendedFormat.fields.keys()))
    )
    option_parser.add_option(
        '--w3c-time-taken-millisecs', action='store_true', default=False, dest='w3c_time_taken_in_millisecs',
        help="If set, interprets the time-taken W3C log field as a number of milliseconds. This must be set for importing"
             " IIS logs."
    )
    option_parser.add_option(
        '--w3c-fields', dest='w3c_fields', default=None,
        help="Specify the '#Fields:' line for a log file in the W3C Extended log file format. Use this option if "
             "your log file doesn't contain the '#Fields:' line which is required for parsing. This option must be used "
             "in conjuction with --log-format-name=w3c_extended.\n"
             "Example: --w3c-fields='#Fields: date time c-ip ...'"
    )
    option_parser.add_option(
        '--w3c-field-regex', action='callback', callback=functools.partial(self._set_option_map, 'w3c_field_regexes'), type='string',
        help="Specify a regex for a field in your W3C extended log file. You can use this option to parse fields the "
             "importer does not natively recognize and then use one of the --regex-group-to-XXX-cvar options to track "
             "the field in a custom variable. For example, specifying --w3c-field-regex=sc-win32-status=(?P<win32_status>\\S+) "
             "--regex-group-to-page-cvar=\"win32_status=Windows Status Code\" will track the sc-win32-status IIS field "
             "in the 'Windows Status Code' custom variable. Regexes must contain a named group."
    )
    option_parser.add_option(
        '--title-category-delimiter', dest='title_category_delimiter', default='/',
        help="If --enable-http-errors is used, errors are shown in the page titles report. If you have "
             "changed General.action_title_category_delimiter in your Piwik configuration, you need to set this "
             "option to the same value in order to get a pretty page titles report."
    )
    option_parser.add_option(
        '--dump-log-regex', dest='dump_log_regex', action='store_true', default=False,
        help="Prints out the regex string used to parse log lines and exists. Can be useful for using formats "
             "in newer versions of the script in older versions of the script. The output regex can be used with "
             "the --log-format-regex option."
    )
    option_parser.add_option(
        '--ignore-groups', dest='regex_groups_to_ignore', default=None,
        help="Comma separated list of regex groups to ignore when parsing log lines. Can be used to, for example, "
             "disable normal user id tracking. See documentation for --log-format-regex for list of available "
             "regex groups."
    )
    option_parser.add_option(
        '--regex-group-to-visit-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_visit_cvars_map'), type='string',
        help="Track an attribute through a custom variable with visit scope instead of through Piwik's normal "
             "approach. For example, to track usernames as a custom variable instead of through the uid tracking "
             "parameter, supply --regex-group-to-visit-cvar=\"userid=User Name\". This will track usernames in a "
             "custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
             "for --log-format-regex (additional regex groups you may have defined "
             "in --log-format-regex can also be used)."
    )
    option_parser.add_option(
        '--regex-group-to-page-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_page_cvars_map'), type='string',
        help="Track an attribute through a custom variable with page scope instead of through Piwik's normal "
             "approach. For example, to track usernames as a custom variable instead of through the uid tracking "
             "parameter, supply --regex-group-to-page-cvar=\"userid=User Name\". This will track usernames in a "
             "custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
             "for --log-format-regex (additional regex groups you may have defined "
             "in --log-format-regex can also be used)."
    )
    option_parser.add_option(
        '--retry-max-attempts', dest='max_attempts', default=PIWIK_DEFAULT_MAX_ATTEMPTS, type='int',
        help="The maximum number of times to retry a failed tracking request."
    )
    option_parser.add_option(
        '--retry-delay', dest='delay_after_failure', default=PIWIK_DEFAULT_DELAY_AFTER_FAILURE, type='int',
        help="The number of seconds to wait before retrying a failed tracking request."
    )
    option_parser.add_option(
        '--request-timeout', dest='request_timeout', default=DEFAULT_SOCKET_TIMEOUT, type='int',
        help="The maximum number of seconds to wait before terminating an HTTP request to Piwik."
    )
    return option_parser
def _set_option_map(self, option_attr_name, option, opt_str, value, parser):
    """
    Sets a key-value mapping in a dict that is built from command line options. Options that map
    string keys to string values (like --w3c-map-field) can set the callback to a bound partial
    of this method to handle the option.
    """
    # The option value must look like "key=value".
    key_and_value = value.split('=')
    if len(key_and_value) != 2:
        fatal_error("Invalid %s option: '%s'" % (opt_str, value))

    mapping_key, mapping_value = key_and_value

    # Lazily create the destination dict on the parsed-options object the
    # first time this callback fires for a given attribute name.
    if not hasattr(parser.values, option_attr_name):
        setattr(parser.values, option_attr_name, {})

    getattr(parser.values, option_attr_name)[mapping_key] = mapping_value
def _parse_args(self, option_parser):
"""
Parse the command line args and create self.options and self.filenames.
"""
self.options, self.filenames = option_parser.parse_args(sys.argv[1:])
if self.options.output:
sys.stdout = sys.stderr = open(self.options.output, 'a+', 0)
if not self.filenames:
print(option_parser.format_help())
sys.exit(1)
# Configure logging before calling logging.{debug,info}.
logging.basicConfig(
format='%(asctime)s: [%(levelname)s] %(message)s',
level=logging.DEBUG if self.options.debug >= 1 else logging.INFO,
)
self.options.excluded_useragents = set([s.lower() for s in self.options.excluded_useragents])
if self.options.exclude_path_from:
paths = [path.strip() for path in open(self.options.exclude_path_from).readlines()]
self.options.excluded_paths.extend(path for path in paths if len(path) > 0)
if self.options.excluded_paths:
self.options.excluded_paths = set(self.options.excluded_paths)
logging.debug('Excluded paths: %s', ' '.join(self.options.excluded_paths))
if self.options.include_path_from:
paths = [path.strip() for path in open(self.options.include_path_from).readlines()]
self.options.included_paths.extend(path for path in paths if len(path) > 0)
if self.options.included_paths:
self.options.included_paths = set(self.options.included_paths)
logging.debug('Included paths: %s', ' '.join(self.options.included_paths))
if self.options.hostnames:
logging.debug('Accepted hostnames: %s', ', '.join(self.options.hostnames))
else:
logging.debug('Accepted hostnames: all')
if self.options.log_format_regex:
self.format = RegexFormat('custom', self.options.log_format_regex, self.options.log_date_format)
elif self.options.log_format_name:
try:
self.format = FORMATS[self.options.log_format_name]
except KeyError:
fatal_error('invalid log format: %s' % self.options.log_format_name)
else:
self.format = None
if not hasattr(self.options, 'custom_w3c_fields'):
self.options.custom_w3c_fields = {}
elif self.format is not None:
# validate custom field mappings
for custom_name, default_name in self.options.custom_w3c_fields.iteritems():
if default_name not in type(format).fields:
fatal_error("custom W3C field mapping error: don't know how to parse and use the '%' field" % default_name)
return
if not hasattr(self.options, 'regex_group_to_visit_cvars_map'):
self.options.regex_group_to_visit_cvars_map = {}
if not hasattr(self.options, 'regex_group_to_page_cvars_map'):
self.options.regex_group_to_page_cvars_map = {}
if not hasattr(self.options, 'w3c_field_regexes'):
self.options.w3c_field_regexes = {}
else:
# make sure each custom w3c field regex has a named group
for field_name, field_regex in self.options.w3c_field_regexes.iteritems():
if '(?P<' not in field_regex:
fatal_error("cannot find named group in custom w3c field regex '%s' for field '%s'" % (field_regex, field_name))
return
if not self.options.piwik_url:
fatal_error('no URL given for Piwik')
if not (self.options.piwik_url.startswith('http://') or self.options.piwik_url.startswith('https://')):
self.options.piwik_url = 'http://' + self.options.piwik_url
logging.debug('Piwik URL is: %s', self.options.piwik_url)
if not self.options.piwik_token_auth:
try:
self.options.piwik_token_auth = self._get_token_auth()
except Piwik.Error, e:
fatal_error(e)
logging.debug('Authentication token token_auth is: %s', self.options.piwik_token_auth)
if self.options.recorders < 1:
self.options.recorders = 1
download_extensions = DOWNLOAD_EXTENSIONS
if self.options.download_extensions:
download_extensions = set(self.options.download_extensions.split(','))
if self.options.extra_download_extensions:
download_extensions.update(self.options.extra_download_extensions.split(','))
self.options.download_extensions = download_extensions
if self.options.regex_groups_to_ignore:
self.options.regex_groups_to_ignore = set(self.options.regex_groups_to_ignore.split(','))
def __init__(self):
    # Build the command-line option parser, then immediately parse
    # sys.argv with it (populates self.options and self.filenames).
    parser = self._create_parser()
    self._parse_args(parser)
def _get_token_auth(self):
    """
    If the token auth is not specified in the options, get it from Piwik.

    Two strategies, in order:
      1. If --login/--password were given, call the Piwik API method
         UsersManager.getTokenAuth directly.
      2. Otherwise run misc/cron/updatetoken.php with the PHP CLI and read
         the token from the file whose path that script prints.
    Calls fatal_error() (which exits) on any failure.
    """
    # Get superuser login/password from the options.
    logging.debug('No token-auth specified')

    if self.options.login and self.options.password:
        piwik_login = self.options.login
        # The API expects the MD5 hex digest of the password, not cleartext.
        piwik_password = hashlib.md5(self.options.password).hexdigest()

        logging.debug('Using credentials: (login = %s, password = %s)', piwik_login, piwik_password)
        try:
            # _token_auth='' forces an unauthenticated call; _url overrides
            # the default URL since config may not be fully set up yet.
            api_result = piwik.call_api('UsersManager.getTokenAuth',
                userLogin=piwik_login,
                md5Password=piwik_password,
                _token_auth='',
                _url=self.options.piwik_url,
            )
        except urllib2.URLError, e:
            fatal_error('error when fetching token_auth from the API: %s' % e)

        try:
            return api_result['value']
        except KeyError:
            # Happens when the credentials are invalid.
            message = api_result.get('message')
            fatal_error(
                'error fetching authentication token token_auth%s' % (
                ': %s' % message if message else '')
            )
    else:
        # Fallback to the given (or default) configuration file, then
        # get the token from the API.
        logging.debug(
            'No credentials specified, reading them from "%s"',
            self.options.config_file,
        )
        config_file = ConfigParser.RawConfigParser()
        # RawConfigParser.read() returns the list of files it could read.
        success = len(config_file.read(self.options.config_file)) > 0
        if not success:
            fatal_error(
                "the configuration file" + self.options.config_file + " could not be read. Please check permission. This file must be readable to get the authentication token"
            )

        # Path of the helper PHP script, relative to this importer file.
        updatetokenfile = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                '../../misc/cron/updatetoken.php'),
        )

        phpBinary = 'php'

        # On Windows, locate php.exe explicitly with `where`.
        is_windows = sys.platform.startswith('win')
        if is_windows:
            try:
                processWin = subprocess.Popen('where php.exe', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                [stdout, stderr] = processWin.communicate()
                if processWin.returncode == 0:
                    phpBinary = stdout.strip()
                else:
                    fatal_error("We couldn't detect PHP. It might help to add your php.exe to the path or alternatively run the importer using the --login and --password option")
            except:
                fatal_error("We couldn't detect PHP. You can run the importer using the --login and --password option to fix this issue")

        command = [phpBinary, updatetokenfile]
        if self.options.enable_testmode:
            command.append('--testmode')

        hostname = urlparse.urlparse( self.options.piwik_url ).hostname
        command.append('--piwik-domain=' + hostname )

        # list2cmdline + shell=True keeps quoting consistent across platforms.
        command = subprocess.list2cmdline(command)
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        [stdout, stderr] = process.communicate()
        if process.returncode != 0:
            fatal_error("`" + command + "` failed with error: " + stderr + ".\nReponse code was: " + str(process.returncode) + ". You can alternatively run the importer using the --login and --password option")

        # The PHP script prints the path of a file containing "login\ttoken".
        filename = stdout
        credentials = open(filename, 'r').readline()
        credentials = credentials.split('\t')
        return credentials[1]
def get_resolver(self):
    """Return the site-ID resolver implied by the parsed options."""
    # An explicit --idsite pins every hit to a single site.
    if self.options.site_id:
        logging.debug('Resolver: static')
        return StaticResolver(self.options.site_id)
    # Otherwise sites are looked up (and possibly created) by hostname.
    logging.debug('Resolver: dynamic')
    return DynamicResolver()
class Statistics(object):
    """
    Store statistics about parsed logs and recorded entries.
    Can optionally print statistics on standard output every second.
    """

    class Counter(object):
        """
        Simple integers cannot be used by multithreaded programs. See:
        http://stackoverflow.com/questions/6320107/are-python-ints-thread-safe
        """
        def __init__(self):
            # itertools.count's implementation in C does not release the GIL and
            # therefore is thread-safe.
            self.counter = itertools.count(1)
            self.value = 0

        def increment(self):
            # Atomically advance the C-level counter; `value` mirrors the
            # last value drawn from it.
            self.value = self.counter.next()

        def advance(self, n):
            # No atomic "add n" exists for itertools.count, so step n times.
            for i in range(n):
                self.increment()

        def __str__(self):
            return str(int(self.value))

    def __init__(self):
        # Wall-clock bounds of the import run (set via set_time_start/stop).
        self.time_start = None
        self.time_stop = None

        self.piwik_sites = set()                # sites ID
        self.piwik_sites_created = []           # (hostname, site ID)
        self.piwik_sites_ignored = set()        # hostname

        self.count_lines_parsed = self.Counter()
        self.count_lines_recorded = self.Counter()

        # requests that the Piwik tracker considered invalid (or failed to track)
        self.invalid_lines = []

        # Do not match the regexp.
        self.count_lines_invalid = self.Counter()
        # No site ID found by the resolver.
        self.count_lines_no_site = self.Counter()
        # Hostname filtered by config.options.hostnames
        self.count_lines_hostname_skipped = self.Counter()
        # Static files.
        self.count_lines_static = self.Counter()
        # Ignored user-agents.
        self.count_lines_skipped_user_agent = self.Counter()
        # Ignored HTTP errors.
        self.count_lines_skipped_http_errors = self.Counter()
        # Ignored HTTP redirects.
        self.count_lines_skipped_http_redirects = self.Counter()
        # Downloads
        self.count_lines_downloads = self.Counter()
        # Ignored downloads when --download-extensions is used
        self.count_lines_skipped_downloads = self.Counter()

        # Misc
        self.dates_recorded = set()
        self.monitor_stop = False

    def set_time_start(self):
        self.time_start = time.time()

    def set_time_stop(self):
        self.time_stop = time.time()

    def _compute_speed(self, value, start, end):
        # Returns value/elapsed; 0 for no work, a string for zero elapsed
        # time (callers only print the result).
        delta_time = end - start
        if value == 0:
            return 0
        if delta_time == 0:
            return 'very high!'
        else:
            return value / delta_time

    def _round_value(self, value, base=100):
        # Round to 1/base precision (default: two decimal places).
        return round(value * base) / base

    def _indent_text(self, lines, level=1):
        """
        Return an indented text. 'lines' can be a list of lines or a single
        line (as a string). One level of indentation is 4 spaces.
        """
        prefix = ' ' * (4 * level)
        if isinstance(lines, basestring):
            return prefix + lines
        else:
            return '\n'.join(
                prefix + line
                for line in lines
            )

    def print_summary(self):
        # Prepend an "invalid lines" section only when the tracker
        # rejected some requests.
        invalid_lines_summary = ''
        if self.invalid_lines:
            invalid_lines_summary = '''Invalid log lines
-----------------
The following lines were not tracked by Piwik, either due to a malformed tracker request or error in the tracker:
%s
''' % textwrap.fill(", ".join(self.invalid_lines), 80)

        print '''
%(invalid_lines)sLogs import summary
-------------------
%(count_lines_recorded)d requests imported successfully
%(count_lines_downloads)d requests were downloads
%(total_lines_ignored)d requests ignored:
%(count_lines_skipped_http_errors)d HTTP errors
%(count_lines_skipped_http_redirects)d HTTP redirects
%(count_lines_invalid)d invalid log lines
%(count_lines_no_site)d requests did not match any known site
%(count_lines_hostname_skipped)d requests did not match any --hostname
%(count_lines_skipped_user_agent)d requests done by bots, search engines...
%(count_lines_static)d requests to static resources (css, js, images, ico, ttf...)
%(count_lines_skipped_downloads)d requests to file downloads did not match any --download-extensions
Website import summary
----------------------
%(count_lines_recorded)d requests imported to %(total_sites)d sites
%(total_sites_existing)d sites already existed
%(total_sites_created)d sites were created:
%(sites_created)s
%(total_sites_ignored)d distinct hostnames did not match any existing site:
%(sites_ignored)s
%(sites_ignored_tips)s
Performance summary
-------------------
Total time: %(total_time)d seconds
Requests imported per second: %(speed_recording)s requests per second
Processing your log data
------------------------
In order for your logs to be processed by Piwik, you may need to run the following command:
./console core:archive --force-all-websites --force-all-periods=315576000 --force-date-last-n=1000 --url='%(url)s'
''' % {
            'count_lines_recorded': self.count_lines_recorded.value,
            'count_lines_downloads': self.count_lines_downloads.value,
            'total_lines_ignored': sum([
                self.count_lines_invalid.value,
                self.count_lines_skipped_user_agent.value,
                self.count_lines_skipped_http_errors.value,
                self.count_lines_skipped_http_redirects.value,
                self.count_lines_static.value,
                self.count_lines_skipped_downloads.value,
                self.count_lines_no_site.value,
                self.count_lines_hostname_skipped.value,
            ]),
            'count_lines_invalid': self.count_lines_invalid.value,
            'count_lines_skipped_user_agent': self.count_lines_skipped_user_agent.value,
            'count_lines_skipped_http_errors': self.count_lines_skipped_http_errors.value,
            'count_lines_skipped_http_redirects': self.count_lines_skipped_http_redirects.value,
            'count_lines_static': self.count_lines_static.value,
            'count_lines_skipped_downloads': self.count_lines_skipped_downloads.value,
            'count_lines_no_site': self.count_lines_no_site.value,
            'count_lines_hostname_skipped': self.count_lines_hostname_skipped.value,
            'total_sites': len(self.piwik_sites),
            # Sites that existed before the import = all touched sites minus
            # the ones this run created.
            'total_sites_existing': len(self.piwik_sites - set(site_id for hostname, site_id in self.piwik_sites_created)),
            'total_sites_created': len(self.piwik_sites_created),
            'sites_created': self._indent_text(
                ['%s (ID: %d)' % (hostname, site_id) for hostname, site_id in self.piwik_sites_created],
                level=3,
            ),
            'total_sites_ignored': len(self.piwik_sites_ignored),
            'sites_ignored': self._indent_text(
                self.piwik_sites_ignored, level=3,
            ),
            'sites_ignored_tips': '''
TIPs:
- if one of these hosts is an alias host for one of the websites
in Piwik, you can add this host as an "Alias URL" in Settings > Websites.
- use --add-sites-new-hosts if you wish to automatically create
one website for each of these hosts in Piwik rather than discarding
these requests.
- use --idsite-fallback to force all these log lines with a new hostname
to be recorded in a specific idsite (for example for troubleshooting/visualizing the data)
- use --idsite to force all lines in the specified log files
to be all recorded in the specified idsite
- or you can also manually create a new Website in Piwik with the URL set to this hostname
''' if self.piwik_sites_ignored else '',
            'total_time': self.time_stop - self.time_start,
            'speed_recording': self._round_value(self._compute_speed(
                self.count_lines_recorded.value,
                self.time_start, self.time_stop,
            )),
            'url': config.options.piwik_url,
            'invalid_lines': invalid_lines_summary
        }

    ##
    ## The monitor is a thread that prints a short summary each second.
    ##

    def _monitor(self):
        # Runs in a daemon thread; prints progress until stop_monitor()
        # flips self.monitor_stop. Reads the module-level `stats` object.
        latest_total_recorded = 0
        while not self.monitor_stop:
            current_total = stats.count_lines_recorded.value
            time_elapsed = time.time() - self.time_start
            print '%d lines parsed, %d lines recorded, %d records/sec (avg), %d records/sec (current)' % (
                stats.count_lines_parsed.value,
                current_total,
                current_total / time_elapsed if time_elapsed != 0 else 0,
                (current_total - latest_total_recorded) / config.options.show_progress_delay,
            )
            latest_total_recorded = current_total
            time.sleep(config.options.show_progress_delay)

    def start_monitor(self):
        t = threading.Thread(target=self._monitor)
        # Daemon thread: dies with the main program.
        t.daemon = True
        t.start()

    def stop_monitor(self):
        self.monitor_stop = True
class Piwik(object):
    """
    Make requests to Piwik.
    """

    class Error(Exception):
        # Raised for any Piwik request failure; `code` carries the HTTP
        # status code when one is available.
        def __init__(self, message, code = None):
            super(Exception, self).__init__(message)
            self.code = code

    class RedirectHandlerWithLogging(urllib2.HTTPRedirectHandler):
        """
        Special implementation of HTTPRedirectHandler that logs redirects in debug mode
        to help users debug system issues.
        """
        def redirect_request(self, req, fp, code, msg, hdrs, newurl):
            logging.debug("Request redirected (code: %s) to '%s'" % (code, newurl))

            return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)

    @staticmethod
    def _call(path, args, headers=None, url=None, data=None):
        """
        Make a request to the Piwik site. It is up to the caller to format
        arguments, to embed authentication, etc.

        Returns the raw response body as a string.
        """
        if url is None:
            url = config.options.piwik_url
        headers = headers or {}

        if data is None:
            # If Content-Type isn't defined, PHP do not parse the request's body.
            headers['Content-type'] = 'application/x-www-form-urlencoded'
            data = urllib.urlencode(args)
        elif not isinstance(data, basestring) and headers['Content-type'] == 'application/json':
            data = json.dumps(data)

            # With a JSON body, the args still go in the query string.
            if args:
                path = path + '?' + urllib.urlencode(args)

        headers['User-Agent'] = 'Piwik/LogImport'

        try:
            timeout = config.options.request_timeout
        except:
            timeout = None # the config global object may not be created at this point

        request = urllib2.Request(url + path, data, headers)

        # Handle basic auth if auth_user set
        try:
            auth_user = config.options.auth_user
            auth_password = config.options.auth_password
        except:
            auth_user = None
            auth_password = None

        if auth_user is not None:
            base64string = base64.encodestring('%s:%s' % (auth_user, auth_password)).replace('\n', '')
            request.add_header("Authorization", "Basic %s" % base64string)

        # Custom opener so redirects are logged in debug mode.
        opener = urllib2.build_opener(Piwik.RedirectHandlerWithLogging())
        response = opener.open(request, timeout = timeout)
        result = response.read()
        response.close()
        return result

    @staticmethod
    def _call_api(method, **kwargs):
        """
        Make a request to the Piwik API taking care of authentication, body
        formatting, etc.

        Returns the decoded JSON response.
        """
        args = {
            'module' : 'API',
            'format' : 'json2',
            'method' : method,
            'filter_limit' : '-1',
        }
        # token_auth, by default, is taken from config.
        token_auth = kwargs.pop('_token_auth', None)
        if token_auth is None:
            token_auth = config.options.piwik_token_auth
        if token_auth:
            args['token_auth'] = token_auth

        url = kwargs.pop('_url', None)

        if kwargs:
            args.update(kwargs)

        # Convert lists into appropriate format.
        # See: http://developer.piwik.org/api-reference/reporting-api#passing-an-array-of-data-as-a-parameter
        # Warning: we have to pass the parameters in order: foo[0], foo[1], foo[2]
        # and not foo[1], foo[0], foo[2] (it will break Piwik otherwise.)
        final_args = []
        for key, value in args.iteritems():
            if isinstance(value, (list, tuple)):
                for index, obj in enumerate(value):
                    final_args.append(('%s[%d]' % (key, index), obj))
            else:
                final_args.append((key, value))

        res = Piwik._call('/', final_args, url=url)

        try:
            return json.loads(res)
        except ValueError:
            raise urllib2.URLError('Piwik returned an invalid response: ' + res)

    @staticmethod
    def _call_wrapper(func, expected_response, on_failure, *args, **kwargs):
        """
        Try to make requests to Piwik at most PIWIK_FAILURE_MAX_RETRY times.

        Retries on network/HTTP errors and on unexpected responses, sleeping
        config.options.delay_after_failure seconds between attempts; raises
        Piwik.Error once config.options.max_attempts is reached.
        """
        errors = 0
        while True:
            try:
                response = func(*args, **kwargs)
                if expected_response is not None and response != expected_response:
                    # Let the caller build a better message (and possibly
                    # trim the payload) via the on_failure hook.
                    if on_failure is not None:
                        error_message = on_failure(response, kwargs.get('data'))
                    else:
                        error_message = "didn't receive the expected response. Response was %s " % response

                    raise urllib2.URLError(error_message)
                return response
            except (urllib2.URLError, httplib.HTTPException, ValueError, socket.timeout), e:
                logging.info('Error when connecting to Piwik: %s', e)

                code = None
                if isinstance(e, urllib2.HTTPError):
                    # See Python issue 13211.
                    message = 'HTTP Error %s %s' % (e.code, e.msg)
                    code = e.code
                elif isinstance(e, urllib2.URLError):
                    message = e.reason
                else:
                    message = str(e)

                # decorate message w/ HTTP response, if it can be retrieved
                if hasattr(e, 'read'):
                    message = message + ", response: " + e.read()

                errors += 1
                if errors == config.options.max_attempts:
                    logging.info("Max number of attempts reached, server is unreachable!")
                    raise Piwik.Error(message, code)
                else:
                    logging.info("Retrying request, attempt number %d" % (errors + 1))
                    time.sleep(config.options.delay_after_failure)

    @classmethod
    def call(cls, path, args, expected_content=None, headers=None, data=None, on_failure=None):
        # Retry-wrapped raw HTTP call (used for tracker requests).
        return cls._call_wrapper(cls._call, expected_content, on_failure, path, args, headers,
                                    data=data)

    @classmethod
    def call_api(cls, method, **kwargs):
        # Retry-wrapped JSON API call.
        return cls._call_wrapper(cls._call_api, None, None, method, **kwargs)
##
## Resolvers.
##
## A resolver is a class that turns a hostname into a Piwik site ID.
##
class StaticResolver(object):
    """
    Always return the same site ID, specified in the configuration.
    """

    def __init__(self, site_id):
        self.site_id = site_id
        # Look up the site's main URL once, up front, and fail fast when
        # the configured site ID is unknown to Piwik.
        site_info = piwik.call_api(
            'SitesManager.getSiteFromId', idSite=self.site_id
        )
        if site_info.get('result') == 'error':
            fatal_error(
                "cannot get the main URL of this site: %s" % site_info.get('message')
            )
        self._main_url = site_info['main_url']
        stats.piwik_sites.add(self.site_id)

    def resolve(self, hit):
        # Every hit maps to the single configured site.
        return (self.site_id, self._main_url)

    def check_format(self, format):
        # No requirement on the log format: the site never depends on the
        # parsed hostname.
        pass
class DynamicResolver(object):
    """
    Use Piwik API to determine the site ID.
    """

    # Serializes site creation so concurrent recorder threads don't create
    # duplicate sites for the same hostname.
    _add_site_lock = threading.Lock()

    def __init__(self):
        # hostname -> site ID cache; also holds the special 'sites' key
        # (all existing sites) when replaying tracker requests.
        self._cache = {}
        if config.options.replay_tracking:
            # get existing sites
            self._cache['sites'] = piwik.call_api('SitesManager.getAllSites')

    def _get_site_id_from_hit_host(self, hit):
        # Ask Piwik which site(s) declare this hostname among their URLs.
        return piwik.call_api(
            'SitesManager.getSitesIdFromSiteUrl',
            url=hit.host,
        )

    def _add_site(self, hit):
        """
        Find or create the site for hit.host; returns the site ID, a
        configured fallback ID, or None when the hit must be ignored.
        """
        main_url = 'http://' + hit.host

        DynamicResolver._add_site_lock.acquire()

        try:
            # After we obtain the lock, make sure the site hasn't already been created.
            res = self._get_site_id_from_hit_host(hit)
            if res:
                return res[0]['idsite']

            # The site doesn't exist.
            logging.debug('No Piwik site found for the hostname: %s', hit.host)
            if config.options.site_id_fallback is not None:
                logging.debug('Using default site for hostname: %s', hit.host)
                return config.options.site_id_fallback
            elif config.options.add_sites_new_hosts:
                if config.options.dry_run:
                    # Let's just return a fake ID.
                    return 0
                logging.debug('Creating a Piwik site for hostname %s', hit.host)
                result = piwik.call_api(
                    'SitesManager.addSite',
                    siteName=hit.host,
                    urls=[main_url],
                )
                if result.get('result') == 'error':
                    logging.error("Couldn't create a Piwik site for host %s: %s",
                        hit.host, result.get('message'),
                    )
                    return None
                else:
                    site_id = result['value']
                    stats.piwik_sites_created.append((hit.host, site_id))
                    return site_id
            else:
                # The site doesn't exist, we don't want to create new sites and
                # there's no default site ID. We thus have to ignore this hit.
                return None
        finally:
            DynamicResolver._add_site_lock.release()

    def _resolve(self, hit):
        # Look the hostname up in Piwik; create the site if needed/allowed.
        res = self._get_site_id_from_hit_host(hit)
        if res:
            # The site already exists.
            site_id = res[0]['idsite']
        else:
            site_id = self._add_site(hit)
        if site_id is not None:
            stats.piwik_sites.add(site_id)
        return site_id

    def _resolve_when_replay_tracking(self, hit):
        """
        If parsed site ID found in the _cache['sites'] return site ID and main_url,
        otherwise return (None, None) tuple.
        """
        site_id = hit.args['idsite']
        if site_id in self._cache['sites']:
            stats.piwik_sites.add(site_id)
            return (site_id, self._cache['sites'][site_id]['main_url'])
        else:
            return (None, None)

    def _resolve_by_host(self, hit):
        """
        Returns the site ID and site URL for a hit based on the hostname.
        """
        try:
            site_id = self._cache[hit.host]
        except KeyError:
            logging.debug(
                'Site ID for hostname %s not in cache', hit.host
            )
            site_id = self._resolve(hit)
            logging.debug('Site ID for hostname %s: %s', hit.host, site_id)
            self._cache[hit.host] = site_id
        return (site_id, 'http://' + hit.host)

    def resolve(self, hit):
        """
        Return the site ID from the cache if found, otherwise call _resolve.
        If replay_tracking option is enabled, call _resolve_when_replay_tracking.
        """
        if config.options.replay_tracking:
            # We only consider requests with piwik.php which don't need host to be imported
            return self._resolve_when_replay_tracking(hit)
        else:
            return self._resolve_by_host(hit)

    def check_format(self, format):
        # Dynamic resolution needs a hostname per hit, unless one is forced
        # with --log-hostname or we're replaying tracker requests.
        if config.options.replay_tracking:
            pass
        elif format.regex is not None and 'host' not in format.regex.groupindex and not config.options.log_hostname:
            fatal_error(
                "the selected log format doesn't include the hostname: you must "
                "specify the Piwik site ID with the --idsite argument"
            )
class Recorder(object):
    """
    A Recorder fetches hits from the Queue and inserts them into Piwik using
    the API.
    """

    # All launched Recorder instances (one daemon worker thread each).
    recorders = []

    def __init__(self):
        # Small maxsize applies back-pressure on the producing parser.
        self.queue = Queue.Queue(maxsize=2)

        # if bulk tracking disabled, make sure we can store hits outside of the Queue
        if not config.options.use_bulk_tracking:
            self.unrecorded_hits = []

    @classmethod
    def launch(cls, recorder_count):
        """
        Launch a bunch of Recorder objects in a separate thread.
        """
        for i in xrange(recorder_count):
            recorder = Recorder()
            cls.recorders.append(recorder)

            run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single
            t = threading.Thread(target=run)

            t.daemon = True
            t.start()
            logging.debug('Launched recorder')

    @classmethod
    def add_hits(cls, all_hits):
        """
        Add a set of hits to the recorders queue.
        """
        # Organize hits so that one client IP will always use the same queue.
        # We have to do this so visits from the same IP will be added in the right order.
        hits_by_client = [[] for r in cls.recorders]
        for hit in all_hits:
            hits_by_client[hit.get_visitor_id_hash() % len(cls.recorders)].append(hit)

        for i, recorder in enumerate(cls.recorders):
            recorder.queue.put(hits_by_client[i])

    @classmethod
    def wait_empty(cls):
        """
        Wait until all recorders have an empty queue.
        """
        for recorder in cls.recorders:
            recorder._wait_empty()

    def _run_bulk(self):
        # Worker loop for bulk tracking: each queue item is a list of hits
        # sent to Piwik in a single request.
        while True:
            try:
                hits = self.queue.get()
            except:
                # TODO: we should log something here, however when this happens, logging.etc will throw
                return

            if len(hits) > 0:
                try:
                    self._record_hits(hits)
                except Piwik.Error, e:
                    fatal_error(e, hits[0].filename, hits[0].lineno) # approximate location of error
            self.queue.task_done()

    def _run_single(self):
        # Worker loop when bulk tracking is off: hits go one at a time,
        # optionally throttled by --force-one-action-interval.
        while True:
            if config.options.force_one_action_interval != False:
                time.sleep(config.options.force_one_action_interval)

            if len(self.unrecorded_hits) > 0:
                hit = self.unrecorded_hits.pop(0)

                try:
                    self._record_hits([hit])
                except Piwik.Error, e:
                    fatal_error(e, hit.filename, hit.lineno)
            else:
                self.unrecorded_hits = self.queue.get()
                self.queue.task_done()

    def _wait_empty(self):
        """
        Wait until the queue is empty.
        """
        while True:
            if self.queue.empty():
                # We still have to wait for the last queue item being processed
                # (queue.empty() returns True before queue.task_done() is
                # called).
                self.queue.join()
                return
            time.sleep(1)

    def date_to_piwik(self, date):
        # Format a datetime as "YYYY-MM-DD HH:MM:SS". NOTE: the local name
        # `time` intentionally shadows the time module inside this method.
        date, time = date.isoformat(sep=' ').split()
        return '%s %s' % (date, time.replace('-', ':'))

    def _get_hit_args(self, hit):
        """
        Returns the args used in tracking a hit, without the token_auth.

        Returns None (implicit) when the hit resolves to no known site.
        """
        site_id, main_url = resolver.resolve(hit)
        if site_id is None:
            # This hit doesn't match any known Piwik site.
            if config.options.replay_tracking:
                stats.piwik_sites_ignored.add('unrecognized site ID %s' % hit.args.get('idsite'))
            else:
                stats.piwik_sites_ignored.add(hit.host)
            stats.count_lines_no_site.increment()
            return

        stats.dates_recorded.add(hit.date.date())

        path = hit.path
        if hit.query_string and not config.options.strip_query_string:
            path += config.options.query_string_delimiter + hit.query_string

        # only prepend main url / host if it's a path
        url_prefix = self._get_host_with_protocol(hit.host, main_url) if hasattr(hit, 'host') else main_url
        url = (url_prefix if path.startswith('/') else '') + path[:1024]

        # handle custom variables before generating args dict
        if config.options.enable_bots:
            if hit.is_robot:
                hit.add_visit_custom_var("Bot", hit.user_agent)
            else:
                hit.add_visit_custom_var("Not-Bot", hit.user_agent)

        hit.add_page_custom_var("HTTP-code", hit.status)

        args = {
            'rec': '1',
            'apiv': '1',
            'url': url.encode('utf8'),
            'urlref': hit.referrer[:1024].encode('utf8'),
            'cip': hit.ip,
            'cdt': self.date_to_piwik(hit.date),
            'idsite': site_id,
            'dp': '0' if config.options.reverse_dns else '1',
            'ua': hit.user_agent.encode('utf8')
        }
        if config.options.replay_tracking:
            # prevent request to be force recorded when option replay-tracking
            args['rec'] = '0'

        # idsite is already determined by resolver
        if 'idsite' in hit.args:
            del hit.args['idsite']

        args.update(hit.args)

        if hit.is_download:
            args['download'] = args['url']

        if config.options.enable_bots:
            args['bots'] = '1'

        if hit.is_error or hit.is_redirect:
            # Track errors/redirects as page views titled with the status
            # code and (url-encoded) URL, plus the referrer when present.
            args['action_name'] = '%s%sURL = %s%s' % (
                hit.status,
                config.options.title_category_delimiter,
                urllib.quote(args['url'], ''),
                ("%sFrom = %s" % (
                    config.options.title_category_delimiter,
                    urllib.quote(args['urlref'], '')
                ) if args['urlref'] != '' else '')
            )

        if hit.generation_time_milli > 0:
            args['gt_ms'] = int(hit.generation_time_milli)

        if hit.event_category and hit.event_action:
            args['e_c'] = hit.event_category
            args['e_a'] = hit.event_action

            if hit.event_name:
                args['e_n'] = hit.event_name

        if hit.length:
            args['bw_bytes'] = hit.length

        # convert custom variable args to JSON
        if 'cvar' in args and not isinstance(args['cvar'], basestring):
            args['cvar'] = json.dumps(args['cvar'])

        if '_cvar' in args and not isinstance(args['_cvar'], basestring):
            args['_cvar'] = json.dumps(args['_cvar'])

        return args

    def _get_host_with_protocol(self, host, main_url):
        # Borrow the scheme from the site's main URL when the parsed host
        # has none of its own.
        if '://' not in host:
            parts = urlparse.urlparse(main_url)
            host = parts.scheme + '://' + host
        return host

    def _record_hits(self, hits):
        """
        Inserts several hits into Piwik.
        """
        if not config.options.dry_run:
            data = {
                'token_auth': config.options.piwik_token_auth,
                'requests': [self._get_hit_args(hit) for hit in hits]
            }
            try:
                args = {}

                if config.options.debug_tracker:
                    args['debug'] = '1'

                response = piwik.call(
                    '/piwik.php', args=args,
                    expected_content=None,
                    headers={'Content-type': 'application/json'},
                    data=data,
                    on_failure=self._on_tracking_failure
                )

                if config.options.debug_tracker:
                    logging.debug('tracker response:\n%s' % response)

                # check for invalid requests
                try:
                    response = json.loads(response)
                except:
                    logging.info("bulk tracking returned invalid JSON")

                    # don't display the tracker response if we're debugging the tracker.
                    # debug tracker output will always break the normal JSON output.
                    if not config.options.debug_tracker:
                        logging.info("tracker response:\n%s" % response)

                    response = {}

                if ('invalid_indices' in response and isinstance(response['invalid_indices'], list) and
                    response['invalid_indices']):
                    # The tracker reports the indices (within this batch) of
                    # the requests it rejected; map them back to line numbers.
                    invalid_count = len(response['invalid_indices'])

                    invalid_lines = [str(hits[index].lineno) for index in response['invalid_indices']]
                    invalid_lines_str = ", ".join(invalid_lines)

                    stats.invalid_lines.extend(invalid_lines)

                    logging.info("The Piwik tracker identified %s invalid requests on lines: %s" % (invalid_count, invalid_lines_str))
                elif 'invalid' in response and response['invalid'] > 0:
                    logging.info("The Piwik tracker identified %s invalid requests." % response['invalid'])
            except Piwik.Error, e:
                # if the server returned 400 code, BulkTracking may not be enabled
                if e.code == 400:
                    fatal_error("Server returned status 400 (Bad Request).\nIs the BulkTracking plugin disabled?", hits[0].filename, hits[0].lineno)

                raise

        stats.count_lines_recorded.advance(len(hits))

    def _is_json(self, result):
        # True when `result` parses as JSON.
        try:
            json.loads(result)
            return True
        except ValueError, e:
            return False

    def _on_tracking_failure(self, response, data):
        """
        Removes the successfully tracked hits from the request payload so
        they are not logged twice.
        """
        try:
            response = json.loads(response)
        except:
            # the response should be in JSON, but in case it can't be parsed just try another attempt
            logging.debug("cannot parse tracker response, should be valid JSON")
            return response

        # remove the successfully tracked hits from payload
        tracked = response['tracked']
        data['requests'] = data['requests'][tracked:]

        return response['message']
class Hit(object):
    """
    It's a simple container: every keyword argument passed to the
    constructor becomes an attribute of the instance.
    """

    def __init__(self, **kwargs):
        # Copy all keyword arguments onto the instance verbatim.
        for attribute, attribute_value in kwargs.iteritems():
            setattr(self, attribute, attribute_value)
        super(Hit, self).__init__()

        if config.options.force_lowercase_path:
            self.full_path = self.full_path.lower()

    def get_visitor_id_hash(self):
        # Default to the client IP; when replaying tracker requests, prefer
        # an explicit visitor identifier from the original request args.
        visitor_id = self.ip

        if config.options.replay_tracking:
            for candidate_param in ['uid', 'cid', '_id', 'cip']:
                if candidate_param in self.args:
                    visitor_id = self.args[candidate_param]
                    break

        return abs(hash(visitor_id))

    def add_page_custom_var(self, key, value):
        """
        Adds a page custom variable to this Hit.
        """
        self._add_custom_var(key, value, 'cvar')

    def add_visit_custom_var(self, key, value):
        """
        Adds a visit custom variable to this Hit.
        """
        self._add_custom_var(key, value, '_cvar')

    def _add_custom_var(self, key, value, api_arg_name):
        # Custom variables live in self.args[api_arg_name] as a dict of
        # 1-based index -> [name, value] pairs.
        if api_arg_name not in self.args:
            self.args[api_arg_name] = {}

        current_cvars = self.args[api_arg_name]
        if isinstance(current_cvars, basestring):
            # The hit already carries a pre-serialized custom-var string;
            # we can't merge into it, so drop the addition.
            logging.debug("Ignoring custom %s variable addition [ %s = %s ], custom var already set to string." % (api_arg_name, key, value))
            return

        current_cvars[len(current_cvars) + 1] = [key, value]
class Parser(object):
    """
    The Parser parses the lines in a specified file and inserts them into
    a Queue.
    """

    def __init__(self):
        # Collect all check_* methods once at construction time; parse()
        # runs each of them against every hit to decide whether the hit
        # should be imported.
        self.check_methods = [method for name, method
                              in inspect.getmembers(self, predicate=inspect.ismethod)
                              if name.startswith('check_')]

    ## All check_* methods are called for each hit and must return True if the
    ## hit can be imported, False otherwise.
def check_hostname(self, hit):
    """Accept the hit unless its host fails every configured hostname pattern."""
    # Nothing to filter on: no host attribute, or no patterns configured.
    if not hasattr(hit, 'host') or not config.options.hostnames:
        return True
    # Accept the hostname as soon as one pattern matches.
    for pattern in config.options.hostnames:
        if fnmatch.fnmatch(hit.host, pattern):
            return True
    stats.count_lines_hostname_skipped.increment()
    return False
def check_static(self, hit):
    """Skip static-resource hits unless --enable-static is set."""
    if hit.extension not in STATIC_EXTENSIONS:
        return True
    if config.options.enable_static:
        # Static files are tracked as downloads when explicitly enabled.
        hit.is_download = True
        return True
    stats.count_lines_static.increment()
    return False
def check_download(self, hit):
    """Flag white-listed download extensions; skip other known download types."""
    extension = hit.extension
    if extension in config.options.download_extensions:
        stats.count_lines_downloads.increment()
        hit.is_download = True
        return True
    if extension in DOWNLOAD_EXTENSIONS:
        # The file is not in the white-listed downloads but is a known
        # download type, so we skip it.
        stats.count_lines_skipped_downloads.increment()
        return False
    return True
def check_user_agent(self, hit):
    """Skip hits from excluded user agents, unless bots are being tracked."""
    agent = hit.user_agent.lower()
    for needle in itertools.chain(EXCLUDED_USER_AGENTS, config.options.excluded_useragents):
        if needle not in agent:
            continue
        if not config.options.enable_bots:
            stats.count_lines_skipped_user_agent.increment()
            return False
        # Bots are tracked but tagged so they can be segmented later.
        hit.is_robot = True
        return True
    return True
def check_http_error(self, hit):
    """Decide whether 4xx/5xx responses are imported, tagged, or skipped."""
    if hit.status[0] not in ('4', '5'):
        return True
    if config.options.replay_tracking:
        # Process error logs for replay tracking, since we don't care if
        # piwik error-ed the first time.
        return True
    if config.options.enable_http_errors:
        hit.is_error = True
        return True
    stats.count_lines_skipped_http_errors.increment()
    return False
def check_http_redirect(self, hit):
    """Decide whether 3xx responses (except 304) are imported or skipped."""
    is_redirect = hit.status[0] == '3' and hit.status != '304'
    if not is_redirect:
        return True
    if config.options.enable_http_redirects:
        hit.is_redirect = True
        return True
    stats.count_lines_skipped_http_redirects.increment()
    return False
def check_path(self, hit):
    """Apply the excluded-path and included-path glob filters to the hit."""
    # Exclusions always win.
    if any(fnmatch.fnmatch(hit.path, excluded)
           for excluded in config.options.excluded_paths):
        return False
    inclusions = config.options.included_paths
    if not inclusions:
        # By default, all paths are included.
        return True
    return any(fnmatch.fnmatch(hit.path, included) for included in inclusions)
@staticmethod
def check_format(lineOrFile):
    """Try every known log format against a line or file object and return
    the best-matching format object, or False when nothing matches.

    "Best" means the regex match with the most captured groups; formats
    whose match object has no groups() (non-regex formats) are accepted
    as a fallback.
    """
    format = False
    format_groups = 0
    for name, candidate_format in FORMATS.iteritems():
        logging.debug("Check format %s", name)
        match = None
        try:
            if isinstance(lineOrFile, basestring):
                match = candidate_format.check_format_line(lineOrFile)
            else:
                match = candidate_format.check_format(lineOrFile)
        except Exception, e:
            # A candidate format failing to parse is expected; just log it.
            logging.debug('Error in format checking: %s', traceback.format_exc())
            pass
        if match:
            logging.debug('Format %s matches', name)
            # compare format groups if this *BaseFormat has groups() method
            try:
                # if there's more info in this match, use this format
                match_groups = len(match.groups())
                logging.debug('Format match contains %d groups' % match_groups)
                if format_groups < match_groups:
                    format = candidate_format
                    format_groups = match_groups
            except AttributeError:
                # match has no groups(): non-regex format, accept it as-is
                format = candidate_format
        else:
            logging.debug('Format %s does not match', name)
    # if the format is W3cExtendedFormat, check if the logs are from IIS and if so, issue a warning if the
    # --w3c-time-taken-milli option isn't set
    if isinstance(format, W3cExtendedFormat):
        format.check_for_iis_option()
    return format
@staticmethod
def detect_format(file):
    """
    Return the best matching format for this file, or None if none was found.
    """
    logging.debug('Detecting the log format')
    format = False
    # check the format using the file (for formats like the W3cExtendedFormat one)
    format = Parser.check_format(file)
    # check the format using the first N lines (to avoid irregular ones)
    lineno = 0
    limit = 100000
    while not format and lineno < limit:
        line = file.readline()
        if not line: # if at eof, don't keep looping
            break
        lineno = lineno + 1
        logging.debug("Detecting format against line %i" % lineno)
        format = Parser.check_format(line)
    # Rewind so the caller can parse the file from the beginning; seeking
    # can fail for non-seekable streams (e.g. stdin), which is fine.
    try:
        file.seek(0)
    except IOError:
        pass
    if not format:
        # fatal_error() exits the process; the return is unreachable in
        # normal operation but keeps the None contract explicit.
        fatal_error("cannot automatically determine the log format using the first %d lines of the log file. " % limit +
                    "\nMaybe try specifying the format with the --log-format-name command line argument." )
        return
    logging.debug('Format %s is the best match', format.name)
    return format
def parse(self, filename):
    """
    Parse the specified filename and insert hits in the queue.

    Handles stdin ('-'), plain, .gz and .bz2 files; detects or reuses the
    log format; builds a Hit per matching line; runs all check_* filters;
    and hands batches of hits to the Recorder.
    """
    def invalid_line(line, reason):
        # Count and (in debug mode) report a line that could not be imported.
        stats.count_lines_invalid.increment()
        if config.options.debug >= 2:
            logging.debug('Invalid line detected (%s): %s' % (reason, line))

    # --- open the input ---------------------------------------------------
    if filename == '-':
        filename = '(stdin)'
        file = sys.stdin
    else:
        if not os.path.exists(filename):
            print >> sys.stderr, "\n=====> Warning: File %s does not exist <=====" % filename
            return
        else:
            # Pick an opener based on the compression suffix.
            if filename.endswith('.bz2'):
                open_func = bz2.BZ2File
            elif filename.endswith('.gz'):
                open_func = gzip.open
            else:
                open_func = open
            file = open_func(filename, 'r')
    if config.options.show_progress:
        print 'Parsing log %s...' % filename

    # --- determine the log format -----------------------------------------
    if config.format:
        # The format was explicitly specified.
        format = config.format
        if isinstance(format, W3cExtendedFormat):
            format.create_regex(file)
            if format.regex is None:
                return fatal_error(
                    "File is not in the correct format, is there a '#Fields:' line? "
                    "If not, use the --w3c-fields option."
                )
    else:
        # If the file is empty, don't bother.
        data = file.read(100)
        if len(data.strip()) == 0:
            return
        try:
            file.seek(0)
        except IOError:
            pass
        format = self.detect_format(file)
        if format is None:
            return fatal_error(
                'Cannot guess the logs format. Please give one using '
                'either the --log-format-name or --log-format-regex option'
            )
    # Make sure the format is compatible with the resolver.
    resolver.check_format(format)
    if config.options.dump_log_regex:
        logging.info("Using format '%s'." % format.name)
        if format.regex:
            logging.info("Regex being used: %s" % format.regex.pattern)
        else:
            logging.info("Format %s does not use a regex to parse log lines." % format.name)
        logging.info("--dump-log-regex option used, aborting log import.")
        os._exit(0)

    # --- main line loop ---------------------------------------------------
    valid_lines_count = 0
    hits = []
    lineno = -1
    while True:
        line = file.readline()
        if not line: break
        lineno = lineno + 1
        try:
            line = line.decode(config.options.encoding)
        except UnicodeDecodeError:
            invalid_line(line, 'invalid encoding')
            continue
        stats.count_lines_parsed.increment()
        if stats.count_lines_parsed.value <= config.options.skip:
            continue
        match = format.match(line)
        if not match:
            invalid_line(line, 'line did not match')
            continue
        valid_lines_count = valid_lines_count + 1
        if config.options.debug_request_limit and valid_lines_count >= config.options.debug_request_limit:
            # Flush what we have and stop early when debugging.
            if len(hits) > 0:
                Recorder.add_hits(hits)
            logging.info("Exceeded limit specified in --debug-request-limit, exiting.")
            return
        hit = Hit(
            filename=filename,
            lineno=lineno,
            status=format.get('status'),
            full_path=format.get('path'),
            is_download=False,
            is_robot=False,
            is_error=False,
            is_redirect=False,
            args={},
        )
        if config.options.regex_group_to_page_cvars_map:
            self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)
        if config.options.regex_group_to_visit_cvars_map:
            self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)
        if config.options.regex_groups_to_ignore:
            format.remove_ignored_groups(config.options.regex_groups_to_ignore)
        # Split path from query string; formats without an explicit
        # query_string field fall back to splitting on the delimiter.
        try:
            hit.query_string = format.get('query_string')
            hit.path = hit.full_path
        except BaseFormatException:
            hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)
        # W3cExtendedFormat defaults to - when there is no query string, but we want empty string
        if hit.query_string == '-':
            hit.query_string = ''
        hit.extension = hit.path.rsplit('.')[-1].lower()
        try:
            hit.referrer = format.get('referrer')
            if hit.referrer.startswith('"'):
                hit.referrer = hit.referrer[1:-1]
        except BaseFormatException:
            hit.referrer = ''
        if hit.referrer == '-':
            hit.referrer = ''
        try:
            hit.user_agent = format.get('user_agent')
            # in case a format parser included enclosing quotes, remove them so they are not
            # sent to Piwik
            if hit.user_agent.startswith('"'):
                hit.user_agent = hit.user_agent[1:-1]
        except BaseFormatException:
            hit.user_agent = ''
        hit.ip = format.get('ip')
        try:
            hit.length = int(format.get('length'))
        except (ValueError, BaseFormatException):
            # Some lines or formats don't have a length (e.g. 304 redirects, W3C logs)
            hit.length = 0
        # Generation time may come in milli, micro or seconds, try each in turn.
        try:
            hit.generation_time_milli = float(format.get('generation_time_milli'))
        except BaseFormatException:
            try:
                hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000
            except BaseFormatException:
                try:
                    hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000
                except BaseFormatException:
                    hit.generation_time_milli = 0
        if config.options.log_hostname:
            hit.host = config.options.log_hostname
        else:
            try:
                hit.host = format.get('host').lower().strip('.')
                if hit.host.startswith('"'):
                    hit.host = hit.host[1:-1]
            except BaseFormatException:
                # Some formats have no host.
                pass
        # Add userid
        try:
            hit.userid = None
            userid = format.get('userid')
            if userid != '-':
                hit.args['uid'] = hit.userid = userid
        except:
            # best-effort: formats without a userid field land here
            pass
        # add event info
        try:
            hit.event_category = hit.event_action = hit.event_name = None
            hit.event_category = format.get('event_category')
            hit.event_action = format.get('event_action')
            hit.event_name = format.get('event_name')
            if hit.event_name == '-':
                hit.event_name = None
        except:
            # best-effort: formats without event fields land here
            pass
        # Check if the hit must be excluded.
        if not all((method(hit) for method in self.check_methods)):
            continue
        # Parse date.
        # We parse it after calling check_methods as it's quite CPU hungry, and
        # we want to avoid that cost for excluded hits.
        date_string = format.get('date')
        try:
            hit.date = datetime.datetime.strptime(date_string, format.date_format)
        except ValueError, e:
            invalid_line(line, 'invalid date or invalid format: %s' % str(e))
            continue
        # Parse timezone and substract its value from the date
        try:
            timezone = float(format.get('timezone'))
        except BaseFormatException:
            timezone = 0
        except ValueError:
            invalid_line(line, 'invalid timezone')
            continue
        if timezone:
            # timezone is in +-HHMM notation, hence the /100
            hit.date -= datetime.timedelta(hours=timezone/100)
        if config.options.replay_tracking:
            # we need a query string and we only consider requests with piwik.php
            if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):
                invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php')
                continue
            query_arguments = urlparse.parse_qs(hit.query_string)
            if not "idsite" in query_arguments:
                invalid_line(line, 'missing idsite')
                continue
            try:
                hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())
            except UnicodeDecodeError:
                invalid_line(line, 'invalid encoding')
                continue
        hits.append(hit)
        # Flush a full batch to the recorders.
        if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):
            Recorder.add_hits(hits)
            hits = []
    # add last chunk of hits
    if len(hits) > 0:
        Recorder.add_hits(hits)
def _add_custom_vars_from_regex_groups(self, hit, format, groups, is_page_var):
    """Copy named regex-group values into page or visit custom variables."""
    for group_name, custom_var_name in groups.iteritems():
        if group_name not in format.get_all():
            continue
        value = format.get(group_name)
        # '-' is the empty placeholder value; don't track it.
        if value == '-':
            continue
        if is_page_var:
            hit.add_page_custom_var(custom_var_name, value)
        else:
            hit.add_visit_custom_var(custom_var_name, value)
def main():
    """
    Start the importing process.

    Sets up timing/progress statistics, launches the recorder threads,
    parses every configured log file, then prints the summary. A
    KeyboardInterrupt stops parsing but still prints the summary.
    """
    stats.set_time_start()
    if config.options.show_progress:
        stats.start_monitor()
    # Fix: launch() was previously bound to an unused local `recorders`;
    # only the side effect of starting the recorder threads is needed.
    Recorder.launch(config.options.recorders)
    try:
        for filename in config.filenames:
            parser.parse(filename)
        # Block until all queued hits have been recorded.
        Recorder.wait_empty()
    except KeyboardInterrupt:
        pass
    stats.set_time_stop()
    if config.options.show_progress:
        stats.stop_monitor()
    stats.print_summary()
def fatal_error(error, filename=None, lineno=None):
    """Print an error (plus restart instructions when a file position is
    known) to stderr and terminate the process immediately.

    os._exit is used deliberately so worker threads cannot keep the
    process alive.
    """
    print >> sys.stderr, 'Fatal error: %s' % error
    if filename and lineno is not None:
        print >> sys.stderr, (
            'You can restart the import of "%s" from the point it failed by '
            'specifying --skip=%d on the command line.\n' % (filename, lineno)
        )
    os._exit(1)
if __name__ == '__main__':
    try:
        # Module-level singletons used throughout the script; order matters:
        # the resolver and parser read the parsed configuration.
        piwik = Piwik()
        config = Configuration()
        stats = Statistics()
        resolver = config.get_resolver()
        parser = Parser()
        main()
        sys.exit(0)
    except KeyboardInterrupt:
        # Swallow Ctrl-C at top level for a quiet exit.
        pass
| hannesk001/SPHERE-Framework | Library/Piwik/misc/log-analytics/import_logs.py | Python | agpl-3.0 | 92,545 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.widgets.reportview import get_match_cond
def get_filters_cond(doctype, filters, conditions):
    """Translate a dict or list of link-field filters into an SQL fragment.

    A value prefixed with '!' means "not equal". The extra conditions are
    appended (via build_filter_conditions) to the mutable `conditions` list
    supplied by the caller, and the combined ' and ...' string is returned;
    an empty string is returned when there are no filters.
    """
    if not filters:
        return ''
    if isinstance(filters, dict):
        filters = filters.items()
    flt = []
    for f in filters:
        # Fix: startswith() safely handles an empty-string value, where the
        # original f[1][0] indexing raised IndexError.
        if isinstance(f[1], basestring) and f[1].startswith('!'):
            flt.append([doctype, f[0], '!=', f[1][1:]])
        else:
            flt.append([doctype, f[0], '=', f[1]])
    from webnotes.widgets.reportview import build_filter_conditions
    build_filter_conditions(flt, conditions)
    return ' and ' + ' and '.join(conditions)
# searches for active employees
def employee_query(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search over active, non-cancelled Employees.

    SECURITY NOTE(review): the SQL is assembled with %-string interpolation,
    so `txt`, `searchfield` and the filter values are injectable; this should
    be migrated to parameterized queries.
    """
    conditions = []
    return webnotes.conn.sql("""select name, employee_name from `tabEmployee`
        where status = 'Active'
            and docstatus < 2
            and (%(key)s like "%(txt)s"
                or employee_name like "%(txt)s")
            %(fcond)s %(mcond)s
        order by
            case when name like "%(txt)s" then 0 else 1 end,
            case when employee_name like "%(txt)s" then 0 else 1 end,
            name
        limit %(start)s, %(page_len)s""" % {'key': searchfield, 'txt': "%%%s%%" % txt, 'fcond':get_filters_cond(doctype, filters, conditions) ,
        'mcond':get_match_cond(doctype, searchfield), 'start': start, 'page_len': page_len})
# searches for leads which are not converted
def lead_query(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search over non-converted, non-cancelled Leads.

    SECURITY NOTE(review): %-interpolated SQL — `txt` and `searchfield`
    are injectable; should use parameterized queries.
    """
    return webnotes.conn.sql("""select name, lead_name, company_name from `tabLead`
        where docstatus < 2
            and ifnull(status, '') != 'Converted'
            and (%(key)s like "%(txt)s"
                or lead_name like "%(txt)s"
                or company_name like "%(txt)s")
            %(mcond)s
        order by
            case when name like "%(txt)s" then 0 else 1 end,
            case when lead_name like "%(txt)s" then 0 else 1 end,
            case when company_name like "%(txt)s" then 0 else 1 end,
            lead_name asc
        limit %(start)s, %(page_len)s""" % {'key': searchfield, 'txt': "%%%s%%" % txt,
        'mcond':get_match_cond(doctype, searchfield), 'start': start, 'page_len': page_len})
# searches for customer
def customer_query(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search over Customers; the selected columns depend on the
    "cust_master_name" user default (naming by name vs. customer_name).

    SECURITY NOTE(review): %-interpolated SQL — `txt` and `searchfield`
    are injectable; should use parameterized queries.
    """
    cust_master_name = webnotes.defaults.get_user_default("cust_master_name")
    if cust_master_name == "Customer Name":
        fields = ["name", "customer_group", "territory"]
    else:
        fields = ["name", "customer_name", "customer_group", "territory"]
    fields = ", ".join(fields)
    return webnotes.conn.sql("""select %(field)s from `tabCustomer`
        where docstatus < 2
            and (%(key)s like "%(txt)s"
                or customer_name like "%(txt)s")
            %(mcond)s
        order by
            case when name like "%(txt)s" then 0 else 1 end,
            case when customer_name like "%(txt)s" then 0 else 1 end,
            name, customer_name
        limit %(start)s, %(page_len)s""" % {'field': fields,'key': searchfield,
        'txt': "%%%s%%" % txt, 'mcond':get_match_cond(doctype, searchfield),
        'start': start, 'page_len': page_len})
# searches for supplier
def supplier_query(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search over Suppliers; the selected columns depend on the
    "supp_master_name" user default (naming by name vs. supplier_name).

    SECURITY NOTE(review): %-interpolated SQL — `txt` and `searchfield`
    are injectable; should use parameterized queries.
    """
    supp_master_name = webnotes.defaults.get_user_default("supp_master_name")
    if supp_master_name == "Supplier Name":
        fields = ["name", "supplier_type"]
    else:
        fields = ["name", "supplier_name", "supplier_type"]
    fields = ", ".join(fields)
    return webnotes.conn.sql("""select %(field)s from `tabSupplier`
        where docstatus < 2
            and (%(key)s like "%(txt)s"
                or supplier_name like "%(txt)s")
            %(mcond)s
        order by
            case when name like "%(txt)s" then 0 else 1 end,
            case when supplier_name like "%(txt)s" then 0 else 1 end,
            name, supplier_name
        limit %(start)s, %(page_len)s """ % {'field': fields,'key': searchfield,
        'txt': "%%%s%%" % txt, 'mcond':get_match_cond(doctype, searchfield), 'start': start,
        'page_len': page_len})
def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
    """Search ledger Accounts of the given account types (or P&L accounts on
    the given debit/credit side) within a company.

    Values are passed as DB-API parameters; only `searchfield` is still
    interpolated into the SQL — NOTE(review): validate/whitelist it.
    """
    return webnotes.conn.sql("""select name, parent_account, debit_or_credit
        from tabAccount
        where tabAccount.docstatus!=2
            and (account_type in (%s) or
                (ifnull(is_pl_account, 'No') = 'Yes' and debit_or_credit = %s) )
            and group_or_ledger = 'Ledger'
            and company = %s
            and `%s` LIKE %s
        limit %s, %s""" %
        (", ".join(['%s']*len(filters.get("account_type"))),
            "%s", "%s", searchfield, "%s", "%s", "%s"),
        tuple(filters.get("account_type") + [filters.get("debit_or_credit"),
            filters.get("company"), "%%%s%%" % txt, start, page_len]))
def item_query(doctype, txt, searchfield, start, page_len, filters):
    """Search non-cancelled, not end-of-life Items, truncating long
    item_name/description for display.

    Values go through DB-API parameters; `searchfield`, fcond and mcond are
    still .format()-ed into the SQL — NOTE(review): validate `searchfield`.
    The 'decription' alias typo is kept as-is since clients may rely on it.
    """
    from webnotes.utils import nowdate
    conditions = []
    return webnotes.conn.sql("""select tabItem.name,
        if(length(tabItem.item_name) > 40,
            concat(substr(tabItem.item_name, 1, 40), "..."), item_name) as item_name,
        if(length(tabItem.description) > 40, \
            concat(substr(tabItem.description, 1, 40), "..."), description) as decription
        from tabItem
        where tabItem.docstatus < 2
            and (ifnull(tabItem.end_of_life, '') = '' or tabItem.end_of_life > %(today)s)
            and (tabItem.`{key}` LIKE %(txt)s
                or tabItem.item_name LIKE %(txt)s)
            {fcond} {mcond}
        limit %(start)s, %(page_len)s """.format(key=searchfield,
            fcond=get_filters_cond(doctype, filters, conditions),
            mcond=get_match_cond(doctype, searchfield)),
        {
            "today": nowdate(),
            "txt": "%%%s%%" % txt,
            "start": start,
            "page_len": page_len
        })
def bom(doctype, txt, searchfield, start, page_len, filters):
    """Search submitted, active BOMs.

    SECURITY NOTE(review): %-interpolated SQL — `txt` and `searchfield`
    are injectable; should use parameterized queries.
    """
    conditions = []
    return webnotes.conn.sql("""select tabBOM.name, tabBOM.item
        from tabBOM
        where tabBOM.docstatus=1
            and tabBOM.is_active=1
            and tabBOM.%(key)s like "%(txt)s"
            %(fcond)s %(mcond)s
        limit %(start)s, %(page_len)s """ % {'key': searchfield, 'txt': "%%%s%%" % txt,
        'fcond': get_filters_cond(doctype, filters, conditions),
        'mcond':get_match_cond(doctype, searchfield), 'start': start, 'page_len': page_len})
def get_project_name(doctype, txt, searchfield, start, page_len, filters):
    """Search open Projects, optionally restricted to a customer (or to
    projects with no customer set).

    SECURITY NOTE(review): filters['customer'] and `txt` are concatenated
    straight into the SQL — injectable; should use parameterized queries.
    Also raises KeyError when 'customer' is absent from filters.
    """
    cond = ''
    if filters['customer']:
        cond = '(`tabProject`.customer = "' + filters['customer'] + '" or ifnull(`tabProject`.customer,"")="") and'
    return webnotes.conn.sql("""select `tabProject`.name from `tabProject`
        where `tabProject`.status not in ("Completed", "Cancelled")
            and %(cond)s `tabProject`.name like "%(txt)s" %(mcond)s
        order by `tabProject`.name asc
        limit %(start)s, %(page_len)s """ % {'cond': cond,'txt': "%%%s%%" % txt,
        'mcond':get_match_cond(doctype, searchfield),'start': start, 'page_len': page_len})
def get_delivery_notes_to_be_billed(doctype, txt, searchfield, start, page_len, filters):
    """Search submitted Delivery Notes whose delivered qty exceeds the qty
    already billed on submitted Sales Invoices.

    NOTE(review): get_match_cond is called with one argument here while the
    rest of this module passes (doctype, searchfield) — confirm both
    signatures are supported. `searchfield` is interpolated into the SQL.
    """
    return webnotes.conn.sql("""select `tabDelivery Note`.name, `tabDelivery Note`.customer_name
        from `tabDelivery Note`
        where `tabDelivery Note`.`%(key)s` like %(txt)s and
            `tabDelivery Note`.docstatus = 1 %(fcond)s and
            (ifnull((select sum(qty) from `tabDelivery Note Item` where
                    `tabDelivery Note Item`.parent=`tabDelivery Note`.name), 0) >
                ifnull((select sum(qty) from `tabSales Invoice Item` where
                    `tabSales Invoice Item`.docstatus = 1 and
                    `tabSales Invoice Item`.delivery_note=`tabDelivery Note`.name), 0))
            %(mcond)s order by `tabDelivery Note`.`%(key)s` asc
        limit %(start)s, %(page_len)s""" % {
            "key": searchfield,
            "fcond": get_filters_cond(doctype, filters, []),
            "mcond": get_match_cond(doctype),
            "start": "%(start)s", "page_len": "%(page_len)s", "txt": "%(txt)s"
        }, { "start": start, "page_len": page_len, "txt": ("%%%s%%" % txt) })
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
    """Search non-expired Batches for an item; when a warehouse filter is
    given, only batches with positive stock in that warehouse are returned.

    SECURITY NOTE(review): filter values and `txt` are %-interpolated into
    the SQL — injectable; should use parameterized queries. `has_key` is
    Python-2-only.
    """
    from controllers.queries import get_match_cond
    if filters.has_key('warehouse'):
        # Warehouse given: derive batches from stock ledger balances.
        return webnotes.conn.sql("""select batch_no from `tabStock Ledger Entry` sle
            where item_code = '%(item_code)s'
                and warehouse = '%(warehouse)s'
                and batch_no like '%(txt)s'
                and exists(select * from `tabBatch`
                    where name = sle.batch_no
                        and (ifnull(expiry_date, '')='' or expiry_date >= '%(posting_date)s')
                        and docstatus != 2)
            %(mcond)s
            group by batch_no having sum(actual_qty) > 0
            order by batch_no desc
            limit %(start)s, %(page_len)s """ % {'item_code': filters['item_code'],
            'warehouse': filters['warehouse'], 'posting_date': filters['posting_date'],
            'txt': "%%%s%%" % txt, 'mcond':get_match_cond(doctype, searchfield),
            'start': start, 'page_len': page_len})
    else:
        # No warehouse: list batches of the item directly.
        return webnotes.conn.sql("""select name from tabBatch
            where docstatus != 2
                and item = '%(item_code)s'
                and (ifnull(expiry_date, '')='' or expiry_date >= '%(posting_date)s')
                and name like '%(txt)s'
            %(mcond)s
            order by name desc
            limit %(start)s, %(page_len)s""" % {'item_code': filters['item_code'],
            'posting_date': filters['posting_date'], 'txt': "%%%s%%" % txt,
            'mcond':get_match_cond(doctype, searchfield),'start': start,
            'page_len': page_len})
| saurabh6790/test-med-app | controllers/queries.py | Python | agpl-3.0 | 9,022 |
import subprocess
import smtplib
import socket
from email.mime.text import MIMEText
import datetime

# Change to your own account information.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file kept out of version control, and
# rotate this app password.
to = 'rk.ryan.king@gmail.com'
gmail_user = 'rk.ryan.king@gmail.com'
gmail_password = 'nzwaahcmdzjchxsz'

smtpserver = smtplib.SMTP('smtp.gmail.com', 587)
smtpserver.ehlo()
smtpserver.starttls()
# Fix: this was `smtpserver.ehlo` without parentheses — a no-op attribute
# access. SMTP requires a fresh EHLO after STARTTLS (RFC 3207).
smtpserver.ehlo()
smtpserver.login(gmail_user, gmail_password)
today = datetime.date.today()

# Very Linux specific: parse `ip route list` output for the source address.
arg = 'ip route list'
p = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
data = p.communicate()
split_data = data[0].split()
ipaddr = split_data[split_data.index('src') + 1]
my_ip = 'Your ip is %s' % ipaddr

# Build and send the notification mail.
msg = MIMEText(my_ip)
msg['Subject'] = 'IP For RaspberryPi on %s' % today.strftime('%b %d %Y')
msg['From'] = gmail_user
msg['To'] = to
smtpserver.sendmail(gmail_user, [to], msg.as_string())
smtpserver.quit()
from __future__ import annotations
import glob
import logging
import os
import shutil
import uuid
from xia2.Driver.DriverFactory import DriverFactory
from xia2.lib.bits import auto_logfiler
from xia2.Wrappers.XIA.Integrate import Integrate as XIA2Integrate
logger = logging.getLogger("xia2.Applications.xia2_helpers")
def process_one_sweep(args):
    """Run xia2 integration for a single sweep in an isolated temp directory,
    then move the results into the real sweep directory.

    `args` is a one-element sequence holding a namespace-like object with the
    sweep parameters. Returns (success, captured_log_output, xsweep_dict)
    where xsweep_dict is None on failure.
    """
    assert len(args) == 1
    args = args[0]
    # stop_after = args.stop_after
    command_line_args = args.command_line_args
    nproc = args.nproc
    crystal_id = args.crystal_id
    wavelength_id = args.wavelength_id
    sweep_id = args.sweep_id
    failover = args.failover
    driver_type = args.driver_type
    # Remember the current driver type so it can be restored afterwards.
    default_driver_type = DriverFactory.get_driver_type()
    DriverFactory.set_driver_type(driver_type)
    curdir = os.path.abspath(os.curdir)
    # Drop any -xinfo option (and its value); the sweep is selected below
    # via sweep.id instead.
    if "-xinfo" in command_line_args:
        idx = command_line_args.index("-xinfo")
        del command_line_args[idx + 1]
        del command_line_args[idx]
    xia2_integrate = XIA2Integrate()
    # import tempfile
    # tmpdir = tempfile.mkdtemp(dir=curdir)
    # Work in a uniquely-named scratch directory under the current dir.
    tmpdir = os.path.join(curdir, str(uuid.uuid4()))
    os.makedirs(tmpdir)
    xia2_integrate.set_working_directory(tmpdir)
    xia2_integrate.add_command_line_args(args.command_line_args)
    xia2_integrate.set_phil_file(os.path.join(curdir, "xia2-working.phil"))
    xia2_integrate.add_command_line_args(["sweep.id=%s" % sweep_id])
    xia2_integrate.set_nproc(nproc)
    xia2_integrate.set_njob(1)
    xia2_integrate.set_mp_mode("serial")
    auto_logfiler(xia2_integrate)
    sweep_tmp_dir = os.path.join(tmpdir, crystal_id, wavelength_id, sweep_id)
    sweep_target_dir = os.path.join(curdir, crystal_id, wavelength_id, sweep_id)
    output = None
    success = False
    xsweep_dict = None
    try:
        xia2_integrate.run()
        output = get_sweep_output_only(xia2_integrate.get_all_output())
        success = True
    except Exception as e:
        logger.warning("Processing sweep %s failed: %s", sweep_id, str(e))
        if not failover:
            raise
    finally:
        # Whether it worked or not: rewrite absolute paths in the produced
        # json/expt files to point at the final location, publish the
        # results, and clean up the scratch directory.
        from xia2.Schema.XProject import XProject
        xia2_json = os.path.join(tmpdir, "xia2.json")
        json_files = glob.glob(os.path.join(sweep_tmp_dir, "*", "*.json"))
        json_files.extend(glob.glob(os.path.join(sweep_tmp_dir, "*", "*.expt")))
        if os.path.exists(xia2_json):
            json_files.append(xia2_json)
        import fileinput
        # fileinput with inplace=1 redirects stdout into each file, so the
        # print() below writes the substituted line back.
        for line in fileinput.FileInput(files=json_files, inplace=1):
            line = line.replace(sweep_tmp_dir, sweep_target_dir)
            print(line)
        if os.path.exists(xia2_json):
            new_json = os.path.join(curdir, "xia2-%s.json" % sweep_id)
            shutil.copyfile(xia2_json, new_json)
        move_output_folder(sweep_tmp_dir, sweep_target_dir)
        if success:
            # Re-load the project to extract the serialized sweep record.
            xinfo = XProject.from_json(new_json)
            xcryst = list(xinfo.get_crystals().values())[0]
            xsweep = xcryst.get_xwavelength(wavelength_id).get_sweeps()[0]
            xsweep_dict = xsweep.to_dict()
        shutil.rmtree(tmpdir, ignore_errors=True)
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir, ignore_errors=True)
        DriverFactory.set_driver_type(default_driver_type)
    return success, output, xsweep_dict
def get_sweep_output_only(all_output):
    """Return only the per-sweep portion of a xia2 log.

    Capturing starts after the line beginning with "Command line: " (that
    line itself is excluded) and stops at the line beginning with
    "Processing took " (also excluded).
    """
    collected = []
    capturing = False
    for entry in all_output:
        if entry.startswith("Processing took "):
            break
        if capturing:
            collected.append(entry)
        elif entry.startswith("Command line: "):
            capturing = True
    return "".join(collected)
def move_output_folder(sweep_tmp_dir, sweep_target_dir):
    """Replace sweep_target_dir with the contents of sweep_tmp_dir.

    Any pre-existing sweep_target_dir is deleted first, then sweep_tmp_dir
    is moved into its place.

    Fix: the old docstring claimed this function also updated absolute paths
    inside xia2.json files — that rewriting is actually done by the caller
    (process_one_sweep) before this is invoked. The stale commented-out
    debug print has been removed as well.
    """
    if os.path.exists(sweep_target_dir):
        shutil.rmtree(sweep_target_dir)
    shutil.move(sweep_tmp_dir, sweep_target_dir)
| xia2/xia2 | src/xia2/Applications/xia2_helpers.py | Python | bsd-3-clause | 4,081 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
import shutil
# Environment-variable names that may override the home directory / profile.
HOME_DIR_ENV = 'AVA_HOME'
PROFILE_ENV = 'AVA_PROFILE'

# Names of the standard sub-directories under the runtime home directory.
HOME_DIR_NAME = u'home'
PKGS_DIR_NAME = u'pkgs'
LOGS_DIR_NAME = u'logs'
DATA_DIR_NAME = u'data'
CONF_DIR_NAME = u'conf'

# NOTE(review): basicConfig at import time configures the root logger as a
# side effect for every consumer of this module.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',)
_logger = logging.getLogger(__name__)
class Environment(object):
    """
    Encapsulates the runtime environment.
    """
    def __init__(self, home=None, a_profile=None):
        # NOTE(review): `home` and `a_profile` are accepted but never used —
        # every path is derived from base_path(). Confirm whether the
        # AVA_HOME / AVA_PROFILE overrides were meant to be honored here.
        # Determines the location of the base directory which contains files shared by all users.
        # This script assumes it is located at 'eavatar/runtime' sub-directory.
        from ava.util import base_path
        self.base_dir = base_path()
        # Determines the location of the home directory.
        self.home_dir = os.path.join(self.base_dir, HOME_DIR_NAME)
        self.home_dir = os.path.abspath(self.home_dir)
        # Standard sub-directories of the home directory.
        self.conf_dir = os.path.join(self.home_dir, CONF_DIR_NAME)
        self.pkgs_dir = os.path.join(self.home_dir, PKGS_DIR_NAME)
        self.data_dir = os.path.join(self.home_dir, DATA_DIR_NAME)
        self.logs_dir = os.path.join(self.home_dir, LOGS_DIR_NAME)
        _logger.debug("Home dir: %s", self.home_dir)
        # Flag indicating if the runtime is launched by a shell.
        self.has_shell = False
        self.shell_port = 0
# The global environment (process-wide singleton, created lazily).
_environ = None


def get_environ(home=None, profile=None):
    """Return the shared Environment, creating it on first call.

    `home` and `profile` only take effect on the very first call; later
    calls ignore them and return the cached instance.
    """
    global _environ
    if _environ is None:
        _environ = Environment(home, profile)
    return _environ
def base_dir():
    """Return the base directory shared by all users."""
    env = get_environ()
    return env.base_dir
def home_dir():
    """Return the runtime home directory."""
    env = get_environ()
    return env.home_dir
def conf_dir():
    """Return the directory holding configuration files."""
    env = get_environ()
    return env.conf_dir
def data_dir():
    """Return the directory holding data files."""
    env = get_environ()
    return env.data_dir
def logs_dir():
    """Return the directory holding log files."""
    env = get_environ()
    return env.logs_dir
def pkgs_dir():
    """Return the directory holding package files."""
    env = get_environ()
    return env.pkgs_dir
| eavatar/ava-srv | src/eavatar.ava/ava/runtime/environ.py | Python | bsd-3-clause | 2,446 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Fix: read the long-description sources with context managers so the file
# handles are closed deterministically instead of leaking until GC.
with open('README.rst') as f:
    readme = f.read()
with open('HISTORY.rst') as f:
    history = f.read().replace('.. :changelog:', '')

requirements = [
    # TODO: put package requirements here
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='sample_project',
    version='0.1.0',
    description='Sample project built using cookiecutter',
    long_description=readme + '\n\n' + history,
    author='Saksham Gautam',
    author_email='saksham.gautam@gmail.com',
    url='https://github.com/saksham/sample_project',
    packages=[
        'sample_project',
    ],
    package_dir={'sample_project':
                 'sample_project'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='sample_project',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Python ctypes bindings for VLC
#
# Copyright (C) 2009-2012 the VideoLAN team
# $Id: $
#
# Authors: Olivier Aubert <contact at olivieraubert.net>
# Jean Brouwers <MrJean1 at gmail.com>
# Geoff Salmon <geoff.salmon at gmail.com>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
"""This module provides bindings for the LibVLC public API, see
U{http://wiki.videolan.org/LibVLC}.
You can find the documentation and a README file with some examples
at U{http://www.advene.org/download/python-ctypes/}.
Basically, the most important class is L{Instance}, which is used
to create a libvlc instance. From this instance, you then create
L{MediaPlayer} and L{MediaListPlayer} instances.
Alternatively, you may create instances of the L{MediaPlayer} and
L{MediaListPlayer} class directly and an instance of L{Instance}
will be implicitly created. The latter can be obtained using the
C{get_instance} method of L{MediaPlayer} and L{MediaListPlayer}.
"""
import ctypes
from ctypes.util import find_library
import os
import sys
import functools
# Used by EventManager in override.py
from inspect import getargspec
__version__ = "N/A"
build_date = "Mon Mar 20 11:04:27 2017"
# The libvlc doc states that filenames are expected to be in UTF8, do
# not rely on sys.getfilesystemencoding() which will be confused,
# esp. on windows.
DEFAULT_ENCODING = 'utf-8'
if sys.version_info[0] > 2:
    # Python 3: alias the Python-2 names so the rest of the module can stay
    # version-agnostic (basestring becomes a tuple usable with isinstance).
    str = str
    unicode = str
    bytes = bytes
    basestring = (str, bytes)
    PYTHON3 = True

    def str_to_bytes(s):
        """Translate string or bytes to bytes.
        """
        if isinstance(s, str):
            return bytes(s, DEFAULT_ENCODING)
        else:
            return s

    def bytes_to_str(b):
        """Translate bytes to string.
        """
        if isinstance(b, bytes):
            return b.decode(DEFAULT_ENCODING)
        else:
            return b
else:
    # Python 2: keep the native names; bytes is just an alias of str.
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring
    PYTHON3 = False

    def str_to_bytes(s):
        """Translate string or bytes to bytes.
        """
        if isinstance(s, unicode):
            return s.encode(DEFAULT_ENCODING)
        else:
            return s

    def bytes_to_str(b):
        """Translate bytes to unicode string.
        """
        if isinstance(b, str):
            return unicode(b, DEFAULT_ENCODING)
        else:
            return b
# Internal sentinel used as a default argument to prevent internal
# classes from being directly instantiated by callers (see _Constructor).
_internal_guard = object()
def find_lib():
    """(INTERNAL) Locate and load the libvlc dynamic library.

    Returns a 2-tuple (dll, plugin_path): the loaded ctypes.CDLL handle
    and, on Windows/macOS, the VLC installation/plugin directory when it
    could be determined (else None).

    Raises NotImplementedError on unsupported platforms; may raise
    OSError when no loadable libvlc is found.
    """
    dll = None
    plugin_path = None
    if sys.platform.startswith('linux'):
        # NOTE(review): if find_library('vlc') returns None, CDLL(None)
        # loads the main program's namespace on Linux rather than failing;
        # only an OSError triggers the fixed-soname fallback -- confirm
        # this is the intended order.
        p = find_library('vlc')
        try:
            dll = ctypes.CDLL(p)
        except OSError:  # may fail
            dll = ctypes.CDLL('libvlc.so.5')
    elif sys.platform.startswith('win'):
        libname = 'libvlc.dll'
        p = find_library(libname)
        if p is None:
            try:  # some registry settings
                # leaner than win32api, win32con
                if PYTHON3:
                    import winreg as w
                else:
                    import _winreg as w
                # Check the machine-wide hive first, then the per-user one.
                for r in w.HKEY_LOCAL_MACHINE, w.HKEY_CURRENT_USER:
                    try:
                        r = w.OpenKey(r, 'Software\\VideoLAN\\VLC')
                        plugin_path, _ = w.QueryValueEx(r, 'InstallDir')
                        w.CloseKey(r)
                        break
                    except w.error:
                        pass
            except ImportError:  # no PyWin32
                pass
            if plugin_path is None:
                # try some standard locations.
                # NOTE(review): HOMEDRIVE already contains the colon (e.g.
                # 'C:'), so the '{homedir}:' templates expand to 'C::...';
                # those two candidates look malformed -- verify on Windows.
                programfiles = os.environ["ProgramFiles"]
                homedir = os.environ["HOMEDRIVE"]
                for p in ('{programfiles}\\VideoLan{libname}', '{homedir}:\\VideoLan{libname}',
                          '{programfiles}{libname}', '{homedir}:{libname}'):
                    p = p.format(homedir = homedir,
                                 programfiles = programfiles,
                                 libname = '\\VLC\\' + libname)
                    if os.path.exists(p):
                        plugin_path = os.path.dirname(p)
                        break
            if plugin_path is not None:  # try loading
                # Temporarily chdir into the install dir so libvlc.dll can
                # resolve its own dependent DLLs from there.
                p = os.getcwd()
                os.chdir(plugin_path)
                # if chdir failed, this will raise an exception
                dll = ctypes.CDLL(libname)
                # restore cwd after dll has been loaded
                os.chdir(p)
            else:  # may fail
                dll = ctypes.CDLL(libname)
        else:
            plugin_path = os.path.dirname(p)
            dll = ctypes.CDLL(p)
    elif sys.platform.startswith('darwin'):
        # FIXME: should find a means to configure path
        d = '/Applications/VLC.app/Contents/MacOS/'
        p = d + 'lib/libvlc.dylib'
        if os.path.exists(p):
            dll = ctypes.CDLL(p)
            # Locate the plugin directory next to the dylib.
            for p in ('modules', 'plugins'):
                p = d + p
                if os.path.isdir(p):
                    plugin_path = p
                    break
        else:  # hope, some PATH is set...
            dll = ctypes.CDLL('libvlc.dylib')
    else:
        raise NotImplementedError('%s: %s not supported' % (sys.argv[0], sys.platform))
    return (dll, plugin_path)
# plugin_path used on win32 and MacOS in override.py
# Loaded eagerly at import time; an import of this module fails here when
# no libvlc library can be located.
dll, plugin_path = find_lib()
class VLCException(Exception):
    """Exception raised by libvlc methods.
    """
# Types accepted where the C API expects an integer: on Python 2 both
# int and long; on Python 3 the `long` lookup raises NameError and only
# int is used.
try:
    _Ints = (int, long)
except NameError:  # no long in Python 3+
    _Ints = int
# Python sequence types that ListPOINTER converts to C arrays.
_Seqs = (list, tuple)
# Used for handling *event_manager() methods.
class memoize_parameterless(object):
    """Decorator. Caches a parameterless method's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).
    Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary
    """
    def __init__(self, func):
        # func: the parameterless method being memoized.
        self.func = func
        # Maps each instance (the sole call argument) to its cached result.
        # NOTE(review): entries are never evicted, so cached instances stay
        # alive for the lifetime of the decorated method.
        self._cache = {}
    def __call__(self, obj):
        # EAFP: a cache miss raises KeyError exactly once per instance.
        try:
            return self._cache[obj]
        except KeyError:
            v = self._cache[obj] = self.func(obj)
            return v
    def __repr__(self):
        """Return the function's docstring.
        """
        # Fix: __doc__ is None for undocumented functions and __repr__ must
        # return a str -- fall back to the default object representation
        # instead of raising TypeError.
        return self.func.__doc__ or object.__repr__(self)
    def __get__(self, obj, objtype):
        """Support instance methods.
        """
        return functools.partial(self.__call__, obj)
# Default instance. It is used to instantiate classes directly in the
# OO-wrapper.
_default_instance = None
def get_default_instance():
    """Return the default VLC.Instance.

    Lazily creates the module-wide Instance on first call and returns the
    same object on every subsequent call.
    """
    global _default_instance
    if _default_instance is None:
        _default_instance = Instance()
    return _default_instance
_Cfunctions = {}  # from LibVLC __version__
_Globals = globals()  # sys.modules[__name__].__dict__
def _Cfunction(name, flags, errcheck, *types):
    """(INTERNAL) New ctypes function binding.

    @param name: libvlc symbol name; must exist on the loaded dll AND have
        a Python stub of the same name in this module.
    @param flags: paramflags tuple passed to the CFUNCTYPE prototype.
    @param errcheck: optional ctypes errcheck callable, or None.
    @param types: result ctype followed by the argument ctypes.
    @raise NameError: if either lookup precondition fails.
    """
    if hasattr(dll, name) and name in _Globals:
        p = ctypes.CFUNCTYPE(*types)
        f = p((name, dll), flags)
        if errcheck is not None:
            f.errcheck = errcheck
        # replace the Python function
        # in this module, but only when
        # running as python -O or -OO
        if __debug__:
            _Cfunctions[name] = f
        else:
            _Globals[name] = f
        return f
    raise NameError('no function %r' % (name,))
def _Cobject(cls, ctype):
"""(INTERNAL) New instance from ctypes.
"""
o = object.__new__(cls)
o._as_parameter_ = ctype
return o
def _Constructor(cls, ptr=_internal_guard):
    """(INTERNAL) New wrapper from ctypes.

    Wraps a raw libvlc pointer in *cls*; a NULL pointer maps to None.
    Calling without an explicit pointer is an internal-use violation.
    """
    if ptr == _internal_guard:
        raise VLCException("(INTERNAL) ctypes class. You should get references for this class through methods of the LibVLC API.")
    return None if ptr is None or ptr == 0 else _Cobject(cls, ctypes.c_void_p(ptr))
class _Cstruct(ctypes.Structure):
"""(INTERNAL) Base class for ctypes structures.
"""
_fields_ = [] # list of 2-tuples ('name', ctyptes.<type>)
def __str__(self):
l = [' %s:\t%s' % (n, getattr(self, n)) for n, _ in self._fields_]
return '\n'.join([self.__class__.__name__] + l)
def __repr__(self):
return '%s.%s' % (self.__class__.__module__, self)
class _Ctype(object):
"""(INTERNAL) Base class for ctypes.
"""
@staticmethod
def from_param(this): # not self
"""(INTERNAL) ctypes parameter conversion method.
"""
if this is None:
return None
return this._as_parameter_
class ListPOINTER(object):
    """Just like a POINTER but accept a list of ctype as an argument.
    """
    def __init__(self, etype):
        # etype: the ctypes element type of the marshalled array.
        self.etype = etype
    def from_param(self, param):
        # A Python list/tuple is marshalled as a C array of etype.
        if isinstance(param, _Seqs):
            return (self.etype * len(param))(*param)
        else:
            # NOTE(review): ctypes.POINTER expects a ctypes *type*; passing
            # an arbitrary non-sequence instance here raises TypeError.
            # Possibly intended to be ctypes.POINTER(self.etype) -- confirm
            # against actual callers before changing.
            return ctypes.POINTER(param)
# errcheck functions for some native functions.
def string_result(result, func, arguments):
"""Errcheck function. Returns a string and frees the original pointer.
It assumes the result is a char *.
"""
if result:
# make a python string copy
s = bytes_to_str(ctypes.string_at(result))
# free original string ptr
libvlc_free(result)
return s
return None
def class_result(classname):
    """Errcheck function. Returns a function that creates the specified class.
    """
    def wrap_errcheck(result, func, arguments):
        return None if result is None else classname(result)
    return wrap_errcheck
# Wrapper for the opaque struct libvlc_log_t
class Log(ctypes.Structure):
    # Opaque: no _fields_; instances are only ever handled through Log_ptr.
    pass
Log_ptr = ctypes.POINTER(Log)
# FILE* ctypes wrapper, copied from
# http://svn.python.org/projects/ctypes/trunk/ctypeslib/ctypeslib/contrib/pythonhdr.py
class FILE(ctypes.Structure):
    # Opaque C stdio FILE; used only via FILE_ptr.
    pass
FILE_ptr = ctypes.POINTER(FILE)
if PYTHON3:
    # Python 3: file objects wrap plain file descriptors; convert between
    # fds and Python file objects through the CPython C API.
    PyFile_FromFd = ctypes.pythonapi.PyFile_FromFd
    PyFile_FromFd.restype = ctypes.py_object
    PyFile_FromFd.argtypes = [ctypes.c_int,
                              ctypes.c_char_p,
                              ctypes.c_char_p,
                              ctypes.c_int,
                              ctypes.c_char_p,
                              ctypes.c_char_p,
                              ctypes.c_char_p,
                              ctypes.c_int ]
    PyFile_AsFd = ctypes.pythonapi.PyObject_AsFileDescriptor
    PyFile_AsFd.restype = ctypes.c_int
    PyFile_AsFd.argtypes = [ctypes.py_object]
else:
    # Python 2: file objects wrap C stdio FILE*; use the 2.x-only C API.
    PyFile_FromFile = ctypes.pythonapi.PyFile_FromFile
    PyFile_FromFile.restype = ctypes.py_object
    PyFile_FromFile.argtypes = [FILE_ptr,
                                ctypes.c_char_p,
                                ctypes.c_char_p,
                                ctypes.CFUNCTYPE(ctypes.c_int, FILE_ptr)]
    PyFile_AsFile = ctypes.pythonapi.PyFile_AsFile
    PyFile_AsFile.restype = FILE_ptr
    PyFile_AsFile.argtypes = [ctypes.py_object]
# Generated enum types #
class _Enum(ctypes.c_uint):
'''(INTERNAL) Base class
'''
_enum_names_ = {}
def __str__(self):
n = self._enum_names_.get(self.value, '') or ('FIXME_(%r)' % (self.value,))
return '.'.join((self.__class__.__name__, n))
def __hash__(self):
return self.value
def __repr__(self):
return '.'.join((self.__class__.__module__, self.__str__()))
def __eq__(self, other):
return ( (isinstance(other, _Enum) and self.value == other.value)
or (isinstance(other, _Ints) and self.value == other) )
def __ne__(self, other):
return not self.__eq__(other)
class LogLevel(_Enum):
    '''Logging messages level.
    Note: future libvlc versions may define new levels; value 1 is absent
    from the generated table.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'DEBUG',
        2: 'NOTICE',
        3: 'WARNING',
        4: 'ERROR',
    }
# Generated attribute bindings, one per _enum_names_ entry.
LogLevel.DEBUG = LogLevel(0)
LogLevel.ERROR = LogLevel(4)
LogLevel.NOTICE = LogLevel(2)
LogLevel.WARNING = LogLevel(3)
class MediaDiscovererCategory(_Enum):
    '''Category of a media discoverer
    See libvlc_media_discoverer_list_get().
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'devices',
        1: 'lan',
        2: 'podcasts',
        3: 'localdirs',
    }
# Generated attribute bindings, one per _enum_names_ entry.
MediaDiscovererCategory.devices = MediaDiscovererCategory(0)
MediaDiscovererCategory.lan = MediaDiscovererCategory(1)
MediaDiscovererCategory.localdirs = MediaDiscovererCategory(3)
MediaDiscovererCategory.podcasts = MediaDiscovererCategory(2)
class DialogQuestionType(_Enum):
    '''@defgroup libvlc_dialog libvlc dialog
    @ingroup libvlc
    @{
    @file
    libvlc dialog external api.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'NORMAL',
        1: 'WARNING',
        2: 'CRITICAL',
    }
# Generated attribute bindings, one per _enum_names_ entry.
DialogQuestionType.CRITICAL = DialogQuestionType(2)
DialogQuestionType.NORMAL = DialogQuestionType(0)
DialogQuestionType.WARNING = DialogQuestionType(1)
class EventType(_Enum):
    '''Event types.
    '''
    # value -> name table generated from the libvlc headers; each 0x100
    # stride groups a libvlc event family.
    _enum_names_ = {
        # 0x000: media events
        0: 'MediaMetaChanged',
        1: 'MediaSubItemAdded',
        2: 'MediaDurationChanged',
        3: 'MediaParsedChanged',
        4: 'MediaFreed',
        5: 'MediaStateChanged',
        6: 'MediaSubItemTreeAdded',
        # 0x100: media player events
        0x100: 'MediaPlayerMediaChanged',
        257: 'MediaPlayerNothingSpecial',
        258: 'MediaPlayerOpening',
        259: 'MediaPlayerBuffering',
        260: 'MediaPlayerPlaying',
        261: 'MediaPlayerPaused',
        262: 'MediaPlayerStopped',
        263: 'MediaPlayerForward',
        264: 'MediaPlayerBackward',
        265: 'MediaPlayerEndReached',
        266: 'MediaPlayerEncounteredError',
        267: 'MediaPlayerTimeChanged',
        268: 'MediaPlayerPositionChanged',
        269: 'MediaPlayerSeekableChanged',
        270: 'MediaPlayerPausableChanged',
        271: 'MediaPlayerTitleChanged',
        272: 'MediaPlayerSnapshotTaken',
        273: 'MediaPlayerLengthChanged',
        274: 'MediaPlayerVout',
        275: 'MediaPlayerScrambledChanged',
        276: 'MediaPlayerESAdded',
        277: 'MediaPlayerESDeleted',
        278: 'MediaPlayerESSelected',
        279: 'MediaPlayerCorked',
        280: 'MediaPlayerUncorked',
        281: 'MediaPlayerMuted',
        282: 'MediaPlayerUnmuted',
        283: 'MediaPlayerAudioVolume',
        284: 'MediaPlayerAudioDevice',
        285: 'MediaPlayerChapterChanged',
        # 0x200: media list events
        0x200: 'MediaListItemAdded',
        513: 'MediaListWillAddItem',
        514: 'MediaListItemDeleted',
        515: 'MediaListWillDeleteItem',
        516: 'MediaListEndReached',
        # 0x300: media list view events
        0x300: 'MediaListViewItemAdded',
        769: 'MediaListViewWillAddItem',
        770: 'MediaListViewItemDeleted',
        771: 'MediaListViewWillDeleteItem',
        # 0x400: media list player events
        0x400: 'MediaListPlayerPlayed',
        1025: 'MediaListPlayerNextItemSet',
        1026: 'MediaListPlayerStopped',
        # 0x500: discoverer events
        0x500: 'MediaDiscovererStarted',
        1281: 'MediaDiscovererEnded',
        1282: 'RendererDiscovererItemAdded',
        1283: 'RendererDiscovererItemDeleted',
        # 0x600: VLM events
        0x600: 'VlmMediaAdded',
        1537: 'VlmMediaRemoved',
        1538: 'VlmMediaChanged',
        1539: 'VlmMediaInstanceStarted',
        1540: 'VlmMediaInstanceStopped',
        1541: 'VlmMediaInstanceStatusInit',
        1542: 'VlmMediaInstanceStatusOpening',
        1543: 'VlmMediaInstanceStatusPlaying',
        1544: 'VlmMediaInstanceStatusPause',
        1545: 'VlmMediaInstanceStatusEnd',
        1546: 'VlmMediaInstanceStatusError',
    }
# Generated attribute bindings (alphabetical), one per _enum_names_ entry.
EventType.MediaDiscovererEnded = EventType(1281)
EventType.MediaDiscovererStarted = EventType(0x500)
EventType.MediaDurationChanged = EventType(2)
EventType.MediaFreed = EventType(4)
EventType.MediaListEndReached = EventType(516)
EventType.MediaListItemAdded = EventType(0x200)
EventType.MediaListItemDeleted = EventType(514)
EventType.MediaListPlayerNextItemSet = EventType(1025)
EventType.MediaListPlayerPlayed = EventType(0x400)
EventType.MediaListPlayerStopped = EventType(1026)
EventType.MediaListViewItemAdded = EventType(0x300)
EventType.MediaListViewItemDeleted = EventType(770)
EventType.MediaListViewWillAddItem = EventType(769)
EventType.MediaListViewWillDeleteItem = EventType(771)
EventType.MediaListWillAddItem = EventType(513)
EventType.MediaListWillDeleteItem = EventType(515)
EventType.MediaMetaChanged = EventType(0)
EventType.MediaParsedChanged = EventType(3)
EventType.MediaPlayerAudioDevice = EventType(284)
EventType.MediaPlayerAudioVolume = EventType(283)
EventType.MediaPlayerBackward = EventType(264)
EventType.MediaPlayerBuffering = EventType(259)
EventType.MediaPlayerChapterChanged = EventType(285)
EventType.MediaPlayerCorked = EventType(279)
EventType.MediaPlayerESAdded = EventType(276)
EventType.MediaPlayerESDeleted = EventType(277)
EventType.MediaPlayerESSelected = EventType(278)
EventType.MediaPlayerEncounteredError = EventType(266)
EventType.MediaPlayerEndReached = EventType(265)
EventType.MediaPlayerForward = EventType(263)
EventType.MediaPlayerLengthChanged = EventType(273)
EventType.MediaPlayerMediaChanged = EventType(0x100)
EventType.MediaPlayerMuted = EventType(281)
EventType.MediaPlayerNothingSpecial = EventType(257)
EventType.MediaPlayerOpening = EventType(258)
EventType.MediaPlayerPausableChanged = EventType(270)
EventType.MediaPlayerPaused = EventType(261)
EventType.MediaPlayerPlaying = EventType(260)
EventType.MediaPlayerPositionChanged = EventType(268)
EventType.MediaPlayerScrambledChanged = EventType(275)
EventType.MediaPlayerSeekableChanged = EventType(269)
EventType.MediaPlayerSnapshotTaken = EventType(272)
EventType.MediaPlayerStopped = EventType(262)
EventType.MediaPlayerTimeChanged = EventType(267)
EventType.MediaPlayerTitleChanged = EventType(271)
EventType.MediaPlayerUncorked = EventType(280)
EventType.MediaPlayerUnmuted = EventType(282)
EventType.MediaPlayerVout = EventType(274)
EventType.MediaStateChanged = EventType(5)
EventType.MediaSubItemAdded = EventType(1)
EventType.MediaSubItemTreeAdded = EventType(6)
EventType.RendererDiscovererItemAdded = EventType(1282)
EventType.RendererDiscovererItemDeleted = EventType(1283)
EventType.VlmMediaAdded = EventType(0x600)
EventType.VlmMediaChanged = EventType(1538)
EventType.VlmMediaInstanceStarted = EventType(1539)
EventType.VlmMediaInstanceStatusEnd = EventType(1545)
EventType.VlmMediaInstanceStatusError = EventType(1546)
EventType.VlmMediaInstanceStatusInit = EventType(1541)
EventType.VlmMediaInstanceStatusOpening = EventType(1542)
EventType.VlmMediaInstanceStatusPause = EventType(1544)
EventType.VlmMediaInstanceStatusPlaying = EventType(1543)
EventType.VlmMediaInstanceStopped = EventType(1540)
EventType.VlmMediaRemoved = EventType(1537)
class Meta(_Enum):
    '''Meta data types.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'Title',
        1: 'Artist',
        2: 'Genre',
        3: 'Copyright',
        4: 'Album',
        5: 'TrackNumber',
        6: 'Description',
        7: 'Rating',
        8: 'Date',
        9: 'Setting',
        10: 'URL',
        11: 'Language',
        12: 'NowPlaying',
        13: 'Publisher',
        14: 'EncodedBy',
        15: 'ArtworkURL',
        16: 'TrackID',
        17: 'TrackTotal',
        18: 'Director',
        19: 'Season',
        20: 'Episode',
        21: 'ShowName',
        22: 'Actors',
        23: 'AlbumArtist',
        24: 'DiscNumber',
        25: 'DiscTotal',
    }
# Generated attribute bindings (alphabetical), one per _enum_names_ entry.
Meta.Actors = Meta(22)
Meta.Album = Meta(4)
Meta.AlbumArtist = Meta(23)
Meta.Artist = Meta(1)
Meta.ArtworkURL = Meta(15)
Meta.Copyright = Meta(3)
Meta.Date = Meta(8)
Meta.Description = Meta(6)
Meta.Director = Meta(18)
Meta.DiscNumber = Meta(24)
Meta.DiscTotal = Meta(25)
Meta.EncodedBy = Meta(14)
Meta.Episode = Meta(20)
Meta.Genre = Meta(2)
Meta.Language = Meta(11)
Meta.NowPlaying = Meta(12)
Meta.Publisher = Meta(13)
Meta.Rating = Meta(7)
Meta.Season = Meta(19)
Meta.Setting = Meta(9)
Meta.ShowName = Meta(21)
Meta.Title = Meta(0)
Meta.TrackID = Meta(16)
Meta.TrackNumber = Meta(5)
Meta.TrackTotal = Meta(17)
Meta.URL = Meta(10)
class State(_Enum):
    '''Note the order of libvlc_state_t enum must match exactly the order of
    See mediacontrol_playerstatus, See input_state_e enums,
    and videolan.libvlc.state (at bindings/cil/src/media.cs).
    expected states by web plugins are:
    idle/close=0, opening=1, playing=3, paused=4,
    stopping=5, ended=6, error=7.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'NothingSpecial',
        1: 'Opening',
        2: 'Buffering',
        3: 'Playing',
        4: 'Paused',
        5: 'Stopped',
        6: 'Ended',
        7: 'Error',
    }
# Generated attribute bindings, one per _enum_names_ entry.
State.Buffering = State(2)
State.Ended = State(6)
State.Error = State(7)
State.NothingSpecial = State(0)
State.Opening = State(1)
State.Paused = State(4)
State.Playing = State(3)
State.Stopped = State(5)
class TrackType(_Enum):
    '''N/A
    '''
    # value -> name table generated from the libvlc headers; -1 marks an
    # unknown track type.
    _enum_names_ = {
        -1: 'unknown',
        0: 'audio',
        1: 'video',
        2: 'text',
    }
# Generated attribute bindings, one per _enum_names_ entry.
TrackType.audio = TrackType(0)
TrackType.text = TrackType(2)
TrackType.unknown = TrackType(-1)
TrackType.video = TrackType(1)
class VideoOrient(_Enum):
    '''N/A
    '''
    # NOTE(review): each name appears twice in this generated table (e.g.
    # both 0 and 2 map to 'left'); the later attribute assignment below
    # wins, so VideoOrient.left is VideoOrient(2), .right (3), .top (6)
    # and .bottom (7). Upstream libvlc distinguishes top-left/bottom-right
    # etc. -- confirm against the generator.
    _enum_names_ = {
        0: 'left',
        1: 'right',
        2: 'left',
        3: 'right',
        4: 'top',
        5: 'bottom',
        6: 'top',
        7: 'bottom',
    }
# Generated attribute bindings; duplicate names overwrite earlier ones.
VideoOrient.bottom = VideoOrient(5)
VideoOrient.bottom = VideoOrient(7)
VideoOrient.left = VideoOrient(0)
VideoOrient.left = VideoOrient(2)
VideoOrient.right = VideoOrient(1)
VideoOrient.right = VideoOrient(3)
VideoOrient.top = VideoOrient(4)
VideoOrient.top = VideoOrient(6)
class VideoProjection(_Enum):
    '''N/A
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'rectangular',
        1: 'equirectangular',
        0x100: 'standard',
    }
# Generated attribute bindings, one per _enum_names_ entry.
VideoProjection.equirectangular = VideoProjection(1)
VideoProjection.rectangular = VideoProjection(0)
VideoProjection.standard = VideoProjection(0x100)
class MediaType(_Enum):
    '''Media type
    See libvlc_media_get_type.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'unknown',
        1: 'file',
        2: 'directory',
        3: 'disc',
        4: 'stream',
        5: 'playlist',
    }
# Generated attribute bindings, one per _enum_names_ entry.
MediaType.directory = MediaType(2)
MediaType.disc = MediaType(3)
MediaType.file = MediaType(1)
MediaType.playlist = MediaType(5)
MediaType.stream = MediaType(4)
MediaType.unknown = MediaType(0)
class MediaParseFlag(_Enum):
    '''Parse flags used by libvlc_media_parse_with_options()
    See libvlc_media_parse_with_options.
    '''
    # NOTE(review): 'local' and 'network' each appear twice in this
    # generated bit-flag table; the later attribute assignment below wins
    # (local -> 0x2, network -> 0x4). Upstream uses distinct names for the
    # fetch_* flags -- confirm against the generator.
    _enum_names_ = {
        0x0: 'local',
        0x1: 'network',
        0x2: 'local',
        0x4: 'network',
        0x8: 'interact',
    }
# Generated attribute bindings; duplicate names overwrite earlier ones.
MediaParseFlag.interact = MediaParseFlag(0x8)
MediaParseFlag.local = MediaParseFlag(0x0)
MediaParseFlag.local = MediaParseFlag(0x2)
MediaParseFlag.network = MediaParseFlag(0x1)
MediaParseFlag.network = MediaParseFlag(0x4)
class MediaParsedStatus(_Enum):
    '''Parse status used sent by libvlc_media_parse_with_options() or returned by
    libvlc_media_get_parsed_status()
    See libvlc_media_parse_with_options
    See libvlc_media_get_parsed_status.
    '''
    # value -> name table generated from the libvlc headers (starts at 1).
    _enum_names_ = {
        1: 'skipped',
        2: 'failed',
        3: 'timeout',
        4: 'done',
    }
# Generated attribute bindings, one per _enum_names_ entry.
MediaParsedStatus.done = MediaParsedStatus(4)
MediaParsedStatus.failed = MediaParsedStatus(2)
MediaParsedStatus.skipped = MediaParsedStatus(1)
MediaParsedStatus.timeout = MediaParsedStatus(3)
class MediaSlaveType(_Enum):
    '''Type of a media slave: subtitle or audio.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'subtitle',
        1: 'audio',
    }
# Generated attribute bindings, one per _enum_names_ entry.
MediaSlaveType.audio = MediaSlaveType(1)
MediaSlaveType.subtitle = MediaSlaveType(0)
class VideoMarqueeOption(_Enum):
    '''Marq options definition.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'Enable',
        1: 'Text',
        2: 'Color',
        3: 'Opacity',
        4: 'Position',
        5: 'Refresh',
        6: 'Size',
        7: 'Timeout',
        8: 'marquee_X',
        9: 'marquee_Y',
    }
# Generated attribute bindings, one per _enum_names_ entry.
VideoMarqueeOption.Color = VideoMarqueeOption(2)
VideoMarqueeOption.Enable = VideoMarqueeOption(0)
VideoMarqueeOption.Opacity = VideoMarqueeOption(3)
VideoMarqueeOption.Position = VideoMarqueeOption(4)
VideoMarqueeOption.Refresh = VideoMarqueeOption(5)
VideoMarqueeOption.Size = VideoMarqueeOption(6)
VideoMarqueeOption.Text = VideoMarqueeOption(1)
VideoMarqueeOption.Timeout = VideoMarqueeOption(7)
VideoMarqueeOption.marquee_X = VideoMarqueeOption(8)
VideoMarqueeOption.marquee_Y = VideoMarqueeOption(9)
class NavigateMode(_Enum):
    '''Navigation mode.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'activate',
        1: 'up',
        2: 'down',
        3: 'left',
        4: 'right',
        5: 'popup',
    }
# Generated attribute bindings, one per _enum_names_ entry.
NavigateMode.activate = NavigateMode(0)
NavigateMode.down = NavigateMode(2)
NavigateMode.left = NavigateMode(3)
NavigateMode.popup = NavigateMode(5)
NavigateMode.right = NavigateMode(4)
NavigateMode.up = NavigateMode(1)
class Position(_Enum):
    '''Enumeration of values used to set position (e.g. of video title).
    '''
    # NOTE(review): 'left' and 'right' each appear three times in this
    # generated table; the later attribute assignments below win, so
    # Position.left is Position(7) and Position.right is Position(8).
    # Upstream distinguishes top-left/bottom-left etc. -- confirm against
    # the generator.
    _enum_names_ = {
        -1: 'disable',
        0: 'center',
        1: 'left',
        2: 'right',
        3: 'top',
        4: 'left',
        5: 'right',
        6: 'bottom',
        7: 'left',
        8: 'right',
    }
# Generated attribute bindings; duplicate names overwrite earlier ones.
Position.bottom = Position(6)
Position.center = Position(0)
Position.disable = Position(-1)
Position.left = Position(1)
Position.left = Position(4)
Position.left = Position(7)
Position.right = Position(2)
Position.right = Position(5)
Position.right = Position(8)
Position.top = Position(3)
class VideoLogoOption(_Enum):
    '''Option values for libvlc_video_{get,set}_logo_{int,string}.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'enable',
        1: 'file',
        2: 'logo_x',
        3: 'logo_y',
        4: 'delay',
        5: 'repeat',
        6: 'opacity',
        7: 'position',
    }
# Generated attribute bindings, one per _enum_names_ entry.
VideoLogoOption.delay = VideoLogoOption(4)
VideoLogoOption.enable = VideoLogoOption(0)
VideoLogoOption.file = VideoLogoOption(1)
VideoLogoOption.logo_x = VideoLogoOption(2)
VideoLogoOption.logo_y = VideoLogoOption(3)
VideoLogoOption.opacity = VideoLogoOption(6)
VideoLogoOption.position = VideoLogoOption(7)
VideoLogoOption.repeat = VideoLogoOption(5)
class VideoAdjustOption(_Enum):
    '''Option values for libvlc_video_{get,set}_adjust_{int,float,bool}.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'Enable',
        1: 'Contrast',
        2: 'Brightness',
        3: 'Hue',
        4: 'Saturation',
        5: 'Gamma',
    }
# Generated attribute bindings, one per _enum_names_ entry.
VideoAdjustOption.Brightness = VideoAdjustOption(2)
VideoAdjustOption.Contrast = VideoAdjustOption(1)
VideoAdjustOption.Enable = VideoAdjustOption(0)
VideoAdjustOption.Gamma = VideoAdjustOption(5)
VideoAdjustOption.Hue = VideoAdjustOption(3)
VideoAdjustOption.Saturation = VideoAdjustOption(4)
class AudioOutputDeviceTypes(_Enum):
    '''Audio device types.
    '''
    # value -> name table generated from the libvlc headers; leading
    # underscores keep numeric channel-layout names valid identifiers.
    _enum_names_ = {
        -1: 'Error',
        1: 'Mono',
        2: 'Stereo',
        4: '_2F2R',
        5: '_3F2R',
        6: '_5_1',
        7: '_6_1',
        8: '_7_1',
        10: 'SPDIF',
    }
# Generated attribute bindings, one per _enum_names_ entry.
AudioOutputDeviceTypes.Error = AudioOutputDeviceTypes(-1)
AudioOutputDeviceTypes.Mono = AudioOutputDeviceTypes(1)
AudioOutputDeviceTypes.SPDIF = AudioOutputDeviceTypes(10)
AudioOutputDeviceTypes.Stereo = AudioOutputDeviceTypes(2)
AudioOutputDeviceTypes._2F2R = AudioOutputDeviceTypes(4)
AudioOutputDeviceTypes._3F2R = AudioOutputDeviceTypes(5)
AudioOutputDeviceTypes._5_1 = AudioOutputDeviceTypes(6)
AudioOutputDeviceTypes._6_1 = AudioOutputDeviceTypes(7)
AudioOutputDeviceTypes._7_1 = AudioOutputDeviceTypes(8)
class AudioOutputChannel(_Enum):
    '''Audio channels.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        -1: 'Error',
        1: 'Stereo',
        2: 'RStereo',
        3: 'Left',
        4: 'Right',
        5: 'Dolbys',
    }
# Generated attribute bindings, one per _enum_names_ entry.
AudioOutputChannel.Dolbys = AudioOutputChannel(5)
AudioOutputChannel.Error = AudioOutputChannel(-1)
AudioOutputChannel.Left = AudioOutputChannel(3)
AudioOutputChannel.RStereo = AudioOutputChannel(2)
AudioOutputChannel.Right = AudioOutputChannel(4)
AudioOutputChannel.Stereo = AudioOutputChannel(1)
class MediaPlayerRole(_Enum):
    '''Media player roles.
    \version libvlc 3.0.0 and later.
    see \ref libvlc_media_player_set_role().
    '''
    # value -> name table generated from the libvlc headers; '_None'
    # avoids shadowing the Python builtin None.
    _enum_names_ = {
        0: '_None',
        1: 'Music',
        2: 'Video',
        3: 'Communication',
        4: 'Game',
        5: 'Notification',
        6: 'Animation',
        7: 'Production',
        8: 'Accessibility',
        9: 'Test',
    }
# Generated attribute bindings, one per _enum_names_ entry.
MediaPlayerRole.Accessibility = MediaPlayerRole(8)
MediaPlayerRole.Animation = MediaPlayerRole(6)
MediaPlayerRole.Communication = MediaPlayerRole(3)
MediaPlayerRole.Game = MediaPlayerRole(4)
MediaPlayerRole.Music = MediaPlayerRole(1)
MediaPlayerRole.Notification = MediaPlayerRole(5)
MediaPlayerRole.Production = MediaPlayerRole(7)
MediaPlayerRole.Test = MediaPlayerRole(9)
MediaPlayerRole.Video = MediaPlayerRole(2)
MediaPlayerRole._None = MediaPlayerRole(0)
class PlaybackMode(_Enum):
    '''Defines playback modes for playlist.
    '''
    # value -> name table generated from the libvlc headers.
    _enum_names_ = {
        0: 'default',
        1: 'loop',
        2: 'repeat',
    }
# Generated attribute bindings, one per _enum_names_ entry.
PlaybackMode.default = PlaybackMode(0)
PlaybackMode.loop = PlaybackMode(1)
PlaybackMode.repeat = PlaybackMode(2)
class Callback(ctypes.c_void_p):
    """Callback function notification.
    @param p_event: the event triggering the callback.
    """
    # Opaque function-pointer marker; the concrete CFUNCTYPE prototype of
    # the same name is built in CallbackDecorators below.
    pass
class LogCb(ctypes.c_void_p):
    """Callback prototype for LibVLC log message handler.
    @param data: data pointer as given to L{libvlc_log_set}().
    @param level: message level (@ref libvlc_log_level).
    @param ctx: message context (meta-information about the message).
    @param fmt: printf() format string (as defined by ISO C11).
    @param args: variable argument list for the format @note Log message handlers B{must} be thread-safe. @warning The message context pointer, the format string parameters and the variable arguments are only valid until the callback returns.
    """
    # Opaque function-pointer marker; the concrete CFUNCTYPE prototype of
    # the same name is built in CallbackDecorators below.
    pass
class MediaOpenCb(ctypes.c_void_p):
    """Callback prototype to open a custom bitstream input media.
    The same media item can be opened multiple times. Each time, this callback
    is invoked. It should allocate and initialize any instance-specific
    resources, then store them in *datap. The instance resources can be freed
    in the @ref libvlc_media_close_cb callback.
    @param opaque: private pointer as passed to L{libvlc_media_new_callbacks}().
    @return: datap storage space for a private data pointer, sizep byte length of the bitstream or UINT64_MAX if unknown.
    """
    # Opaque function-pointer marker; the concrete CFUNCTYPE prototype of
    # the same name is built in CallbackDecorators below.
    pass
class MediaReadCb(ctypes.c_void_p):
    """Callback prototype to read data from a custom bitstream input media.
    @param opaque: private pointer as set by the @ref libvlc_media_open_cb callback.
    @param buf: start address of the buffer to read data into.
    @param len: bytes length of the buffer.
    @return: strictly positive number of bytes read, 0 on end-of-stream, or -1 on non-recoverable error @note If no data is immediately available, then the callback should sleep. @warning The application is responsible for avoiding deadlock situations. In particular, the callback should return an error if playback is stopped; if it does not return, then L{libvlc_media_player_stop}() will never return.
    """
    # Opaque function-pointer marker; the concrete CFUNCTYPE prototype of
    # the same name is built in CallbackDecorators below.
    pass
class MediaSeekCb(ctypes.c_void_p):
    """Callback prototype to seek a custom bitstream input media.
    @param opaque: private pointer as set by the @ref libvlc_media_open_cb callback.
    @param offset: absolute byte offset to seek to.
    @return: 0 on success, -1 on error.
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class MediaCloseCb(ctypes.c_void_p):
    """Callback prototype to close a custom bitstream input media.
    @param opaque: private pointer as set by the @ref libvlc_media_open_cb callback.
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class VideoLockCb(ctypes.c_void_p):
    """Callback prototype to allocate and lock a picture buffer.
    Whenever a new video frame needs to be decoded, the lock callback is
    invoked. Depending on the video chroma, one or three pixel planes of
    adequate dimensions must be returned via the second parameter. Those
    planes must be aligned on 32-bytes boundaries.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() [IN].
    @param planes: start address of the pixel planes (LibVLC allocates the array of void pointers, this callback must initialize the array) [OUT].
    @return: a private pointer for the display and unlock callbacks to identify the picture buffers.
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class VideoUnlockCb(ctypes.c_void_p):
    """Callback prototype to unlock a picture buffer.
    When the video frame decoding is complete, the unlock callback is invoked.
    This callback might not be needed at all. It is only an indication that the
    application can now read the pixel values if it needs to.
    @note: A picture buffer is unlocked after the picture is decoded,
    but before the picture is displayed.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() [IN].
    @param picture: private pointer returned from the @ref libvlc_video_lock_cb callback [IN].
    @param planes: pixel planes as defined by the @ref libvlc_video_lock_cb callback (this parameter is only for convenience) [IN].
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class VideoDisplayCb(ctypes.c_void_p):
    """Callback prototype to display a picture.
    When the video frame needs to be shown, as determined by the media playback
    clock, the display callback is invoked.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() [IN].
    @param picture: private pointer returned from the @ref libvlc_video_lock_cb callback [IN].
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class VideoFormatCb(ctypes.c_void_p):
    """Callback prototype to configure picture buffers format.
    This callback gets the format of the video as output by the video decoder
    and the chain of video filters (if any). It can opt to change any parameter
    as it needs. In that case, LibVLC will attempt to convert the video format
    (rescaling and chroma conversion) but these operations can be CPU intensive.
    @param opaque: pointer to the private pointer passed to L{libvlc_video_set_callbacks}() [IN/OUT].
    @param chroma: pointer to the 4 bytes video format identifier [IN/OUT].
    @param width: pointer to the pixel width [IN/OUT].
    @param height: pointer to the pixel height [IN/OUT].
    @param pitches: table of scanline pitches in bytes for each pixel plane (the table is allocated by LibVLC) [OUT].
    @return: lines table of scanlines count for each plane.
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class VideoCleanupCb(ctypes.c_void_p):
    """Callback prototype to configure picture buffers format.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() (and possibly modified by @ref libvlc_video_format_cb) [IN].
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioPlayCb(ctypes.c_void_p):
    """Callback prototype for audio playback.
    The LibVLC media player decodes and post-processes the audio signal
    asynchronously (in an internal thread). Whenever audio samples are ready
    to be queued to the output, this callback is invoked.
    The number of samples provided per invocation may depend on the file format,
    the audio coding algorithm, the decoder plug-in, the post-processing
    filters and timing. Application must not assume a certain number of samples.
    The exact format of audio samples is determined by L{libvlc_audio_set_format}()
    or L{libvlc_audio_set_format_callbacks}() as is the channels layout.
    Note that the number of samples is per channel. For instance, if the audio
    track sampling rate is 48000 Hz, then 1200 samples represent 25 milliseconds
    of audio signal - regardless of the number of audio channels.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param samples: pointer to a table of audio samples to play back [IN].
    @param count: number of audio samples to play back.
    @param pts: expected play time stamp (see libvlc_delay()).
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioPauseCb(ctypes.c_void_p):
    """Callback prototype for audio pause.
    LibVLC invokes this callback to pause audio playback.
    @note: The pause callback is never called if the audio is already paused.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param pts: time stamp of the pause request (should be elapsed already).
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioResumeCb(ctypes.c_void_p):
    """Callback prototype for audio resumption.
    LibVLC invokes this callback to resume audio playback after it was
    previously paused.
    @note: The resume callback is never called if the audio is not paused.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param pts: time stamp of the resumption request (should be elapsed already).
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioFlushCb(ctypes.c_void_p):
    """Callback prototype for audio buffer flush.
    LibVLC invokes this callback if it needs to discard all pending buffers and
    stop playback as soon as possible. This typically occurs when the media is
    stopped.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioDrainCb(ctypes.c_void_p):
    """Callback prototype for audio buffer drain.
    LibVLC may invoke this callback when the decoded audio track is ending.
    There will be no further decoded samples for the track, but playback should
    nevertheless continue until all already pending buffers are rendered.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioSetVolumeCb(ctypes.c_void_p):
    """Callback prototype for audio volume change.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param volume: software volume (1. = nominal, 0. = mute).
    @param mute: muted flag.
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioSetupCb(ctypes.c_void_p):
    """Callback prototype to setup the audio playback.
    This is called when the media player needs to create a new audio output.
    @param opaque: pointer to the data pointer passed to L{libvlc_audio_set_callbacks}() [IN/OUT].
    @param format: 4 bytes sample format [IN/OUT].
    @param rate: sample rate [IN/OUT].
    @param channels: channels count [IN/OUT].
    @return: 0 on success, anything else to skip audio playback.
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class AudioCleanupCb(ctypes.c_void_p):
    """Callback prototype for audio playback cleanup.
    This is called when the media player no longer needs an audio output.
    @param opaque: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    """
    # Opaque function-pointer marker (c_void_p subclass) used as a ctypes
    # argument type for the corresponding libvlc callback.
    pass
class CallbackDecorators(object):
    "Class holding various method decorators for callback functions."
    # Each attribute is a ctypes.CFUNCTYPE; the FIRST argument to CFUNCTYPE
    # is the C return type, the rest are the callback's argument types.
    # The __doc__ assignment that follows each prototype carries the
    # generated libvlc API documentation.
    # NOTE(review): several return types below are declared as
    # ctypes.POINTER(...) (e.g. MediaOpenCb, MediaSeekCb, AudioSetupCb,
    # VideoFormatCb) which looks like a generator artifact -- verify
    # against the libvlc C headers before relying on the return values.
    Callback = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
    Callback.__doc__ = '''Callback function notification.
    @param p_event: the event triggering the callback.
    '''
    # Log_ptr is defined earlier in this module (opaque log context pointer).
    LogCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, Log_ptr, ctypes.c_char_p, ctypes.c_void_p)
    LogCb.__doc__ = '''Callback prototype for LibVLC log message handler.
    @param data: data pointer as given to L{libvlc_log_set}().
    @param level: message level (@ref libvlc_log_level).
    @param ctx: message context (meta-information about the message).
    @param fmt: printf() format string (as defined by ISO C11).
    @param args: variable argument list for the format @note Log message handlers B{must} be thread-safe. @warning The message context pointer, the format string parameters and the variable arguments are only valid until the callback returns.
    '''
    MediaOpenCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p), ctypes.POINTER(ctypes.c_uint64))
    MediaOpenCb.__doc__ = '''Callback prototype to open a custom bitstream input media.
    The same media item can be opened multiple times. Each time, this callback
    is invoked. It should allocate and initialize any instance-specific
    resources, then store them in *datap. The instance resources can be freed
    in the @ref libvlc_media_close_cb callback.
    @param opaque: private pointer as passed to L{libvlc_media_new_callbacks}().
    @return: datap storage space for a private data pointer, sizep byte length of the bitstream or UINT64_MAX if unknown.
    '''
    MediaReadCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_ssize_t), ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t)
    MediaReadCb.__doc__ = '''Callback prototype to read data from a custom bitstream input media.
    @param opaque: private pointer as set by the @ref libvlc_media_open_cb callback.
    @param buf: start address of the buffer to read data into.
    @param len: bytes length of the buffer.
    @return: strictly positive number of bytes read, 0 on end-of-stream, or -1 on non-recoverable error @note If no data is immediately available, then the callback should sleep. @warning The application is responsible for avoiding deadlock situations. In particular, the callback should return an error if playback is stopped; if it does not return, then L{libvlc_media_player_stop}() will never return.
    '''
    MediaSeekCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.c_void_p, ctypes.c_uint64)
    MediaSeekCb.__doc__ = '''Callback prototype to seek a custom bitstream input media.
    @param opaque: private pointer as set by the @ref libvlc_media_open_cb callback.
    @param offset: absolute byte offset to seek to.
    @return: 0 on success, -1 on error.
    '''
    MediaCloseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    MediaCloseCb.__doc__ = '''Callback prototype to close a custom bitstream input media.
    @param opaque: private pointer as set by the @ref libvlc_media_open_cb callback.
    '''
    VideoLockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p))
    VideoLockCb.__doc__ = '''Callback prototype to allocate and lock a picture buffer.
    Whenever a new video frame needs to be decoded, the lock callback is
    invoked. Depending on the video chroma, one or three pixel planes of
    adequate dimensions must be returned via the second parameter. Those
    planes must be aligned on 32-bytes boundaries.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() [IN].
    @param planes: start address of the pixel planes (LibVLC allocates the array of void pointers, this callback must initialize the array) [OUT].
    @return: a private pointer for the display and unlock callbacks to identify the picture buffers.
    '''
    VideoUnlockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p))
    VideoUnlockCb.__doc__ = '''Callback prototype to unlock a picture buffer.
    When the video frame decoding is complete, the unlock callback is invoked.
    This callback might not be needed at all. It is only an indication that the
    application can now read the pixel values if it needs to.
    @note: A picture buffer is unlocked after the picture is decoded,
    but before the picture is displayed.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() [IN].
    @param picture: private pointer returned from the @ref libvlc_video_lock_cb callback [IN].
    @param planes: pixel planes as defined by the @ref libvlc_video_lock_cb callback (this parameter is only for convenience) [IN].
    '''
    VideoDisplayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
    VideoDisplayCb.__doc__ = '''Callback prototype to display a picture.
    When the video frame needs to be shown, as determined by the media playback
    clock, the display callback is invoked.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() [IN].
    @param picture: private pointer returned from the @ref libvlc_video_lock_cb callback [IN].
    '''
    VideoFormatCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
    VideoFormatCb.__doc__ = '''Callback prototype to configure picture buffers format.
    This callback gets the format of the video as output by the video decoder
    and the chain of video filters (if any). It can opt to change any parameter
    as it needs. In that case, LibVLC will attempt to convert the video format
    (rescaling and chroma conversion) but these operations can be CPU intensive.
    @param opaque: pointer to the private pointer passed to L{libvlc_video_set_callbacks}() [IN/OUT].
    @param chroma: pointer to the 4 bytes video format identifier [IN/OUT].
    @param width: pointer to the pixel width [IN/OUT].
    @param height: pointer to the pixel height [IN/OUT].
    @param pitches: table of scanline pitches in bytes for each pixel plane (the table is allocated by LibVLC) [OUT].
    @return: lines table of scanlines count for each plane.
    '''
    VideoCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    VideoCleanupCb.__doc__ = '''Callback prototype to configure picture buffers format.
    @param opaque: private pointer as passed to L{libvlc_video_set_callbacks}() (and possibly modified by @ref libvlc_video_format_cb) [IN].
    '''
    AudioPlayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int64)
    AudioPlayCb.__doc__ = '''Callback prototype for audio playback.
    The LibVLC media player decodes and post-processes the audio signal
    asynchronously (in an internal thread). Whenever audio samples are ready
    to be queued to the output, this callback is invoked.
    The number of samples provided per invocation may depend on the file format,
    the audio coding algorithm, the decoder plug-in, the post-processing
    filters and timing. Application must not assume a certain number of samples.
    The exact format of audio samples is determined by L{libvlc_audio_set_format}()
    or L{libvlc_audio_set_format_callbacks}() as is the channels layout.
    Note that the number of samples is per channel. For instance, if the audio
    track sampling rate is 48000 Hz, then 1200 samples represent 25 milliseconds
    of audio signal - regardless of the number of audio channels.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param samples: pointer to a table of audio samples to play back [IN].
    @param count: number of audio samples to play back.
    @param pts: expected play time stamp (see libvlc_delay()).
    '''
    AudioPauseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioPauseCb.__doc__ = '''Callback prototype for audio pause.
    LibVLC invokes this callback to pause audio playback.
    @note: The pause callback is never called if the audio is already paused.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param pts: time stamp of the pause request (should be elapsed already).
    '''
    AudioResumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioResumeCb.__doc__ = '''Callback prototype for audio resumption.
    LibVLC invokes this callback to resume audio playback after it was
    previously paused.
    @note: The resume callback is never called if the audio is not paused.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param pts: time stamp of the resumption request (should be elapsed already).
    '''
    AudioFlushCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioFlushCb.__doc__ = '''Callback prototype for audio buffer flush.
    LibVLC invokes this callback if it needs to discard all pending buffers and
    stop playback as soon as possible. This typically occurs when the media is
    stopped.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    '''
    AudioDrainCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    AudioDrainCb.__doc__ = '''Callback prototype for audio buffer drain.
    LibVLC may invoke this callback when the decoded audio track is ending.
    There will be no further decoded samples for the track, but playback should
    nevertheless continue until all already pending buffers are rendered.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    '''
    AudioSetVolumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_float, ctypes.c_bool)
    AudioSetVolumeCb.__doc__ = '''Callback prototype for audio volume change.
    @param data: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    @param volume: software volume (1. = nominal, 0. = mute).
    @param mute: muted flag.
    '''
    AudioSetupCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
    AudioSetupCb.__doc__ = '''Callback prototype to setup the audio playback.
    This is called when the media player needs to create a new audio output.
    @param opaque: pointer to the data pointer passed to L{libvlc_audio_set_callbacks}() [IN/OUT].
    @param format: 4 bytes sample format [IN/OUT].
    @param rate: sample rate [IN/OUT].
    @param channels: channels count [IN/OUT].
    @return: 0 on success, anything else to skip audio playback.
    '''
    AudioCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    AudioCleanupCb.__doc__ = '''Callback prototype for audio playback cleanup.
    This is called when the media player no longer needs an audio output.
    @param opaque: data pointer as passed to L{libvlc_audio_set_callbacks}() [IN].
    '''
# Short module-level alias used by the generated wrapper code below when
# declaring callback parameter types.
cb = CallbackDecorators
# End of generated enum types #

# From libvlc_structures.h
# ctypes mirrors of the structs declared in libvlc_structures.h.  Field
# order and types are ABI-significant and must match the C headers exactly.
# Self-referential structs declare an empty class first and attach
# '_fields_' afterwards so the class name is available for POINTER().
class AudioOutput(_Cstruct):
    """Node of the linked list returned by libvlc_audio_output_list_get()."""
    def __str__(self):
        return '%s(%s:%s)' % (self.__class__.__name__, self.name, self.description)
AudioOutput._fields_ = [ # recursive struct
    ('name', ctypes.c_char_p),
    ('description', ctypes.c_char_p),
    ('next', ctypes.POINTER(AudioOutput)),
    ]
class LogMessage(_Cstruct):
    """Legacy log message record (pre-2.1 logging API)."""
    _fields_ = [
        ('size', ctypes.c_uint),
        ('severity', ctypes.c_int),
        ('type', ctypes.c_char_p),
        ('name', ctypes.c_char_p),
        ('header', ctypes.c_char_p),
        ('message', ctypes.c_char_p),
    ]
    def __init__(self):
        super(LogMessage, self).__init__()
        # The C API expects the struct size in the first field.
        self.size = ctypes.sizeof(self)
    def __str__(self):
        return '%s(%d:%s): %s' % (self.__class__.__name__, self.severity, self.type, self.message)
class MediaEvent(_Cstruct):
    """Payload of VLM media events (see EventUnion.media_event)."""
    _fields_ = [
        ('media_name', ctypes.c_char_p),
        ('instance_name', ctypes.c_char_p),
    ]
class MediaStats(_Cstruct):
    """Playback statistics as filled in by libvlc_media_get_stats()."""
    _fields_ = [
        ('read_bytes', ctypes.c_int),
        ('input_bitrate', ctypes.c_float),
        ('demux_read_bytes', ctypes.c_int),
        ('demux_bitrate', ctypes.c_float),
        ('demux_corrupted', ctypes.c_int),
        ('demux_discontinuity', ctypes.c_int),
        ('decoded_video', ctypes.c_int),
        ('decoded_audio', ctypes.c_int),
        ('displayed_pictures', ctypes.c_int),
        ('lost_pictures', ctypes.c_int),
        ('played_abuffers', ctypes.c_int),
        ('lost_abuffers', ctypes.c_int),
        ('sent_packets', ctypes.c_int),
        ('sent_bytes', ctypes.c_int),
        ('send_bitrate', ctypes.c_float),
    ]
class MediaTrackInfo(_Cstruct):
    """Deprecated flat track description (audio and video share fields)."""
    _fields_ = [
        ('codec', ctypes.c_uint32),
        ('id', ctypes.c_int),
        ('type', TrackType),
        ('profile', ctypes.c_int),
        ('level', ctypes.c_int),
        # Meaning depends on 'type': channels for audio, height for video.
        ('channels_or_height', ctypes.c_uint),
        ('rate_or_width', ctypes.c_uint),
    ]
class AudioTrack(_Cstruct):
    """Audio-specific part of a MediaTrack."""
    _fields_ = [
        ('channels', ctypes.c_uint),
        ('rate', ctypes.c_uint),
    ]
class VideoTrack(_Cstruct):
    """Video-specific part of a MediaTrack."""
    _fields_ = [
        ('height', ctypes.c_uint),
        ('width', ctypes.c_uint),
        ('sar_num', ctypes.c_uint),
        ('sar_den', ctypes.c_uint),
        ('frame_rate_num', ctypes.c_uint),
        ('frame_rate_den', ctypes.c_uint),
    ]
class SubtitleTrack(_Cstruct):
    """Subtitle-specific part of a MediaTrack."""
    _fields_ = [
        ('encoding', ctypes.c_char_p),
    ]
class MediaTrackTracks(ctypes.Union):
    """Union over the per-kind track payloads; discriminated by MediaTrack.type."""
    _fields_ = [
        ('audio', ctypes.POINTER(AudioTrack)),
        ('video', ctypes.POINTER(VideoTrack)),
        ('subtitle', ctypes.POINTER(SubtitleTrack)),
    ]
class MediaTrack(_Cstruct):
    """One elementary stream description (libvlc_media_track_t)."""
    # 'u' is anonymous so .audio/.video/.subtitle are reachable directly.
    _anonymous_ = ("u",)
    _fields_ = [
        ('codec', ctypes.c_uint32),
        ('original_fourcc', ctypes.c_uint32),
        ('id', ctypes.c_int),
        ('type', TrackType),
        ('profile', ctypes.c_int),
        ('level', ctypes.c_int),
        ('u', MediaTrackTracks),
        ('bitrate', ctypes.c_uint),
        ('language', ctypes.c_char_p),
        ('description', ctypes.c_char_p),
    ]
class PlaylistItem(_Cstruct):
    """Legacy playlist entry."""
    _fields_ = [
        ('id', ctypes.c_int),
        ('uri', ctypes.c_char_p),
        ('name', ctypes.c_char_p),
    ]
    def __str__(self):
        return '%s #%d %s (uri %s)' % (self.__class__.__name__, self.id, self.name, self.uri)
class Position(object):
    """Enum-like, immutable window position constants.
    See e.g. VideoMarqueeOption.Position.
    """
    Center = 0
    Left = 1
    CenterLeft = 1
    Right = 2
    CenterRight = 2
    Top = 4
    TopCenter = 4
    TopLeft = 5
    TopRight = 6
    Bottom = 8
    BottomCenter = 8
    BottomLeft = 9
    BottomRight = 10
    def __init__(self, *unused):
        # Instantiation is forbidden: the class itself is the namespace.
        raise TypeError('constants only')
    def __setattr__(self, *unused): #PYCHOK expected
        raise TypeError('immutable constants')
class Rectangle(_Cstruct):
    """Simple rectangle (libvlc_rectangle_t)."""
    _fields_ = [
        ('top', ctypes.c_int),
        ('left', ctypes.c_int),
        ('bottom', ctypes.c_int),
        ('right', ctypes.c_int),
    ]
class TrackDescription(_Cstruct):
    """Node of the linked lists returned by the track enumeration functions."""
    def __str__(self):
        return '%s(%d:%s)' % (self.__class__.__name__, self.id, self.name)
TrackDescription._fields_ = [ # recursive struct
    ('id', ctypes.c_int),
    ('name', ctypes.c_char_p),
    ('next', ctypes.POINTER(TrackDescription)),
    ]
def track_description_list(head):
    """Flatten a TrackDescription linked list into a Python list of
    (id, name) tuples, releasing the C-side list afterwards.
    @param head: pointer to the first TrackDescription node (may be None).
    @return: list of (id, name) tuples; empty if head is None/null.
    """
    descriptions = []
    if head:
        node = head
        while node:
            record = node.contents
            descriptions.append((record.id, record.name))
            node = record.next
        # Older libvlc builds only export the *_list_release variant.
        try:
            release = libvlc_track_description_release
        except NameError:
            release = libvlc_track_description_list_release
        release(head)
    return descriptions
class EventUnion(ctypes.Union):
    """Union of the per-event payload carried inside an Event; which member
    is valid depends on Event.type."""
    _fields_ = [
        ('meta_type', ctypes.c_uint),
        ('new_child', ctypes.c_uint),
        ('new_duration', ctypes.c_longlong),
        ('new_status', ctypes.c_int),
        ('media', ctypes.c_void_p),
        ('new_state', ctypes.c_uint),
        # FIXME: Media instance
        ('new_cache', ctypes.c_float),
        ('new_position', ctypes.c_float),
        ('new_time', ctypes.c_longlong),
        ('new_title', ctypes.c_int),
        ('new_seekable', ctypes.c_longlong),
        ('new_pausable', ctypes.c_longlong),
        ('new_scrambled', ctypes.c_longlong),
        ('new_count', ctypes.c_longlong),
        # FIXME: Skipped MediaList and MediaListView...
        ('filename', ctypes.c_char_p),
        ('new_length', ctypes.c_longlong),
        ('media_event', MediaEvent),
    ]
class Event(_Cstruct):
    """A libvlc event: type tag, emitting object and payload union."""
    _fields_ = [
        ('type', EventType),
        ('object', ctypes.c_void_p),
        ('u', EventUnion),
    ]
class ModuleDescription(_Cstruct):
    """Node of the linked list returned by libvlc_*_filter_list_get()."""
    def __str__(self):
        return '%s %s (%s)' % (self.__class__.__name__, self.shortname, self.name)
ModuleDescription._fields_ = [ # recursive struct
    ('name', ctypes.c_char_p),
    ('shortname', ctypes.c_char_p),
    ('longname', ctypes.c_char_p),
    ('help', ctypes.c_char_p),
    ('next', ctypes.POINTER(ModuleDescription)),
    ]
def module_description_list(head):
    """Flatten a ModuleDescription linked list into a Python list of
    (name, shortname, longname, help) tuples, releasing the C-side list.
    @param head: pointer to the first ModuleDescription node (may be None).
    @return: list of (name, shortname, longname, help) tuples.
    """
    modules = []
    if head:
        node = head
        while node:
            record = node.contents
            modules.append((record.name, record.shortname,
                            record.longname, record.help))
            node = record.next
        libvlc_module_description_list_release(head)
    return modules
class AudioOutputDevice(_Cstruct):
    """Node of the linked list returned by libvlc_audio_output_device_list_get()."""
    def __str__(self):
        # Bug fix: the struct has no 'id'/'name' fields -- the original
        # referenced self.id and self.name, which raised AttributeError.
        # Use the actual 'device' and 'description' fields instead.
        return '%s(%s:%s)' % (self.__class__.__name__, self.device, self.description)
AudioOutputDevice._fields_ = [ # recursive struct
    ('next', ctypes.POINTER(AudioOutputDevice)),
    ('device', ctypes.c_char_p),
    ('description', ctypes.c_char_p),
    ]
class TitleDescription(_Cstruct):
    """Title description (libvlc_title_description_t)."""
    # Bug fix: ctypes only honours the '_fields_' attribute (with trailing
    # underscore); the original '_fields' was silently ignored, leaving the
    # struct with no members and breaking attribute access.
    _fields_ = [
        ('duration', ctypes.c_longlong),
        ('name', ctypes.c_char_p),
        ('menu', ctypes.c_bool),
    ]
class ChapterDescription(_Cstruct):
    """Chapter description (libvlc_chapter_description_t)."""
    # Bug fix: ctypes requires '_fields_' (trailing underscore); the
    # original '_fields' was silently ignored by ctypes.
    _fields_ = [
        ('time_offset', ctypes.c_longlong),
        ('duration', ctypes.c_longlong),
        ('name', ctypes.c_char_p),
    ]
class VideoViewpoint(_Cstruct):
    """360-degree video viewpoint (libvlc_video_viewpoint_t); angles in degrees."""
    # Bug fix: ctypes requires '_fields_' (trailing underscore); the
    # original '_fields' was silently ignored by ctypes.
    _fields_ = [
        ('yaw', ctypes.c_float),
        ('pitch', ctypes.c_float),
        ('roll', ctypes.c_float),
        ('field_of_view', ctypes.c_float),
    ]
# This struct depends on the MediaSlaveType enum that is defined only
# in > 2.2
if 'MediaSlaveType' in locals():
    class MediaSlave(_Cstruct):
        """Media slave description (libvlc_media_slave_t)."""
        # Bug fix: ctypes requires '_fields_' (trailing underscore); the
        # original '_fields' was silently ignored by ctypes.
        _fields_ = [
            ('psz_uri', ctypes.c_char_p),
            ('i_type', MediaSlaveType),
            ('i_priority', ctypes.c_uint)
        ]
class RDDescription(_Cstruct):
    """Renderer discoverer description (libvlc_rd_description_t)."""
    # Bug fix: ctypes requires '_fields_' (trailing underscore); the
    # original '_fields' was silently ignored by ctypes.
    _fields_ = [
        ('name', ctypes.c_char_p),
        ('longname', ctypes.c_char_p)
    ]
# End of header.py #
class EventManager(_Ctype):
    '''Create an event manager with callback handler.
    This class interposes the registration and handling of
    event notifications in order to (a) remove the need for
    decorating each callback functions with the decorator
    '@callbackmethod', (b) allow any number of positional
    and/or keyword arguments to the callback (in addition
    to the Event instance) and (c) to preserve the Python
    objects such that the callback and argument objects
    remain alive (i.e. are not garbage collected) until
    B{after} the notification has been unregistered.
    @note: Only a single notification can be registered
    for each event type in an EventManager instance.
    '''
    # Class-level defaults; event_attach() replaces both with
    # per-instance attributes the first time a callback is registered.
    _callback_handler = None
    _callbacks = {}
    def __new__(cls, ptr=_internal_guard):
        if ptr == _internal_guard:
            raise VLCException("(INTERNAL) ctypes class.\nYou should get a reference to EventManager through the MediaPlayer.event_manager() method.")
        return _Constructor(cls, ptr)
    def event_attach(self, eventtype, callback, *args, **kwds):
        """Register an event notification.
        @param eventtype: the desired event type to be notified about.
        @param callback: the function to call when the event occurs.
        @param args: optional positional arguments for the callback.
        @param kwds: optional keyword arguments for the callback.
        @return: 0 on success, ENOMEM on error.
        @note: The callback function must have at least one argument,
        an Event instance. Any other, optional positional and keyword
        arguments are in B{addition} to the first one.
        """
        if not isinstance(eventtype, EventType):
            raise VLCException("%s required: %r" % ('EventType', eventtype))
        if not hasattr(callback, '__call__'): # callable()
            raise VLCException("%s required: %r" % ('callable', callback))
        # check that the callback expects arguments
        # NOTE(review): getargspec is deprecated/removed in newer Python --
        # presumably aliased elsewhere in this module; verify.
        if not any(getargspec(callback)[:2]): # list(...)
            raise VLCException("%s required: %r" % ('argument', callback))
        if self._callback_handler is None:
            _called_from_ctypes = ctypes.CFUNCTYPE(None, ctypes.POINTER(Event), ctypes.c_void_p)
            @_called_from_ctypes
            def _callback_handler(event, k):
                """(INTERNAL) handle callback call from ctypes.
                @note: We cannot simply make this an EventManager
                method since ctypes does not prepend self as the
                first parameter, hence this closure.
                """
                try: # retrieve Python callback and arguments
                    call, args, kwds = self._callbacks[k]
                    # deref event.contents to simplify callback code
                    call(event.contents, *args, **kwds)
                except KeyError: # detached?
                    pass
            # Storing the CFUNCTYPE wrapper on the instance keeps it (and
            # therefore the registered callbacks) alive for ctypes.
            self._callback_handler = _callback_handler
            self._callbacks = {}
        k = eventtype.value
        # The event type value doubles as the opaque user-data key.
        r = libvlc_event_attach(self, k, self._callback_handler, k)
        if not r:
            self._callbacks[k] = (callback, args, kwds)
        return r
    def event_detach(self, eventtype):
        """Unregister an event notification.
        @param eventtype: the event type notification to be removed.
        """
        if not isinstance(eventtype, EventType):
            raise VLCException("%s required: %r" % ('EventType', eventtype))
        k = eventtype.value
        if k in self._callbacks:
            del self._callbacks[k] # remove, regardless of libvlc return value
            libvlc_event_detach(self, k, self._callback_handler, k)
class Instance(_Ctype):
'''Create a new Instance instance.
It may take as parameter either:
- a string
- a list of strings as first parameters
- the parameters given as the constructor parameters (must be strings)
'''
    def __new__(cls, *args):
        """Build a libvlc Instance, either wrapping an existing C pointer
        or creating a new one from a 'vlc'-style argument list.
        """
        if len(args) == 1:
            # Only 1 arg. It is either a C pointer, or an arg string,
            # or a tuple.
            i = args[0]
            if isinstance(i, _Ints):
                # Existing C pointer: just wrap it.
                return _Constructor(cls, i)
            elif isinstance(i, basestring):
                args = i.strip().split()
            elif isinstance(i, _Seqs):
                args = list(i)
            else:
                raise VLCException('Instance %r' % (args,))
        else:
            args = list(args)
        # Normalize to an argv-style list starting with the program name.
        if not args: # no parameters passed
            args = ['vlc']
        elif args[0] != 'vlc':
            args.insert(0, 'vlc')
        if plugin_path is not None:
            # set plugin_path if detected, win32 and MacOS,
            # if the user did not specify it itself.
            os.environ.setdefault('VLC_PLUGIN_PATH', plugin_path)
        if PYTHON3:
            # libvlc_new expects bytes, not str, under Python 3.
            args = [ str_to_bytes(a) for a in args ]
        return libvlc_new(len(args), args)
def media_player_new(self, uri=None):
"""Create a new MediaPlayer instance.
@param uri: an optional URI to play in the player.
"""
p = libvlc_media_player_new(self)
if uri:
p.set_media(self.media_new(uri))
p._instance = self
return p
def media_list_player_new(self):
"""Create a new MediaListPlayer instance.
"""
p = libvlc_media_list_player_new(self)
p._instance = self
return p
def media_new(self, mrl, *options):
"""Create a new Media instance.
If mrl contains a colon (:) preceded by more than 1 letter, it
will be treated as a URL. Else, it will be considered as a
local path. If you need more control, directly use
media_new_location/media_new_path methods.
Options can be specified as supplementary string parameters,
but note that many options cannot be set at the media level,
and rather at the Instance level. For instance, the marquee
filter must be specified when creating the vlc.Instance or
vlc.MediaPlayer.
Alternatively, options can be added to the media using the
Media.add_options method (with the same limitation).
@param options: optional media option=value strings
"""
if ':' in mrl and mrl.index(':') > 1:
# Assume it is a URL
m = libvlc_media_new_location(self, str_to_bytes(mrl))
else:
# Else it should be a local path.
m = libvlc_media_new_path(self, str_to_bytes(os.path.normpath(mrl)))
for o in options:
libvlc_media_add_option(m, str_to_bytes(o))
m._instance = self
return m
def media_list_new(self, mrls=None):
"""Create a new MediaList instance.
@param mrls: optional list of MRL strings
"""
l = libvlc_media_list_new(self)
# We should take the lock, but since we did not leak the
# reference, nobody else can access it.
if mrls:
for m in mrls:
l.add_media(m)
l._instance = self
return l
    def audio_output_enumerate_devices(self):
        """Enumerate the defined audio output devices.
        @return: list of dicts {name:, description:, devices:}
        """
        r = []
        head = libvlc_audio_output_list_get(self)
        if head:
            # Walk the C-side linked list of output modules.
            i = head
            while i:
                i = i.contents
                # For each module, enumerate its devices by index; 'd' is
                # the comprehension's loop index, reused in both lookups.
                d = [{'id': libvlc_audio_output_device_id (self, i.name, d),
                      'longname': libvlc_audio_output_device_longname(self, i.name, d)}
                     for d in range(libvlc_audio_output_device_count (self, i.name))]
                r.append({'name': i.name, 'description': i.description, 'devices': d})
                i = i.next
            # Release the C list once fully copied to Python objects.
            libvlc_audio_output_list_release(head)
        return r
    def audio_filter_list_get(self):
        """Returns a list of available audio filters.
        """
        # The C-side list is converted and released by module_description_list().
        return module_description_list(libvlc_audio_filter_list_get(self))
    def video_filter_list_get(self):
        """Returns a list of available video filters.
        """
        # The C-side list is converted and released by module_description_list().
        return module_description_list(libvlc_video_filter_list_get(self))
    def release(self):
        '''Decrement the reference count of a libvlc instance, and destroy it
        if it reaches zero.
        '''
        return libvlc_release(self)
    def retain(self):
        '''Increments the reference count of a libvlc instance.
        The initial reference count is 1 after L{new}() returns.
        '''
        return libvlc_retain(self)
    def add_intf(self, name):
        '''Try to start a user interface for the libvlc instance.
        @param name: interface name, or None for default.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_add_intf(self, str_to_bytes(name))
    def set_user_agent(self, name, http):
        '''Sets the application name. LibVLC passes this as the user agent string
        when a protocol requires it.
        @param name: human-readable application name, e.g. "FooBar player 1.2.3".
        @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
        @version: LibVLC 1.1.1 or later.
        '''
        return libvlc_set_user_agent(self, str_to_bytes(name), str_to_bytes(http))
    def set_app_id(self, id, version, icon):
        '''Sets some meta-information about the application.
        See also L{set_user_agent}().
        @param id: Java-style application identifier, e.g. "com.acme.foobar".
        @param version: application version numbers, e.g. "1.2.3".
        @param icon: application icon name, e.g. "foobar".
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_set_app_id(self, str_to_bytes(id), str_to_bytes(version), str_to_bytes(icon))
    def log_unset(self):
        '''Unsets the logging callback.
        This function deregisters the logging callback for a LibVLC instance.
        This is rarely needed as the callback is implicitly unset when the instance
        is destroyed.
        @note: This function will wait for any pending callbacks invocation to
        complete (causing a deadlock if called from within the callback).
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_log_unset(self)
    def log_set(self, cb, data):
        '''Sets the logging callback for a LibVLC instance.
        This function is thread-safe: it will wait for any pending callbacks
        invocation to complete.
        @param cb: callback function pointer (LogCb prototype).
        @param data: opaque data pointer for the callback function @note Some log messages (especially debug) are emitted by LibVLC while is being initialized. These messages cannot be captured with this interface. @warning A deadlock may occur if this function is called from the callback.
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_log_set(self, cb, data)
    def log_set_file(self, stream):
        '''Sets up logging to a file.
        @param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{log_unset}()).
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_log_set_file(self, stream)
    def media_discoverer_new(self, psz_name):
        '''Create a media discoverer object by name.
        After this object is created, you should attach to media_list events in
        order to be notified of new items discovered.
        You need to call L{media_discoverer_start}() in order to start the
        discovery.
        See L{media_discoverer_media_list}
        See L{media_discoverer_event_manager}
        See L{media_discoverer_start}.
        @param psz_name: service name; use L{media_discoverer_list_get}() to get a list of the discoverer names available in this libVLC instance.
        @return: media discover object or None in case of error.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_discoverer_new(self, str_to_bytes(psz_name))
    def media_discoverer_list_get(self, i_cat, ppp_services):
        '''Get media discoverer services by category.
        @param i_cat: category of services to fetch.
        @param ppp_services: address to store an allocated array of media discoverer services (must be freed with L{media_discoverer_list_release}() by the caller) [OUT].
        @return: the number of media discoverer services (0 on error).
        @version: LibVLC 3.0.0 and later.
        '''
        return libvlc_media_discoverer_list_get(self, i_cat, ppp_services)
    def media_library_new(self):
        '''Create an new Media Library object.
        @return: a new object or None on error.
        '''
        return libvlc_media_library_new(self)
def vlm_release(self):
'''Release the vlm instance related to the given L{Instance}.
'''
return libvlc_vlm_release(self)
def vlm_add_broadcast(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
'''Add a broadcast, with one input.
@param psz_name: the name of the new broadcast.
@param psz_input: the input MRL.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new broadcast.
@param b_loop: Should this broadcast be played in loop ?
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_broadcast(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
def vlm_add_vod(self, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
'''Add a vod, with one input.
@param psz_name: the name of the new vod media.
@param psz_input: the input MRL.
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new vod.
@param psz_mux: the muxer of the vod media.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_vod(self, str_to_bytes(psz_name), str_to_bytes(psz_input), i_options, ppsz_options, b_enabled, str_to_bytes(psz_mux))
def vlm_del_media(self, psz_name):
'''Delete a media (VOD or broadcast).
@param psz_name: the media to delete.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_del_media(self, str_to_bytes(psz_name))
def vlm_set_enabled(self, psz_name, b_enabled):
'''Enable or disable a media (VOD or broadcast).
@param psz_name: the media to work on.
@param b_enabled: the new status.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_enabled(self, str_to_bytes(psz_name), b_enabled)
def vlm_set_output(self, psz_name, psz_output):
'''Set the output for a media.
@param psz_name: the media to work on.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_output(self, str_to_bytes(psz_name), str_to_bytes(psz_output))
def vlm_set_input(self, psz_name, psz_input):
'''Set a media's input MRL. This will delete all existing inputs and
add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
def vlm_add_input(self, psz_name, psz_input):
'''Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
def vlm_set_loop(self, psz_name, b_loop):
'''Set a media's loop status.
@param psz_name: the media to work on.
@param b_loop: the new status.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_loop(self, str_to_bytes(psz_name), b_loop)
def vlm_set_mux(self, psz_name, psz_mux):
'''Set a media's vod muxer.
@param psz_name: the media to work on.
@param psz_mux: the new muxer.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_mux(self, str_to_bytes(psz_name), str_to_bytes(psz_mux))
def vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
'''Edit the parameters of a media. This will delete all existing inputs and
add the specified one.
@param psz_name: the name of the new broadcast.
@param psz_input: the input MRL.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new broadcast.
@param b_loop: Should this broadcast be played in loop ?
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_change_media(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
def vlm_play_media(self, psz_name):
'''Play the named broadcast.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_play_media(self, str_to_bytes(psz_name))
def vlm_stop_media(self, psz_name):
'''Stop the named broadcast.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_stop_media(self, str_to_bytes(psz_name))
    def vlm_pause_media(self, psz_name):
        '''Pause the named broadcast.
        @param psz_name: the name of the broadcast.
        @return: 0 on success, -1 on error.
        '''
        # Thin ctypes wrapper over the VLM control API.
        return libvlc_vlm_pause_media(self, str_to_bytes(psz_name))
    def vlm_seek_media(self, psz_name, f_percentage):
        '''Seek in the named broadcast.
        @param psz_name: the name of the broadcast.
        @param f_percentage: the percentage to seek to.
        @return: 0 on success, -1 on error.
        '''
        # Thin ctypes wrapper; f_percentage is forwarded as-is (C float).
        return libvlc_vlm_seek_media(self, str_to_bytes(psz_name), f_percentage)
    def vlm_show_media(self, psz_name):
        '''Return information about the named media as a JSON
        string representation.
        This function is mainly intended for debugging use,
        if you want programmatic access to the state of
        a vlm_media_instance_t, please use the corresponding
        libvlc_vlm_get_media_instance_xxx -functions.
        Currently there are no such functions available for
        vlm_media_t though.
        @param psz_name: the name of the media, if the name is an empty string, all media is described.
        @return: string with information about named media, or None on error.
        '''
        # Thin ctypes wrapper; intended for debugging, not programmatic access.
        return libvlc_vlm_show_media(self, str_to_bytes(psz_name))
    def vlm_get_media_instance_position(self, psz_name, i_instance):
        '''Get vlm_media instance position by name or instance id.
        @param psz_name: name of vlm media instance.
        @param i_instance: instance id.
        @return: position as float or -1. on error.
        '''
        # Thin ctypes wrapper over the VLM instance query API.
        return libvlc_vlm_get_media_instance_position(self, str_to_bytes(psz_name), i_instance)
    def vlm_get_media_instance_time(self, psz_name, i_instance):
        '''Get vlm_media instance time by name or instance id.
        @param psz_name: name of vlm media instance.
        @param i_instance: instance id.
        @return: time as integer or -1 on error.
        '''
        # Thin ctypes wrapper over the VLM instance query API.
        return libvlc_vlm_get_media_instance_time(self, str_to_bytes(psz_name), i_instance)
    def vlm_get_media_instance_length(self, psz_name, i_instance):
        '''Get vlm_media instance length by name or instance id.
        @param psz_name: name of vlm media instance.
        @param i_instance: instance id.
        @return: length of media item or -1 on error.
        '''
        # Thin ctypes wrapper over the VLM instance query API.
        return libvlc_vlm_get_media_instance_length(self, str_to_bytes(psz_name), i_instance)
    def vlm_get_media_instance_rate(self, psz_name, i_instance):
        '''Get vlm_media instance playback rate by name or instance id.
        @param psz_name: name of vlm media instance.
        @param i_instance: instance id.
        @return: playback rate or -1 on error.
        '''
        # Thin ctypes wrapper over the VLM instance query API.
        return libvlc_vlm_get_media_instance_rate(self, str_to_bytes(psz_name), i_instance)
    def vlm_get_media_instance_title(self, psz_name, i_instance):
        '''Get vlm_media instance title number by name or instance id.
        @param psz_name: name of vlm media instance.
        @param i_instance: instance id.
        @return: title as number or -1 on error.
        @bug: will always return 0.
        '''
        # Documented upstream as always returning 0 (see @bug above).
        return libvlc_vlm_get_media_instance_title(self, str_to_bytes(psz_name), i_instance)
    def vlm_get_media_instance_chapter(self, psz_name, i_instance):
        '''Get vlm_media instance chapter number by name or instance id.
        @param psz_name: name of vlm media instance.
        @param i_instance: instance id.
        @return: chapter as number or -1 on error.
        @bug: will always return 0.
        '''
        # Documented upstream as always returning 0 (see @bug above).
        return libvlc_vlm_get_media_instance_chapter(self, str_to_bytes(psz_name), i_instance)
    def vlm_get_media_instance_seekable(self, psz_name, i_instance):
        '''Is libvlc instance seekable ?
        @param psz_name: name of vlm media instance.
        @param i_instance: instance id.
        @return: 1 if seekable, 0 if not, -1 if media does not exist.
        @bug: will always return 0.
        '''
        # Documented upstream as always returning 0 (see @bug above).
        return libvlc_vlm_get_media_instance_seekable(self, str_to_bytes(psz_name), i_instance)
    @memoize_parameterless
    def vlm_get_event_manager(self):
        '''Get libvlc_event_manager from a vlm media.
        The p_event_manager is immutable, so you don't have to hold the lock.
        @return: libvlc_event_manager.
        '''
        # Memoized: repeated calls return the same wrapper object.
        return libvlc_vlm_get_event_manager(self)
    def media_new_location(self, psz_mrl):
        '''Create a media with a certain given media resource location,
        for instance a valid URL.
        @note: To refer to a local file with this function,
        the file://... URI syntax B{must} be used (see IETF RFC3986).
        We recommend using L{media_new_path}() instead when dealing with
        local files.
        See L{media_release}.
        @param psz_mrl: the media location.
        @return: the newly created media or None on error.
        '''
        # Thin ctypes wrapper; the MRL is encoded to bytes for the C call.
        return libvlc_media_new_location(self, str_to_bytes(psz_mrl))
    def media_new_path(self, path):
        '''Create a media for a certain file path.
        See L{media_release}.
        @param path: local filesystem path.
        @return: the newly created media or None on error.
        '''
        # Thin ctypes wrapper; prefer this over media_new_location for local files.
        return libvlc_media_new_path(self, str_to_bytes(path))
    def media_new_fd(self, fd):
        '''Create a media for an already open file descriptor.
        The file descriptor shall be open for reading (or reading and writing).
        Regular file descriptors, pipe read descriptors and character device
        descriptors (including TTYs) are supported on all platforms.
        Block device descriptors are supported where available.
        Directory descriptors are supported on systems that provide fdopendir().
        Sockets are supported on all platforms where they are file descriptors,
        i.e. all except Windows.
        @note: This library will B{not} automatically close the file descriptor
        under any circumstance. Nevertheless, a file descriptor can usually only be
        rendered once in a media player. To render it a second time, the file
        descriptor should probably be rewound to the beginning with lseek().
        See L{media_release}.
        @param fd: open file descriptor.
        @return: the newly created media or None on error.
        @version: LibVLC 1.1.5 and later.
        '''
        # fd is a raw OS-level descriptor (int), forwarded unchanged.
        return libvlc_media_new_fd(self, fd)
    def media_new_callbacks(self, open_cb, read_cb, seek_cb, close_cb, opaque):
        '''Create a media with custom callbacks to read the data from.
        @param open_cb: callback to open the custom bitstream input media.
        @param read_cb: callback to read data (must not be None).
        @param seek_cb: callback to seek, or None if seeking is not supported.
        @param close_cb: callback to close the media, or None if unnecessary.
        @param opaque: data pointer for the open callback.
        @return: the newly created media or None on error @note If open_cb is None, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{media_release}.
        @version: LibVLC 3.0.0 and later.
        '''
        # NOTE(review): the caller must keep the ctypes callback objects alive for
        # the lifetime of the media, or they may be garbage-collected — verify usage.
        return libvlc_media_new_callbacks(self, open_cb, read_cb, seek_cb, close_cb, opaque)
    def media_new_as_node(self, psz_name):
        '''Create a media as an empty node with a given name.
        See L{media_release}.
        @param psz_name: the name of the node.
        @return: the new empty media or None on error.
        '''
        # Thin ctypes wrapper; the node name is encoded to bytes.
        return libvlc_media_new_as_node(self, str_to_bytes(psz_name))
    def renderer_discoverer_new(self, psz_name):
        '''Create a renderer discoverer object by name
        After this object is created, you should attach to events in order to be
        notified of the discoverer events.
        You need to call L{renderer_discoverer_start}() in order to start the
        discovery.
        See L{renderer_discoverer_event_manager}()
        See L{renderer_discoverer_start}().
        @param psz_name: service name; use L{renderer_discoverer_list_get}() to get a list of the discoverer names available in this libVLC instance.
        @return: media discover object or None in case of error.
        @version: LibVLC 3.0.0 or later.
        '''
        # Thin ctypes wrapper; discovery does not start until _start() is called.
        return libvlc_renderer_discoverer_new(self, str_to_bytes(psz_name))
    def renderer_discoverer_list_get(self, ppp_services):
        '''Get media discoverer services
        See libvlc_renderer_list_release().
        @param ppp_services: address to store an allocated array of renderer discoverer services (must be freed with libvlc_renderer_list_release() by the caller) [OUT].
        @return: the number of media discoverer services (0 on error).
        @version: LibVLC 3.0.0 and later.
        '''
        # ppp_services is an out-parameter (ctypes pointer-to-pointer), forwarded as-is.
        return libvlc_renderer_discoverer_list_get(self, ppp_services)
    def audio_output_device_count(self, psz_audio_output):
        '''Backward compatibility stub. Do not use in new code.
        \deprecated Use L{audio_output_device_list_get}() instead.
        @return: always 0.
        '''
        # Deprecated stub retained for API compatibility only.
        return libvlc_audio_output_device_count(self, str_to_bytes(psz_audio_output))
    def audio_output_device_longname(self, psz_output, i_device):
        '''Backward compatibility stub. Do not use in new code.
        \deprecated Use L{audio_output_device_list_get}() instead.
        @return: always None.
        '''
        # Deprecated stub retained for API compatibility only.
        return libvlc_audio_output_device_longname(self, str_to_bytes(psz_output), i_device)
    def audio_output_device_id(self, psz_audio_output, i_device):
        '''Backward compatibility stub. Do not use in new code.
        \deprecated Use L{audio_output_device_list_get}() instead.
        @return: always None.
        '''
        # Deprecated stub retained for API compatibility only.
        return libvlc_audio_output_device_id(self, str_to_bytes(psz_audio_output), i_device)
    def media_discoverer_new_from_name(self, psz_name):
        '''\deprecated Use L{media_discoverer_new}() and L{media_discoverer_start}().
        '''
        # Deprecated wrapper retained for API compatibility only.
        return libvlc_media_discoverer_new_from_name(self, str_to_bytes(psz_name))
    def wait(self):
        '''Waits until an interface causes the instance to exit.
        You should start at least one interface first, using L{add_intf}().
        '''
        # Blocking call: does not return until the libvlc instance is told to exit.
        return libvlc_wait(self)
    def get_log_verbosity(self):
        '''Always returns minus one.
        This function is only provided for backward compatibility.
        @return: always -1.
        '''
        # Deprecated no-op stub kept for API compatibility.
        return libvlc_get_log_verbosity(self)
    def set_log_verbosity(self, level):
        '''This function does nothing.
        It is only provided for backward compatibility.
        @param level: ignored.
        '''
        # Deprecated no-op stub kept for API compatibility.
        return libvlc_set_log_verbosity(self, level)
    def log_open(self):
        '''This function does nothing useful.
        It is only provided for backward compatibility.
        @return: an unique pointer or None on error.
        '''
        # Deprecated stub kept for API compatibility.
        return libvlc_log_open(self)
    def playlist_play(self, i_id, i_options, ppsz_options):
        '''Start playing (if there is any item in the playlist).
        Additionnal playlist item options can be specified for addition to the
        item before it is played.
        @param i_id: the item to play. If this is a negative number, the next item will be selected. Otherwise, the item with the given ID will be played.
        @param i_options: the number of options to add to the item.
        @param ppsz_options: the options to add to the item.
        '''
        # ppsz_options is forwarded unconverted — presumably a ctypes char** array.
        return libvlc_playlist_play(self, i_id, i_options, ppsz_options)
    def audio_output_list_get(self):
        '''Gets the list of available audio output modules.
        @return: list of available audio outputs. It must be freed with In case of error, None is returned.
        '''
        # Thin ctypes wrapper; the returned list is owned by the caller (see C API).
        return libvlc_audio_output_list_get(self)
    def audio_output_device_list_get(self, aout):
        '''Gets a list of audio output devices for a given audio output module,
        See L{audio_output_device_set}().
        @note: Not all audio outputs support this. In particular, an empty (None)
        list of devices does B{not} imply that the specified audio output does
        not work.
        @note: The list might not be exhaustive.
        @warning: Some audio output devices in the list might not actually work in
        some circumstances. By default, it is recommended to not specify any
        explicit audio device.
        @param aout: audio output name (as returned by L{audio_output_list_get}()).
        @return: A None-terminated linked list of potential audio output devices. It must be freed with L{audio_output_device_list_release}().
        @version: LibVLC 2.1.0 or later.
        '''
        # Thin ctypes wrapper; the output module name is encoded to bytes.
        return libvlc_audio_output_device_list_get(self, str_to_bytes(aout))
class LogIterator(_Ctype):
    '''Create a new VLC log iterator.
    '''
    def __new__(cls, ptr=_internal_guard):
        '''(INTERNAL) ctypes wrapper constructor.
        '''
        return _Constructor(cls, ptr)
    def __iter__(self):
        # The iterator is its own iterable (standard Python iterator protocol).
        return self
    def next(self):
        # Python 2 iterator step; __next__ below delegates here for Python 3.
        if self.has_next():
            b = LogMessage()
            # The C call fills/returns a pointer to the next message.
            i = libvlc_log_iterator_next(self, b)
            return i.contents
        raise StopIteration
    def __next__(self):
        return self.next()
    def free(self):
        '''Frees memory allocated by L{log_get_iterator}().
        '''
        return libvlc_log_iterator_free(self)
    def has_next(self):
        '''Always returns zero.
        This function is only provided for backward compatibility.
        @return: always zero.
        '''
        # NOTE: since this always returns zero upstream, iteration ends immediately.
        return libvlc_log_iterator_has_next(self)
class Media(_Ctype):
    '''Create a new Media instance.
    Usage: Media(MRL, *options)
    See vlc.Instance.media_new documentation for details.
    '''
    def __new__(cls, *args):
        # Three construction paths: raw pointer (internal), explicit Instance,
        # or the process-wide default Instance.
        if args:
            i = args[0]
            if isinstance(i, _Ints):
                return _Constructor(cls, i)
            if isinstance(i, Instance):
                return i.media_new(*args[1:])
        o = get_default_instance().media_new(*args)
        return o
    def get_instance(self):
        # Returns the owning Instance if one was recorded, else None.
        return getattr(self, '_instance', None)
    def add_options(self, *options):
        """Add a list of options to the media.
        Options must be written without the double-dash. Warning: most
        audio and video options, such as text renderer, have no
        effects on an individual media. These options must be set at
        the vlc.Instance or vlc.MediaPlayer instanciation.
        @param options: optional media option=value strings
        """
        # Convenience loop over add_option().
        for o in options:
            self.add_option(o)
    def tracks_get(self):
        """Get media descriptor's elementary streams description
        Note, you need to call L{parse}() or play the media at least once
        before calling this function.
        Not doing this will result in an empty array.
        The result must be freed with L{tracks_release}.
        @version: LibVLC 2.1.0 and later.
        """
        mediaTrack_pp = ctypes.POINTER(MediaTrack)()
        n = libvlc_media_tracks_get(self, ctypes.byref(mediaTrack_pp))
        info = ctypes.cast(mediaTrack_pp, ctypes.POINTER(ctypes.POINTER(MediaTrack) * n))
        try:
            contents = info.contents
        except ValueError:
            # Media not parsed, no info.
            return None
        # NOTE(review): this returns a LAZY generator over ctypes memory, and the
        # matching libvlc_media_tracks_release call below is commented out — the
        # track array allocated by libvlc is never freed (leak), and the generator
        # must be consumed before anything could release it. Confirm intent.
        tracks = ( contents[i].contents for i in range(len(contents)) )
        # libvlc_media_tracks_release(mediaTrack_pp, n)
        return tracks
    def add_option(self, psz_options):
        '''Add an option to the media.
        This option will be used to determine how the media_player will
        read the media. This allows to use VLC's advanced
        reading/streaming options on a per-media basis.
        @note: The options are listed in 'vlc --long-help' from the command line,
        e.g. "-sout-all". Keep in mind that available options and their semantics
        vary across LibVLC versions and builds.
        @warning: Not all options affects L{Media} objects:
        Specifically, due to architectural issues most audio and video options,
        such as text renderer options, have no effects on an individual media.
        These options must be set through L{new}() instead.
        @param psz_options: the options (as a string).
        '''
        return libvlc_media_add_option(self, str_to_bytes(psz_options))
    def add_option_flag(self, psz_options, i_flags):
        '''Add an option to the media with configurable flags.
        This option will be used to determine how the media_player will
        read the media. This allows to use VLC's advanced
        reading/streaming options on a per-media basis.
        The options are detailed in vlc --long-help, for instance
        "--sout-all". Note that all options are not usable on medias:
        specifically, due to architectural issues, video-related options
        such as text renderer options cannot be set on a single media. They
        must be set on the whole libvlc instance instead.
        @param psz_options: the options (as a string).
        @param i_flags: the flags for this option.
        '''
        return libvlc_media_add_option_flag(self, str_to_bytes(psz_options), i_flags)
    def retain(self):
        '''Retain a reference to a media descriptor object (libvlc_media_t). Use
        L{release}() to decrement the reference count of a
        media descriptor object.
        '''
        return libvlc_media_retain(self)
    def release(self):
        '''Decrement the reference count of a media descriptor object. If the
        reference count is 0, then L{release}() will release the
        media descriptor object. It will send out an libvlc_MediaFreed event
        to all listeners. If the media descriptor object has been released it
        should not be used again.
        '''
        return libvlc_media_release(self)
    def get_mrl(self):
        '''Get the media resource locator (mrl) from a media descriptor object.
        @return: string with mrl of media descriptor object.
        '''
        return libvlc_media_get_mrl(self)
    def duplicate(self):
        '''Duplicate a media descriptor object.
        '''
        return libvlc_media_duplicate(self)
    def get_meta(self, e_meta):
        '''Read the meta of the media.
        If the media has not yet been parsed this will return None.
        See L{parse}
        See L{parse_with_options}
        See libvlc_MediaMetaChanged.
        @param e_meta: the meta to read.
        @return: the media's meta.
        '''
        return libvlc_media_get_meta(self, e_meta)
    def set_meta(self, e_meta, psz_value):
        '''Set the meta of the media (this function will not save the meta, call
        L{save_meta} in order to save the meta).
        @param e_meta: the meta to write.
        @param psz_value: the media's meta.
        '''
        return libvlc_media_set_meta(self, e_meta, str_to_bytes(psz_value))
    def save_meta(self):
        '''Save the meta previously set.
        @return: true if the write operation was successful.
        '''
        return libvlc_media_save_meta(self)
    def get_state(self):
        '''Get current state of media descriptor object. Possible media states are
        libvlc_NothingSpecial=0, libvlc_Opening, libvlc_Playing, libvlc_Paused,
        libvlc_Stopped, libvlc_Ended, libvlc_Error.
        See libvlc_state_t.
        @return: state of media descriptor object.
        '''
        return libvlc_media_get_state(self)
    def get_stats(self, p_stats):
        '''Get the current statistics about the media.
        @param p_stats:: structure that contain the statistics about the media (this structure must be allocated by the caller).
        @return: true if the statistics are available, false otherwise \libvlc_return_bool.
        '''
        # p_stats is a caller-allocated out-structure (ctypes), forwarded as-is.
        return libvlc_media_get_stats(self, p_stats)
    def subitems(self):
        '''Get subitems of media descriptor object. This will increment
        the reference count of supplied media descriptor object. Use
        L{list_release}() to decrement the reference counting.
        @return: list of media descriptor subitems or None.
        '''
        return libvlc_media_subitems(self)
    @memoize_parameterless
    def event_manager(self):
        '''Get event manager from media descriptor object.
        NOTE: this function doesn't increment reference counting.
        @return: event manager object.
        '''
        # Memoized: repeated calls return the same wrapper object.
        return libvlc_media_event_manager(self)
    def get_duration(self):
        '''Get duration (in ms) of media descriptor object item.
        @return: duration of media item or -1 on error.
        '''
        return libvlc_media_get_duration(self)
    def parse_with_options(self, parse_flag, timeout):
        '''Parse the media asynchronously with options.
        This fetches (local or network) art, meta data and/or tracks information.
        This method is the extended version of L{parse_with_options}().
        To track when this is over you can listen to libvlc_MediaParsedChanged
        event. However if this functions returns an error, you will not receive any
        events.
        It uses a flag to specify parse options (see libvlc_media_parse_flag_t). All
        these flags can be combined. By default, media is parsed if it's a local
        file.
        @note: Parsing can be aborted with L{parse_stop}().
        See libvlc_MediaParsedChanged
        See L{get_meta}
        See L{tracks_get}
        See L{get_parsed_status}
        See libvlc_media_parse_flag_t.
        @param parse_flag: parse options:
        @param timeout: maximum time allowed to preparse the media. If -1, the default "preparse-timeout" option will be used as a timeout. If 0, it will wait indefinitely. If > 0, the timeout will be used (in milliseconds).
        @return: -1 in case of error, 0 otherwise.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_parse_with_options(self, parse_flag, timeout)
    def parse_stop(self):
        '''Stop the parsing of the media
        When the media parsing is stopped, the libvlc_MediaParsedChanged event will
        be sent with the libvlc_media_parsed_status_timeout status.
        See L{parse_with_options}.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_parse_stop(self)
    def get_parsed_status(self):
        '''Get Parsed status for media descriptor object.
        See libvlc_MediaParsedChanged
        See libvlc_media_parsed_status_t.
        @return: a value of the libvlc_media_parsed_status_t enum.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_get_parsed_status(self)
    def set_user_data(self, p_new_user_data):
        '''Sets media descriptor's user_data. user_data is specialized data
        accessed by the host application, VLC.framework uses it as a pointer to
        an native object that references a L{Media} pointer.
        @param p_new_user_data: pointer to user data.
        '''
        return libvlc_media_set_user_data(self, p_new_user_data)
    def get_user_data(self):
        '''Get media descriptor's user_data. user_data is specialized data
        accessed by the host application, VLC.framework uses it as a pointer to
        an native object that references a L{Media} pointer.
        '''
        return libvlc_media_get_user_data(self)
    def get_type(self):
        '''Get the media type of the media descriptor object.
        @return: media type.
        @version: LibVLC 3.0.0 and later. See libvlc_media_type_t.
        '''
        return libvlc_media_get_type(self)
    def slaves_add(self, i_type, i_priority, psz_uri):
        '''Add a slave to the current media.
        A slave is an external input source that may contains an additional subtitle
        track (like a .srt) or an additional audio track (like a .ac3).
        @note: This function must be called before the media is parsed (via
        L{parse_with_options}()) or before the media is played (via
        L{player_play}()).
        @param i_type: subtitle or audio.
        @param i_priority: from 0 (low priority) to 4 (high priority).
        @param psz_uri: Uri of the slave (should contain a valid scheme).
        @return: 0 on success, -1 on error.
        @version: LibVLC 3.0.0 and later.
        '''
        return libvlc_media_slaves_add(self, i_type, i_priority, str_to_bytes(psz_uri))
    def slaves_clear(self):
        '''Clear all slaves previously added by L{slaves_add}() or
        internally.
        @version: LibVLC 3.0.0 and later.
        '''
        return libvlc_media_slaves_clear(self)
    def slaves_get(self, ppp_slaves):
        '''Get a media descriptor's slave list
        The list will contain slaves parsed by VLC or previously added by
        L{slaves_add}(). The typical use case of this function is to save
        a list of slave in a database for a later use.
        @param ppp_slaves: address to store an allocated array of slaves (must be freed with L{slaves_release}()) [OUT].
        @return: the number of slaves (zero on error).
        @version: LibVLC 3.0.0 and later. See L{slaves_add}.
        '''
        # ppp_slaves is an out-parameter (ctypes pointer-to-pointer), forwarded as-is.
        return libvlc_media_slaves_get(self, ppp_slaves)
    def parse(self):
        '''Parse a media.
        This fetches (local) art, meta data and tracks information.
        The method is synchronous.
        \deprecated This function could block indefinitely.
        Use L{parse_with_options}() instead
        See L{parse_with_options}
        See L{get_meta}
        See L{get_tracks_info}.
        '''
        # Deprecated synchronous parse; may block indefinitely (see above).
        return libvlc_media_parse(self)
    def parse_async(self):
        '''Parse a media.
        This fetches (local) art, meta data and tracks information.
        The method is the asynchronous of L{parse}().
        To track when this is over you can listen to libvlc_MediaParsedChanged
        event. However if the media was already parsed you will not receive this
        event.
        \deprecated You can't be sure to receive the libvlc_MediaParsedChanged
        event (you can wait indefinitely for this event).
        Use L{parse_with_options}() instead
        See L{parse}
        See libvlc_MediaParsedChanged
        See L{get_meta}
        See L{get_tracks_info}.
        '''
        return libvlc_media_parse_async(self)
    def is_parsed(self):
        '''Return true is the media descriptor object is parsed
        \deprecated This can return true in case of failure.
        Use L{get_parsed_status}() instead
        See libvlc_MediaParsedChanged.
        @return: true if media object has been parsed otherwise it returns false \libvlc_return_bool.
        '''
        return libvlc_media_is_parsed(self)
    def get_tracks_info(self):
        '''Get media descriptor's elementary streams description
        Note, you need to call L{parse}() or play the media at least once
        before calling this function.
        Not doing this will result in an empty array.
        \deprecated Use L{tracks_get}() instead.
        @param tracks: address to store an allocated array of Elementary Streams descriptions (must be freed by the caller) [OUT].
        @return: the number of Elementary Streams.
        '''
        # NOTE(review): docstring documents a `tracks` out-parameter but the Python
        # wrapper takes none — generator artifact; the C call is made with no extra args.
        return libvlc_media_get_tracks_info(self)
    def player_new_from_media(self):
        '''Create a Media Player object from a Media.
        @return: a new media player object, or None on error.
        '''
        return libvlc_media_player_new_from_media(self)
class MediaDiscoverer(_Ctype):
    '''N/A
    '''
    def __new__(cls, ptr=_internal_guard):
        '''(INTERNAL) ctypes wrapper constructor.
        '''
        return _Constructor(cls, ptr)
    def start(self):
        '''Start media discovery.
        To stop it, call L{stop}() or
        L{list_release}() directly.
        See L{stop}.
        @return: -1 in case of error, 0 otherwise.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_discoverer_start(self)
    def stop(self):
        '''Stop media discovery.
        See L{start}.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_discoverer_stop(self)
    def release(self):
        '''Release media discover object. If the reference count reaches 0, then
        the object will be released.
        '''
        return libvlc_media_discoverer_release(self)
    def media_list(self):
        '''Get media service discover media list.
        @return: list of media items.
        '''
        return libvlc_media_discoverer_media_list(self)
    def is_running(self):
        '''Query if media service discover object is running.
        @return: true if running, false if not \libvlc_return_bool.
        '''
        return libvlc_media_discoverer_is_running(self)
    def localized_name(self):
        '''Get media service discover object its localized name.
        \deprecated Useless, use L{list_get}() to get the
        longname of the service discovery.
        @return: localized name or None if the media_discoverer is not started.
        '''
        # Deprecated; retained for API compatibility.
        return libvlc_media_discoverer_localized_name(self)
    @memoize_parameterless
    def event_manager(self):
        '''Get event manager from media service discover object.
        \deprecated Useless, media_discoverer events are only triggered when calling
        L{start}() and L{stop}().
        @return: event manager object.
        '''
        # Memoized: repeated calls return the same wrapper object.
        return libvlc_media_discoverer_event_manager(self)
class MediaLibrary(_Ctype):
    '''N/A
    '''
    def __new__(cls, ptr=_internal_guard):
        '''(INTERNAL) ctypes wrapper constructor.
        '''
        return _Constructor(cls, ptr)
    def release(self):
        '''Release media library object. This functions decrements the
        reference count of the media library object. If it reaches 0,
        then the object will be released.
        '''
        return libvlc_media_library_release(self)
    def retain(self):
        '''Retain a reference to a media library object. This function will
        increment the reference counting for this object. Use
        L{release}() to decrement the reference count.
        '''
        return libvlc_media_library_retain(self)
    def load(self):
        '''Load media library.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_media_library_load(self)
    def media_list(self):
        '''Get media library subitems.
        @return: media list subitems.
        '''
        return libvlc_media_library_media_list(self)
class MediaList(_Ctype):
    '''Create a new MediaList instance.
    Usage: MediaList(list_of_MRLs)
    See vlc.Instance.media_list_new documentation for details.
    '''
    def __new__(cls, *args):
        # Three construction paths: raw pointer (internal), explicit Instance,
        # or the process-wide default Instance.
        if args:
            i = args[0]
            if isinstance(i, _Ints):
                return _Constructor(cls, i)
            if isinstance(i, Instance):
                return i.media_list_new(*args[1:])
        o = get_default_instance().media_list_new(*args)
        return o
    def get_instance(self):
        # Returns the owning Instance if one was recorded, else None.
        return getattr(self, '_instance', None)
    def add_media(self, mrl):
        """Add media instance to media list.
        The L{lock} should be held upon entering this function.
        @param mrl: a media instance or a MRL.
        @return: 0 on success, -1 if the media list is read-only.
        """
        # basestring is presumably the py2/py3 compat alias defined earlier in
        # this module; a plain string MRL is first turned into a Media instance.
        if isinstance(mrl, basestring):
            mrl = (self.get_instance() or get_default_instance()).media_new(mrl)
        return libvlc_media_list_add_media(self, mrl)
    def release(self):
        '''Release media list created with L{new}().
        '''
        return libvlc_media_list_release(self)
    def retain(self):
        '''Retain reference to a media list.
        '''
        return libvlc_media_list_retain(self)
    def set_media(self, p_md):
        '''Associate media instance with this media list instance.
        If another media instance was present it will be released.
        The L{lock} should NOT be held upon entering this function.
        @param p_md: media instance to add.
        '''
        return libvlc_media_list_set_media(self, p_md)
    def media(self):
        '''Get media instance from this media list instance. This action will increase
        the refcount on the media instance.
        The L{lock} should NOT be held upon entering this function.
        @return: media instance.
        '''
        return libvlc_media_list_media(self)
    def insert_media(self, p_md, i_pos):
        '''Insert media instance in media list on a position
        The L{lock} should be held upon entering this function.
        @param p_md: a media instance.
        @param i_pos: position in array where to insert.
        @return: 0 on success, -1 if the media list is read-only.
        '''
        return libvlc_media_list_insert_media(self, p_md, i_pos)
    def remove_index(self, i_pos):
        '''Remove media instance from media list on a position
        The L{lock} should be held upon entering this function.
        @param i_pos: position in array where to insert.
        @return: 0 on success, -1 if the list is read-only or the item was not found.
        '''
        return libvlc_media_list_remove_index(self, i_pos)
    def count(self):
        '''Get count on media list items
        The L{lock} should be held upon entering this function.
        @return: number of items in media list.
        '''
        return libvlc_media_list_count(self)
    def __len__(self):
        # Enables len(media_list); same locking caveat as count().
        return libvlc_media_list_count(self)
    def item_at_index(self, i_pos):
        '''List media instance in media list at a position
        The L{lock} should be held upon entering this function.
        @param i_pos: position in array where to insert.
        @return: media instance at position i_pos, or None if not found. In case of success, L{media_retain}() is called to increase the refcount on the media.
        '''
        return libvlc_media_list_item_at_index(self, i_pos)
    def __getitem__(self, i):
        # Enables media_list[i]; same refcount note as item_at_index().
        return libvlc_media_list_item_at_index(self, i)
    def __iter__(self):
        # Index-based iteration via __len__ and __getitem__.
        for i in range(len(self)):
            yield self[i]
    def index_of_item(self, p_md):
        '''Find index position of List media instance in media list.
        Warning: the function will return the first matched position.
        The L{lock} should be held upon entering this function.
        @param p_md: media instance.
        @return: position of media instance or -1 if media not found.
        '''
        return libvlc_media_list_index_of_item(self, p_md)
    def is_readonly(self):
        '''This indicates if this media list is read-only from a user point of view.
        @return: 1 on readonly, 0 on readwrite \libvlc_return_bool.
        '''
        return libvlc_media_list_is_readonly(self)
    def lock(self):
        '''Get lock on media list items.
        '''
        return libvlc_media_list_lock(self)
    def unlock(self):
        '''Release lock on media list items
        The L{lock} should be held upon entering this function.
        '''
        return libvlc_media_list_unlock(self)
    @memoize_parameterless
    def event_manager(self):
        '''Get libvlc_event_manager from this media list instance.
        The p_event_manager is immutable, so you don't have to hold the lock.
        @return: libvlc_event_manager.
        '''
        # Memoized: repeated calls return the same wrapper object.
        return libvlc_media_list_event_manager(self)
class MediaListPlayer(_Ctype):
    '''Create a new MediaListPlayer instance.
    It may take as parameter either:
    - a vlc.Instance
    - nothing
    '''
    def __new__(cls, arg=None):
        # Accepts: nothing (use the default Instance), an Instance, or a raw
        # pointer (internal construction path).
        if arg is None:
            i = get_default_instance()
        elif isinstance(arg, Instance):
            i = arg
        elif isinstance(arg, _Ints):
            return _Constructor(cls, arg)
        else:
            raise TypeError('MediaListPlayer %r' % (arg,))
        return i.media_list_player_new()
    def get_instance(self):
        """Return the associated Instance.
        """
        return self._instance #PYCHOK expected
    def release(self):
        '''Release a media_list_player after use
        Decrement the reference count of a media player object. If the
        reference count is 0, then L{release}() will
        release the media player object. If the media player object
        has been released, then it should not be used again.
        '''
        return libvlc_media_list_player_release(self)
    def retain(self):
        '''Retain a reference to a media player list object. Use
        L{release}() to decrement reference count.
        '''
        return libvlc_media_list_player_retain(self)
    @memoize_parameterless
    def event_manager(self):
        '''Return the event manager of this media_list_player.
        @return: the event manager.
        '''
        # Memoized: repeated calls return the same wrapper object.
        return libvlc_media_list_player_event_manager(self)
    def set_media_player(self, p_mi):
        '''Replace media player in media_list_player with this instance.
        @param p_mi: media player instance.
        '''
        return libvlc_media_list_player_set_media_player(self, p_mi)
    def get_media_player(self):
        '''Get media player of the media_list_player instance.
        @return: media player instance @note the caller is responsible for releasing the returned instance.
        '''
        return libvlc_media_list_player_get_media_player(self)
    def set_media_list(self, p_mlist):
        '''Set the media list associated with the player.
        @param p_mlist: list of media.
        '''
        return libvlc_media_list_player_set_media_list(self, p_mlist)
    def play(self):
        '''Play media list.
        '''
        return libvlc_media_list_player_play(self)
    def pause(self):
        '''Toggle pause (or resume) media list.
        '''
        return libvlc_media_list_player_pause(self)
    def is_playing(self):
        '''Is media list playing?
        @return: true for playing and false for not playing \libvlc_return_bool.
        '''
        return libvlc_media_list_player_is_playing(self)
    def get_state(self):
        '''Get current libvlc_state of media list player.
        @return: libvlc_state_t for media list player.
        '''
        return libvlc_media_list_player_get_state(self)
    def play_item_at_index(self, i_index):
        '''Play media list item at position index.
        @param i_index: index in media list to play.
        @return: 0 upon success -1 if the item wasn't found.
        '''
        return libvlc_media_list_player_play_item_at_index(self, i_index)
    def __getitem__(self, i):
        # Indexing plays the item (side effect), mirroring play_item_at_index().
        return libvlc_media_list_player_play_item_at_index(self, i)
    def __iter__(self):
        # NOTE(review): this calls len(self), but MediaListPlayer defines no
        # __len__ — iterating raises TypeError. Looks like a generator artifact
        # copied from MediaList; there is no length API on a media_list_player
        # to implement __len__ with, so this should probably be removed upstream.
        for i in range(len(self)):
            yield self[i]
    def play_item(self, p_md):
        '''Play the given media item.
        @param p_md: the media instance.
        @return: 0 upon success, -1 if the media is not part of the media list.
        '''
        return libvlc_media_list_player_play_item(self, p_md)
    def stop(self):
        '''Stop playing media list.
        '''
        return libvlc_media_list_player_stop(self)
    def next(self):
        '''Play next item from media list.
        @return: 0 upon success -1 if there is no next item.
        '''
        return libvlc_media_list_player_next(self)
    def previous(self):
        '''Play previous item from media list.
        @return: 0 upon success -1 if there is no previous item.
        '''
        return libvlc_media_list_player_previous(self)
    def set_playback_mode(self, e_mode):
        '''Sets the playback mode for the playlist.
        @param e_mode: playback mode specification.
        '''
        return libvlc_media_list_player_set_playback_mode(self, e_mode)
class MediaPlayer(_Ctype):
'''Create a new MediaPlayer instance.
It may take as parameter either:
- a string (media URI), options... In this case, a vlc.Instance will be created.
- a vlc.Instance, a string (media URI), options...
'''
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], _Ints):
return _Constructor(cls, args[0])
if args and isinstance(args[0], Instance):
instance = args[0]
args = args[1:]
else:
instance = get_default_instance()
o = instance.media_player_new()
if args:
o.set_media(instance.media_new(*args))
return o
    def get_instance(self):
        """Return the vlc.Instance associated with this media player.
        """
        return self._instance #PYCHOK expected
def set_mrl(self, mrl, *options):
"""Set the MRL to play.
Warning: most audio and video options, such as text renderer,
have no effects on an individual media. These options must be
set at the vlc.Instance or vlc.MediaPlayer instanciation.
@param mrl: The MRL
@param options: optional media option=value strings
@return: the Media object
"""
m = self.get_instance().media_new(mrl, *options)
self.set_media(m)
return m
    # The following getters convert the raw C track-description linked list
    # returned by libvlc into a Python list via track_description_list().
    def video_get_spu_description(self):
        """Get the description of available video subtitles.
        """
        return track_description_list(libvlc_video_get_spu_description(self))
    def video_get_title_description(self):
        """Get the description of available titles.
        """
        return track_description_list(libvlc_video_get_title_description(self))
    def video_get_chapter_description(self, title):
        """Get the description of available chapters for specific title.
        @param title: selected title (int)
        """
        return track_description_list(libvlc_video_get_chapter_description(self, title))
    def video_get_track_description(self):
        """Get the description of available video tracks.
        """
        return track_description_list(libvlc_video_get_track_description(self))
    def audio_get_track_description(self):
        """Get the description of available audio tracks.
        """
        return track_description_list(libvlc_audio_get_track_description(self))
def get_full_title_descriptions(self):
'''Get the full description of available titles.
@return: the titles list
@version: LibVLC 3.0.0 and later.
'''
titleDescription_pp = ctypes.POINTER(TitleDescription)()
n = libvlc_media_player_get_full_title_descriptions(self, ctypes.byref(titleDescription_pp))
info = ctypes.cast(ctypes.titleDescription_pp, ctypes.POINTER(ctypes.POINTER(TitleDescription) * n))
return info
def get_full_chapter_descriptions(self, i_chapters_of_title):
'''Get the full description of available chapters.
@param i_chapters_of_title: index of the title to query for chapters (uses current title if set to -1).
@return: the chapters list
@version: LibVLC 3.0.0 and later.
'''
chapterDescription_pp = ctypes.POINTER(ChapterDescription)()
n = libvlc_media_player_get_full_chapter_descriptions(self, ctypes.byref(chapterDescription_pp))
info = ctypes.cast(ctypes.chapterDescription_pp, ctypes.POINTER(ctypes.POINTER(ChapterDescription) * n))
return info
def video_get_size(self, num=0):
"""Get the video size in pixels as 2-tuple (width, height).
@param num: video number (default 0).
"""
r = libvlc_video_get_size(self, num)
if isinstance(r, tuple) and len(r) == 2:
return r
else:
raise VLCException('invalid video number (%s)' % (num,))
def set_hwnd(self, drawable):
"""Set a Win32/Win64 API window handle (HWND).
Specify where the media player should render its video
output. If LibVLC was built without Win32/Win64 API output
support, then this has no effects.
@param drawable: windows handle of the drawable.
"""
if not isinstance(drawable, ctypes.c_void_p):
drawable = ctypes.c_void_p(int(drawable))
libvlc_media_player_set_hwnd(self, drawable)
    def video_get_width(self, num=0):
        """Get the width of a video in pixels.
        @param num: video number (default 0).
        """
        # Convenience accessor over video_get_size(); a VLCException for an
        # invalid video number propagates from there.
        return self.video_get_size(num)[0]
    def video_get_height(self, num=0):
        """Get the height of a video in pixels.
        @param num: video number (default 0).
        """
        # Convenience accessor over video_get_size(); a VLCException for an
        # invalid video number propagates from there.
        return self.video_get_size(num)[1]
def video_get_cursor(self, num=0):
"""Get the mouse pointer coordinates over a video as 2-tuple (x, y).
Coordinates are expressed in terms of the decoded video resolution,
B{not} in terms of pixels on the screen/viewport. To get the
latter, you must query your windowing system directly.
Either coordinate may be negative or larger than the corresponding
size of the video, if the cursor is outside the rendering area.
@warning: The coordinates may be out-of-date if the pointer is not
located on the video rendering area. LibVLC does not track the
mouse pointer if the latter is outside the video widget.
@note: LibVLC does not support multiple mouse pointers (but does
support multiple input devices sharing the same pointer).
@param num: video number (default 0).
"""
r = libvlc_video_get_cursor(self, num)
if isinstance(r, tuple) and len(r) == 2:
return r
raise VLCException('invalid video number (%s)' % (num,))
    # NOTE(review): get_fps, set_agl, get_agl and video_set_subtitle_file are
    # marked \deprecated in their docstrings; kept for API compatibility.
    def get_fps(self):
        '''Get movie fps rate
        This function is provided for backward compatibility. It cannot deal with
        multiple video tracks. In LibVLC versions prior to 3.0, it would also fail
        if the file format did not convey the frame rate explicitly.
        \deprecated Consider using L{media_tracks_get}() instead.
        @return: frames per second (fps) for this playing movie, or 0 if unspecified.
        '''
        return libvlc_media_player_get_fps(self)
    def set_agl(self, drawable):
        '''\deprecated Use L{set_nsobject}() instead.
        '''
        return libvlc_media_player_set_agl(self, drawable)
    def get_agl(self):
        '''\deprecated Use L{get_nsobject}() instead.
        '''
        return libvlc_media_player_get_agl(self)
    def video_set_subtitle_file(self, psz_subtitle):
        '''Set new video subtitle file.
        \deprecated Use L{add_slave}() instead.
        @param psz_subtitle: new video subtitle file.
        @return: the success status (boolean).
        '''
        return libvlc_video_set_subtitle_file(self, str_to_bytes(psz_subtitle))
    def release(self):
        '''Release a media_player after use
        Decrement the reference count of a media player object. If the
        reference count is 0, then L{release}() will
        release the media player object. If the media player object
        has been released, then it should not be used again.
        '''
        return libvlc_media_player_release(self)
    def retain(self):
        '''Retain a reference to a media player object. Use
        L{release}() to decrement reference count.
        '''
        return libvlc_media_player_retain(self)
    def set_media(self, p_md):
        '''Set the media that will be used by the media_player. If any,
        previous md will be released.
        @param p_md: the Media. Afterwards the p_md can be safely destroyed.
        '''
        return libvlc_media_player_set_media(self, p_md)
    def get_media(self):
        '''Get the media used by the media_player.
        @return: the media associated with p_mi, or None if no media is associated.
        '''
        return libvlc_media_player_get_media(self)
    @memoize_parameterless
    def event_manager(self):
        '''Get the Event Manager from which the media player send event.
        @return: the event manager associated with p_mi.
        '''
        return libvlc_media_player_event_manager(self)
    def is_playing(self):
        '''is_playing.
        @return: 1 if the media player is playing, 0 otherwise \libvlc_return_bool.
        '''
        return libvlc_media_player_is_playing(self)
    def play(self):
        '''Play.
        @return: 0 if playback started (and was already started), or -1 on error.
        '''
        return libvlc_media_player_play(self)
    def set_pause(self, do_pause):
        '''Pause or resume (no effect if there is no media).
        @param do_pause: play/resume if zero, pause if non-zero.
        @version: LibVLC 1.1.1 or later.
        '''
        return libvlc_media_player_set_pause(self, do_pause)
    def pause(self):
        '''Toggle pause (no effect if there is no media).
        '''
        return libvlc_media_player_pause(self)
    def stop(self):
        '''Stop (no effect if there is no media).
        '''
        return libvlc_media_player_stop(self)
    def set_renderer(self, p_item):
        '''Set a renderer to the media player
        @note: must be called before the first call of L{play}() to
        take effect.
        See L{renderer_discoverer_new}.
        @param p_item: an item discovered by L{renderer_discoverer_start}().
        @return: 0 on success, -1 on error.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_player_set_renderer(self, p_item)
    def video_set_callbacks(self, lock, unlock, display, opaque):
        '''Set callbacks and private data to render decoded video to a custom area
        in memory.
        Use L{video_set_format}() or L{video_set_format_callbacks}()
        to configure the decoded format.
        @warning: Rendering video into custom memory buffers is considerably less
        efficient than rendering in a custom window as normal.
        For optimal perfomances, VLC media player renders into a custom window, and
        does not use this function and associated callbacks. It is B{highly
        recommended} that other LibVLC-based application do likewise.
        To embed video in a window, use libvlc_media_player_set_xid() or equivalent
        depending on the operating system.
        If window embedding does not fit the application use case, then a custom
        LibVLC video output display plugin is required to maintain optimal video
        rendering performances.
        The following limitations affect performance:
        - Hardware video decoding acceleration will either be disabled completely,
        or require (relatively slow) copy from video/DSP memory to main memory.
        - Sub-pictures (subtitles, on-screen display, etc.) must be blent into the
        main picture by the CPU instead of the GPU.
        - Depending on the video format, pixel format conversion, picture scaling,
        cropping and/or picture re-orientation, must be performed by the CPU
        instead of the GPU.
        - Memory copying is required between LibVLC reference picture buffers and
        application buffers (between lock and unlock callbacks).
        @param lock: callback to lock video memory (must not be None).
        @param unlock: callback to unlock video memory (or None if not needed).
        @param display: callback to display video (or None if not needed).
        @param opaque: private pointer for the three callbacks (as first parameter).
        @version: LibVLC 1.1.1 or later.
        '''
        return libvlc_video_set_callbacks(self, lock, unlock, display, opaque)
    def video_set_format(self, chroma, width, height, pitch):
        '''Set decoded video chroma and dimensions.
        This only works in combination with L{video_set_callbacks}(),
        and is mutually exclusive with L{video_set_format_callbacks}().
        @param chroma: a four-characters string identifying the chroma (e.g. "RV32" or "YUYV").
        @param width: pixel width.
        @param height: pixel height.
        @param pitch: line pitch (in bytes).
        @version: LibVLC 1.1.1 or later.
        @bug: All pixel planes are expected to have the same pitch. To use the YCbCr color space with chrominance subsampling, consider using L{video_set_format_callbacks}() instead.
        '''
        return libvlc_video_set_format(self, str_to_bytes(chroma), width, height, pitch)
    def video_set_format_callbacks(self, setup, cleanup):
        '''Set decoded video chroma and dimensions. This only works in combination with
        L{video_set_callbacks}().
        @param setup: callback to select the video format (cannot be None).
        @param cleanup: callback to release any allocated resources (or None).
        @version: LibVLC 2.0.0 or later.
        '''
        return libvlc_video_set_format_callbacks(self, setup, cleanup)
    def set_nsobject(self, drawable):
        '''Set the NSView handler where the media player should render its video output.
        Use the vout called "macosx".
        The drawable is an NSObject that follow the VLCOpenGLVideoViewEmbedding
        protocol:
        @code.m
        \@protocol VLCOpenGLVideoViewEmbedding <NSObject>
        - (void)addVoutSubview:(NSView *)view;
        - (void)removeVoutSubview:(NSView *)view;
        \@end
        @endcode
        Or it can be an NSView object.
        If you want to use it along with Qt see the QMacCocoaViewContainer. Then
        the following code should work:
        @code.mm
        NSView *video = [[NSView alloc] init];
        QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent);
        L{set_nsobject}(mp, video);
        [video release];
        @endcode
        You can find a live example in VLCVideoView in VLCKit.framework.
        @param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol.
        '''
        return libvlc_media_player_set_nsobject(self, drawable)
    # Thin one-call wrappers over the libvlc C API; docstrings below are
    # generated from the upstream doxygen documentation.
    def get_nsobject(self):
        '''Get the NSView handler previously set with L{set_nsobject}().
        @return: the NSView handler or 0 if none where set.
        '''
        return libvlc_media_player_get_nsobject(self)
    def set_xwindow(self, drawable):
        '''Set an X Window System drawable where the media player should render its
        video output. The call takes effect when the playback starts. If it is
        already started, it might need to be stopped before changes apply.
        If LibVLC was built without X11 output support, then this function has no
        effects.
        By default, LibVLC will capture input events on the video rendering area.
        Use L{video_set_mouse_input}() and L{video_set_key_input}() to
        disable that and deliver events to the parent window / to the application
        instead. By design, the X11 protocol delivers input events to only one
        recipient.
        @warning
        The application must call the XInitThreads() function from Xlib before
        L{new}(), and before any call to XOpenDisplay() directly or via any
        other library. Failure to call XInitThreads() will seriously impede LibVLC
        performance. Calling XOpenDisplay() before XInitThreads() will eventually
        crash the process. That is a limitation of Xlib.
        @param drawable: X11 window ID @note The specified identifier must correspond to an existing Input/Output class X11 window. Pixmaps are B{not} currently supported. The default X11 server is assumed, i.e. that specified in the DISPLAY environment variable. @warning LibVLC can deal with invalid X11 handle errors, however some display drivers (EGL, GLX, VA and/or VDPAU) can unfortunately not. Thus the window handle must remain valid until playback is stopped, otherwise the process may abort or crash.
        @bug No more than one window handle per media player instance can be specified. If the media has multiple simultaneously active video tracks, extra tracks will be rendered into external windows beyond the control of the application.
        '''
        return libvlc_media_player_set_xwindow(self, drawable)
    def get_xwindow(self):
        '''Get the X Window System window identifier previously set with
        L{set_xwindow}(). Note that this will return the identifier
        even if VLC is not currently using it (for instance if it is playing an
        audio-only input).
        @return: an X window ID, or 0 if none where set.
        '''
        return libvlc_media_player_get_xwindow(self)
    def get_hwnd(self):
        '''Get the Windows API window handle (HWND) previously set with
        L{set_hwnd}(). The handle will be returned even if LibVLC
        is not currently outputting any video to it.
        @return: a window handle or None if there are none.
        '''
        return libvlc_media_player_get_hwnd(self)
    def set_android_context(self, p_awindow_handler):
        '''Set the android context.
        @param p_awindow_handler: org.videolan.libvlc.AWindow jobject owned by the org.videolan.libvlc.MediaPlayer class from the libvlc-android project.
        @version: LibVLC 3.0.0 and later.
        '''
        return libvlc_media_player_set_android_context(self, p_awindow_handler)
    def set_evas_object(self, p_evas_object):
        '''Set the EFL Evas Object.
        @param p_evas_object: a valid EFL Evas Object (Evas_Object).
        @return: -1 if an error was detected, 0 otherwise.
        @version: LibVLC 3.0.0 and later.
        '''
        return libvlc_media_player_set_evas_object(self, p_evas_object)
    def audio_set_callbacks(self, play, pause, resume, flush, drain, opaque):
        '''Sets callbacks and private data for decoded audio.
        Use L{audio_set_format}() or L{audio_set_format_callbacks}()
        to configure the decoded audio format.
        @note: The audio callbacks override any other audio output mechanism.
        If the callbacks are set, LibVLC will B{not} output audio in any way.
        @param play: callback to play audio samples (must not be None).
        @param pause: callback to pause playback (or None to ignore).
        @param resume: callback to resume playback (or None to ignore).
        @param flush: callback to flush audio buffers (or None to ignore).
        @param drain: callback to drain audio buffers (or None to ignore).
        @param opaque: private pointer for the audio callbacks (as first parameter).
        @version: LibVLC 2.0.0 or later.
        '''
        return libvlc_audio_set_callbacks(self, play, pause, resume, flush, drain, opaque)
    def audio_set_volume_callback(self, set_volume):
        '''Set callbacks and private data for decoded audio. This only works in
        combination with L{audio_set_callbacks}().
        Use L{audio_set_format}() or L{audio_set_format_callbacks}()
        to configure the decoded audio format.
        @param set_volume: callback to apply audio volume, or None to apply volume in software.
        @version: LibVLC 2.0.0 or later.
        '''
        return libvlc_audio_set_volume_callback(self, set_volume)
    def audio_set_format_callbacks(self, setup, cleanup):
        '''Sets decoded audio format via callbacks.
        This only works in combination with L{audio_set_callbacks}().
        @param setup: callback to select the audio format (cannot be None).
        @param cleanup: callback to release any allocated resources (or None).
        @version: LibVLC 2.0.0 or later.
        '''
        return libvlc_audio_set_format_callbacks(self, setup, cleanup)
    def audio_set_format(self, format, rate, channels):
        '''Sets a fixed decoded audio format.
        This only works in combination with L{audio_set_callbacks}(),
        and is mutually exclusive with L{audio_set_format_callbacks}().
        @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
        @param rate: sample rate (expressed in Hz).
        @param channels: channels count.
        @version: LibVLC 2.0.0 or later.
        '''
        return libvlc_audio_set_format(self, str_to_bytes(format), rate, channels)
    def get_length(self):
        '''Get the current movie length (in ms).
        @return: the movie length (in ms), or -1 if there is no media.
        '''
        return libvlc_media_player_get_length(self)
    def get_time(self):
        '''Get the current movie time (in ms).
        @return: the movie time (in ms), or -1 if there is no media.
        '''
        return libvlc_media_player_get_time(self)
    def set_time(self, i_time):
        '''Set the movie time (in ms). This has no effect if no media is being played.
        Not all formats and protocols support this.
        @param i_time: the movie time (in ms).
        '''
        return libvlc_media_player_set_time(self, i_time)
    def get_position(self):
        '''Get movie position as percentage between 0.0 and 1.0.
        @return: movie position, or -1. in case of error.
        '''
        return libvlc_media_player_get_position(self)
    def set_position(self, f_pos):
        '''Set movie position as percentage between 0.0 and 1.0.
        This has no effect if playback is not enabled.
        This might not work depending on the underlying input format and protocol.
        @param f_pos: the position.
        '''
        return libvlc_media_player_set_position(self, f_pos)
    def set_chapter(self, i_chapter):
        '''Set movie chapter (if applicable).
        @param i_chapter: chapter number to play.
        '''
        return libvlc_media_player_set_chapter(self, i_chapter)
    def get_chapter(self):
        '''Get movie chapter.
        @return: chapter number currently playing, or -1 if there is no media.
        '''
        return libvlc_media_player_get_chapter(self)
    def get_chapter_count(self):
        '''Get movie chapter count.
        @return: number of chapters in movie, or -1.
        '''
        return libvlc_media_player_get_chapter_count(self)
    def will_play(self):
        '''Is the player able to play.
        @return: boolean \libvlc_return_bool.
        '''
        return libvlc_media_player_will_play(self)
    def get_chapter_count_for_title(self, i_title):
        '''Get title chapter count.
        @param i_title: title.
        @return: number of chapters in title, or -1.
        '''
        return libvlc_media_player_get_chapter_count_for_title(self, i_title)
    def set_title(self, i_title):
        '''Set movie title.
        @param i_title: title number to play.
        '''
        return libvlc_media_player_set_title(self, i_title)
    def get_title(self):
        '''Get movie title.
        @return: title number currently playing, or -1.
        '''
        return libvlc_media_player_get_title(self)
    def get_title_count(self):
        '''Get movie title count.
        @return: title number count, or -1.
        '''
        return libvlc_media_player_get_title_count(self)
    def previous_chapter(self):
        '''Set previous chapter (if applicable).
        '''
        return libvlc_media_player_previous_chapter(self)
    def next_chapter(self):
        '''Set next chapter (if applicable).
        '''
        return libvlc_media_player_next_chapter(self)
    def get_rate(self):
        '''Get the requested movie play rate.
        @warning: Depending on the underlying media, the requested rate may be
        different from the real playback rate.
        @return: movie play rate.
        '''
        return libvlc_media_player_get_rate(self)
    def set_rate(self, rate):
        '''Set movie play rate.
        @param rate: movie play rate to set.
        @return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol).
        '''
        return libvlc_media_player_set_rate(self, rate)
    def get_state(self):
        '''Get current movie state.
        @return: the current state of the media player (playing, paused, ...) See libvlc_state_t.
        '''
        return libvlc_media_player_get_state(self)
    def has_vout(self):
        '''How many video outputs does this media player have?
        @return: the number of video outputs.
        '''
        return libvlc_media_player_has_vout(self)
    def is_seekable(self):
        '''Is this media player seekable?
        @return: true if the media player can seek \libvlc_return_bool.
        '''
        return libvlc_media_player_is_seekable(self)
    def can_pause(self):
        '''Can this media player be paused?
        @return: true if the media player can pause \libvlc_return_bool.
        '''
        return libvlc_media_player_can_pause(self)
    def program_scrambled(self):
        '''Check if the current program is scrambled.
        @return: true if the current program is scrambled \libvlc_return_bool.
        @version: LibVLC 2.2.0 or later.
        '''
        return libvlc_media_player_program_scrambled(self)
    def next_frame(self):
        '''Display the next frame (if supported).
        '''
        return libvlc_media_player_next_frame(self)
    def navigate(self, navigate):
        '''Navigate through DVD Menu.
        @param navigate: the Navigation mode.
        @version: libVLC 2.0.0 or later.
        '''
        return libvlc_media_player_navigate(self, navigate)
    def set_video_title_display(self, position, timeout):
        '''Set if, and how, the video title will be shown when media is played.
        @param position: position at which to display the title, or libvlc_position_disable to prevent the title from being displayed.
        @param timeout: title display timeout in milliseconds (ignored if libvlc_position_disable).
        @version: libVLC 2.1.0 or later.
        '''
        return libvlc_media_player_set_video_title_display(self, position, timeout)
    def add_slave(self, i_type, psz_uri, b_select):
        '''Add a slave to the current media player.
        @note: If the player is playing, the slave will be added directly. This call
        will also update the slave list of the attached L{Media}.
        @param i_type: subtitle or audio.
        @param psz_uri: Uri of the slave (should contain a valid scheme).
        @param b_select: True if this slave should be selected when it's loaded.
        @return: 0 on success, -1 on error.
        @version: LibVLC 3.0.0 and later. See L{media_slaves_add}.
        '''
        return libvlc_media_player_add_slave(self, i_type, str_to_bytes(psz_uri), b_select)
def toggle_fullscreen(self):
'''Toggle fullscreen status on non-embedded video outputs.
@warning: The same limitations applies to this function
as to L{set_fullscreen}().
'''
return libvlc_toggle_fullscreen(self)
def set_fullscreen(self, b_fullscreen):
'''Enable or disable fullscreen.
@warning: With most window managers, only a top-level windows can be in
full-screen mode. Hence, this function will not operate properly if
L{set_xwindow}() was used to embed the video in a
non-top-level window. In that case, the embedding window must be reparented
to the root window B{before} fullscreen mode is enabled. You will want
to reparent it back to its normal parent when disabling fullscreen.
@param b_fullscreen: boolean for fullscreen status.
'''
return libvlc_set_fullscreen(self, b_fullscreen)
def get_fullscreen(self):
'''Get current fullscreen status.
@return: the fullscreen status (boolean) \libvlc_return_bool.
'''
return libvlc_get_fullscreen(self)
def video_set_key_input(self, on):
'''Enable or disable key press events handling, according to the LibVLC hotkeys
configuration. By default and for historical reasons, keyboard events are
handled by the LibVLC video widget.
@note: On X11, there can be only one subscriber for key press and mouse
click events per window. If your application has subscribed to those events
for the X window ID of the video widget, then LibVLC will not be able to
handle key presses and mouse clicks in any case.
@warning: This function is only implemented for X11 and Win32 at the moment.
@param on: true to handle key press events, false to ignore them.
'''
return libvlc_video_set_key_input(self, on)
def video_set_mouse_input(self, on):
'''Enable or disable mouse click events handling. By default, those events are
handled. This is needed for DVD menus to work, as well as a few video
filters such as "puzzle".
See L{video_set_key_input}().
@warning: This function is only implemented for X11 and Win32 at the moment.
@param on: true to handle mouse click events, false to ignore them.
'''
return libvlc_video_set_mouse_input(self, on)
def video_get_scale(self):
'''Get the current video scaling factor.
See also L{video_set_scale}().
@return: the currently configured zoom factor, or 0. if the video is set to fit to the output window/drawable automatically.
'''
return libvlc_video_get_scale(self)
def video_set_scale(self, f_factor):
'''Set the video scaling factor. That is the ratio of the number of pixels on
screen to the number of pixels in the original decoded video in each
dimension. Zero is a special value; it will adjust the video to the output
window/drawable (in windowed mode) or the entire screen.
Note that not all video outputs support scaling.
@param f_factor: the scaling factor, or zero.
'''
return libvlc_video_set_scale(self, f_factor)
def video_get_aspect_ratio(self):
'''Get current video aspect ratio.
@return: the video aspect ratio or None if unspecified (the result must be released with free() or L{free}()).
'''
return libvlc_video_get_aspect_ratio(self)
def video_set_aspect_ratio(self, psz_aspect):
'''Set new video aspect ratio.
@param psz_aspect: new video aspect-ratio or None to reset to default @note Invalid aspect ratios are ignored.
'''
return libvlc_video_set_aspect_ratio(self, str_to_bytes(psz_aspect))
def video_update_viewpoint(self, p_viewpoint, b_absolute):
'''Update the video viewpoint information.
@note: It is safe to call this function before the media player is started.
@param p_viewpoint: video viewpoint allocated via L{video_new_viewpoint}().
@param b_absolute: if true replace the old viewpoint with the new one. If false, increase/decrease it.
@return: -1 in case of error, 0 otherwise @note the values are set asynchronously, it will be used by the next frame displayed.
@version: LibVLC 3.0.0 and later.
'''
return libvlc_video_update_viewpoint(self, p_viewpoint, b_absolute)
def video_get_spu(self):
'''Get current video subtitle.
@return: the video subtitle selected, or -1 if none.
'''
return libvlc_video_get_spu(self)
def video_get_spu_count(self):
'''Get the number of available video subtitles.
@return: the number of available video subtitles.
'''
return libvlc_video_get_spu_count(self)
def video_set_spu(self, i_spu):
'''Set new video subtitle.
@param i_spu: video subtitle track to select (i_id from track description).
@return: 0 on success, -1 if out of range.
'''
return libvlc_video_set_spu(self, i_spu)
def video_get_spu_delay(self):
'''Get the current subtitle delay. Positive values means subtitles are being
displayed later, negative values earlier.
@return: time (in microseconds) the display of subtitles is being delayed.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_video_get_spu_delay(self)
def video_set_spu_delay(self, i_delay):
'''Set the subtitle delay. This affects the timing of when the subtitle will
be displayed. Positive values result in subtitles being displayed later,
while negative values will result in subtitles being displayed earlier.
The subtitle delay will be reset to zero each time the media changes.
@param i_delay: time (in microseconds) the display of subtitles should be delayed.
@return: 0 on success, -1 on error.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_video_set_spu_delay(self, i_delay)
def video_get_crop_geometry(self):
'''Get current crop filter geometry.
@return: the crop filter geometry or None if unset.
'''
return libvlc_video_get_crop_geometry(self)
def video_set_crop_geometry(self, psz_geometry):
'''Set new crop filter geometry.
@param psz_geometry: new crop filter geometry (None to unset).
'''
return libvlc_video_set_crop_geometry(self, str_to_bytes(psz_geometry))
def video_get_teletext(self):
'''Get current teletext page requested.
@return: the current teletext page requested.
'''
return libvlc_video_get_teletext(self)
def video_set_teletext(self, i_page):
'''Set new teletext page to retrieve.
@param i_page: teletex page number requested.
'''
return libvlc_video_set_teletext(self, i_page)
def toggle_teletext(self):
'''Toggle teletext transparent status on video output.
'''
return libvlc_toggle_teletext(self)
def video_get_track_count(self):
'''Get number of available video tracks.
@return: the number of available video tracks (int).
'''
return libvlc_video_get_track_count(self)
def video_get_track(self):
'''Get current video track.
@return: the video track ID (int) or -1 if no active input.
'''
return libvlc_video_get_track(self)
def video_set_track(self, i_track):
'''Set video track.
@param i_track: the track ID (i_id field from track description).
@return: 0 on success, -1 if out of range.
'''
return libvlc_video_set_track(self, i_track)
def video_take_snapshot(self, num, psz_filepath, i_width, i_height):
'''Take a snapshot of the current video window.
If i_width AND i_height is 0, original size is used.
If i_width XOR i_height is 0, original aspect-ratio is preserved.
@param num: number of video output (typically 0 for the first/only one).
@param psz_filepath: the path where to save the screenshot to.
@param i_width: the snapshot's width.
@param i_height: the snapshot's height.
@return: 0 on success, -1 if the video was not found.
'''
return libvlc_video_take_snapshot(self, num, str_to_bytes(psz_filepath), i_width, i_height)
def video_set_deinterlace(self, psz_mode):
'''Enable or disable deinterlace filter.
@param psz_mode: type of deinterlace filter, None to disable.
'''
return libvlc_video_set_deinterlace(self, str_to_bytes(psz_mode))
def video_get_marquee_int(self, option):
    '''Read an integer marquee option value.
    @param option: marq option to get, see libvlc_video_marquee_int_option_t.
    '''
    value = libvlc_video_get_marquee_int(self, option)
    return value
def video_get_marquee_string(self, option):
    '''Read a string marquee option value.
    @param option: marq option to get, see libvlc_video_marquee_string_option_t.
    '''
    value = libvlc_video_get_marquee_string(self, option)
    return value
def video_set_marquee_int(self, option, i_val):
    '''Enable, disable or set an integer marquee option.
    Setting libvlc_marquee_Enable has the side effect of enabling (arg !0)
    or disabling (arg 0) the marq filter.
    @param option: marq option to set, see libvlc_video_marquee_int_option_t.
    @param i_val: marq option value.
    '''
    result = libvlc_video_set_marquee_int(self, option, i_val)
    return result
def video_set_marquee_string(self, option, psz_text):
    '''Set a string marquee option value.
    @param option: marq option to set, see libvlc_video_marquee_string_option_t.
    @param psz_text: marq option value.
    '''
    text = str_to_bytes(psz_text)
    return libvlc_video_set_marquee_string(self, option, text)
def video_get_logo_int(self, option):
    '''Read an integer logo option.
    @param option: logo option to get, values of libvlc_video_logo_option_t.
    '''
    value = libvlc_video_get_logo_int(self, option)
    return value
def video_set_logo_int(self, option, value):
    '''Set a logo option as an integer; options that take a different type
    of value are ignored.
    Passing libvlc_logo_enable as the option has the side effect of
    starting (arg !0) or stopping (arg 0) the logo filter.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param value: logo option value.
    '''
    result = libvlc_video_set_logo_int(self, option, value)
    return result
def video_set_logo_string(self, option, psz_value):
    '''Set a logo option as a string; options that take a different type
    of value are ignored.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param psz_value: logo option value.
    '''
    text = str_to_bytes(psz_value)
    return libvlc_video_set_logo_string(self, option, text)
def video_get_adjust_int(self, option):
    '''Read an integer adjust option.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @version: LibVLC 1.1.1 and later.
    '''
    value = libvlc_video_get_adjust_int(self, option)
    return value
def video_set_adjust_int(self, option, value):
    '''Set an adjust option as an integer; options that take a different
    type of value are ignored.
    Passing libvlc_adjust_enable as the option has the side effect of
    starting (arg !0) or stopping (arg 0) the adjust filter.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    result = libvlc_video_set_adjust_int(self, option, value)
    return result
def video_get_adjust_float(self, option):
    '''Read a float adjust option.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @version: LibVLC 1.1.1 and later.
    '''
    value = libvlc_video_get_adjust_float(self, option)
    return value
def video_set_adjust_float(self, option, value):
    '''Set an adjust option as a float; options that take a different type
    of value are ignored.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    result = libvlc_video_set_adjust_float(self, option, value)
    return result
def audio_output_set(self, psz_name):
    '''Select an audio output module.
    @note: Any change will take effect only after playback is stopped and
    restarted; the audio output cannot be changed while playing.
    @param psz_name: name of the audio output, use the psz_name field of L{AudioOutput}.
    @return: 0 if the function succeeded, -1 on error.
    '''
    name = str_to_bytes(psz_name)
    return libvlc_audio_output_set(self, name)
def audio_output_device_enum(self):
    '''List potential audio output devices, see L{audio_output_device_set}().
    @note: Not all audio outputs support enumerating devices; the audio
    output may be functional even if the list is empty (None).
    @note: The list may not be exhaustive.
    @warning: Some audio output devices in the list might not actually work
    in some circumstances. By default, it is recommended to not specify any
    explicit audio device.
    @return: A None-terminated linked list of potential audio output devices. It must be freed with L{audio_output_device_list_release}().
    @version: LibVLC 2.2.0 or later.
    '''
    devices = libvlc_audio_output_device_enum(self)
    return devices
def audio_output_device_set(self, module, device_id):
    '''Configure an explicit audio output device.
    If the module parameter is None, audio output is immediately moved to
    the device specified by the identifier string; this is the recommended
    usage, and adequate device strings can be obtained with
    L{audio_output_device_enum}(). Passing None for module is supported in
    LibVLC 2.2.0 and later only; in earlier versions this function had no
    effect when module was None.
    If the module parameter is not None, the device parameter of the
    corresponding audio output, if it exists, is set to the specified
    string; note that some audio output modules do not have such a
    parameter (notably MMDevice and PulseAudio). Adequate device strings
    can then be obtained with L{audio_output_device_list_get}().
    @note: This function does not select the specified audio output plugin;
    L{audio_output_set}() is used for that purpose.
    @warning: The syntax for the device parameter depends on the audio
    output; some audio output modules require further parameters (e.g. a
    channels map in the case of ALSA).
    @param module: If None, current audio output module. if non-None, name of audio output module.
    @param device_id: device identifier string.
    @return: Nothing. Errors are ignored (this is a design bug).
    '''
    mod = str_to_bytes(module)
    dev = str_to_bytes(device_id)
    return libvlc_audio_output_device_set(self, mod, dev)
def audio_output_device_get(self):
    '''Return the current audio output device identifier.
    This complements L{audio_output_device_set}().
    @warning: The initial value of the identifier may be unset or unknown;
    a LibVLC application should compare it against the known device
    identifiers (e.g. those previously retrieved by a call to
    L{audio_output_device_enum} or L{audio_output_device_list_get}) to find
    the current device. The selected device may also change externally,
    without a call to L{audio_output_device_set}, which makes this method
    unsuitable for tracking dynamic audio device changes as they happen.
    @return: the current audio output device identifier, None if no device is selected or in case of error (the result must be released with free() or L{free}()).
    @version: LibVLC 3.0.0 or later.
    '''
    device = libvlc_audio_output_device_get(self)
    return device
def audio_toggle_mute(self):
    '''Toggle the mute status.
    '''
    result = libvlc_audio_toggle_mute(self)
    return result
def audio_get_mute(self):
    '''Return the current mute status.
    @return: the mute status (boolean) if defined, -1 if undefined/unapplicable.
    '''
    status = libvlc_audio_get_mute(self)
    return status
def audio_set_mute(self, status):
    '''Set the mute status.
    @param status: If status is true then mute, otherwise unmute. @warning This function does not always work: if there is no active audio playback stream, the mute status might not be available; if digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable; and some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks — this is more efficient and reliable than mute.
    '''
    result = libvlc_audio_set_mute(self, status)
    return result
def audio_get_volume(self):
    '''Return the current software audio volume.
    @return: the software volume in percents (0 = mute, 100 = nominal / 0dB).
    '''
    volume = libvlc_audio_get_volume(self)
    return volume
def audio_set_volume(self, i_volume):
    '''Set the current software audio volume.
    @param i_volume: the volume in percents (0 = mute, 100 = 0dB).
    @return: 0 if the volume was set, -1 if it was out of range.
    '''
    result = libvlc_audio_set_volume(self, i_volume)
    return result
def audio_get_track_count(self):
    '''Return how many audio tracks are available.
    @return: the number of available audio tracks (int), or -1 if unavailable.
    '''
    count = libvlc_audio_get_track_count(self)
    return count
def audio_get_track(self):
    '''Return the currently selected audio track.
    @return: the audio track ID, or -1 if there is no active input.
    '''
    track = libvlc_audio_get_track(self)
    return track
def audio_set_track(self, i_track):
    '''Select the current audio track.
    @param i_track: the track ID (i_id field from the track description).
    @return: 0 on success, -1 on error.
    '''
    result = libvlc_audio_set_track(self, i_track)
    return result
def audio_get_channel(self):
    '''Return the current audio channel.
    @return: the audio channel, see libvlc_audio_output_channel_t.
    '''
    channel = libvlc_audio_get_channel(self)
    return channel
def audio_set_channel(self, channel):
    '''Select the current audio channel.
    @param channel: the audio channel, see libvlc_audio_output_channel_t.
    @return: 0 on success, -1 on error.
    '''
    result = libvlc_audio_set_channel(self, channel)
    return result
def audio_get_delay(self):
    '''Return the current audio delay.
    @return: the audio delay (microseconds).
    @version: LibVLC 1.1.1 or later.
    '''
    delay = libvlc_audio_get_delay(self)
    return delay
def audio_set_delay(self, i_delay):
    '''Set the current audio delay; the delay is reset to zero each time
    the media changes.
    @param i_delay: the audio delay (microseconds).
    @return: 0 on success, -1 on error.
    @version: LibVLC 1.1.1 or later.
    '''
    result = libvlc_audio_set_delay(self, i_delay)
    return result
def set_equalizer(self, p_equalizer):
    '''Apply new equalizer settings to this media player.
    The equalizer is first created by invoking L{audio_equalizer_new}() or
    L{audio_equalizer_new_from_preset}(). New settings may be applied
    whether or not the player is currently playing media: they take effect
    immediately on the audio output of any currently playing media, are
    otherwise applied later when new media is played, and will
    automatically apply to subsequently played media.
    To disable the equalizer for this media player, pass None as the
    p_equalizer parameter.
    The media player does not keep a reference to the supplied equalizer,
    so it is safe for an application to release the equalizer reference at
    any time after this method returns.
    @param p_equalizer: opaque equalizer handle, or None to disable the equalizer for this media player.
    @return: zero on success, -1 on error.
    @version: LibVLC 2.2.0 or later.
    '''
    result = libvlc_media_player_set_equalizer(self, p_equalizer)
    return result
def get_role(self):
    '''Return the media role.
    @return: the media player role (\ref libvlc_media_player_role_t).
    @version: LibVLC 3.0.0 and later.
    '''
    role = libvlc_media_player_get_role(self)
    return role
def set_role(self, role):
    '''Set the media role.
    @param role: the media player role (\ref libvlc_media_player_role_t).
    @return: 0 on success, -1 on error.
    '''
    result = libvlc_media_player_set_role(self, role)
    return result
# LibVLC __version__ functions #
def libvlc_clearerr():
    '''Clear the LibVLC error status for the current thread. This is
    optional: by default, the error status is automatically overridden when
    a new error occurs, and destroyed when the thread exits.
    '''
    f = _Cfunctions.get('libvlc_clearerr', None)
    if not f:
        f = _Cfunction('libvlc_clearerr', (), None,
                       None)
    return f()
def libvlc_vprinterr(fmt, ap):
    '''Set the LibVLC error status and message for the current thread,
    overriding any previous error.
    @param fmt: the format string.
    @param ap: the arguments.
    @return: a nul terminated string in any case.
    '''
    f = _Cfunctions.get('libvlc_vprinterr', None)
    if not f:
        f = _Cfunction('libvlc_vprinterr', ((1,), (1,),), None,
                       ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p)
    return f(fmt, ap)
def libvlc_new(argc, argv):
    '''Create and initialize a libvlc instance.
    This function accepts a list of "command line" arguments, similar to
    main(), which affect the LibVLC instance default configuration.
    @note: LibVLC may create threads. Therefore, any thread-unsafe process
    initialization must be performed before calling L{libvlc_new}(). In
    particular and where applicable:
    - setlocale() and textdomain(),
    - setenv(), unsetenv() and putenv(),
    - with the X11 display system, XInitThreads()
    (see also L{libvlc_media_player_set_xwindow}()) and
    - on Microsoft Windows, SetErrorMode().
    - sigprocmask() shall never be invoked; pthread_sigmask() can be used.
    On POSIX systems, the SIGCHLD signal B{must not} be ignored: the signal
    handler must be set to SIG_DFL or a function pointer, not SIG_IGN.
    While LibVLC is active, wait() shall not be called, and any call to
    waitpid() shall use a strictly positive value for the first parameter
    (i.e. the PID); failure to follow those rules may lead to a deadlock or
    a busy loop. It is also recommended on POSIX systems to block the
    SIGPIPE signal, even if not, in principle, necessary.
    On Microsoft Windows Vista/2008, the process error mode
    SEM_FAILCRITICALERRORS flag B{must} be set before using LibVLC; on
    later versions that is optional and unnecessary. Also on Microsoft
    Windows (Vista and any later version), setting the default DLL
    directories to SYSTEM32 exclusively is strongly recommended for
    security reasons.
    @param argc: the number of arguments (should be 0).
    @param argv: list of arguments (should be None).
    @return: the libvlc instance or None in case of error.
    @version Arguments are meant to be passed from the command line to LibVLC, just like VLC media player does. The list of valid arguments depends on the LibVLC version, the operating system and platform, and set of available LibVLC plugins. Invalid or unsupported arguments will cause the function to fail (i.e. return None). Also, some arguments may alter the behaviour or otherwise interfere with other LibVLC functions. @warning There is absolutely no warranty or promise of forward, backward and cross-platform compatibility with regards to L{libvlc_new}() arguments. We recommend that you do not use them, other than when debugging.
    '''
    f = _Cfunctions.get('libvlc_new', None)
    if not f:
        f = _Cfunction('libvlc_new', ((1,), (1,),), class_result(Instance),
                       ctypes.c_void_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p))
    return f(argc, argv)
def libvlc_release(p_instance):
    '''Decrement the reference count of a libvlc instance; the instance is
    destroyed once the count reaches zero.
    @param p_instance: the instance to destroy.
    '''
    f = _Cfunctions.get('libvlc_release', None)
    if not f:
        f = _Cfunction('libvlc_release', ((1,),), None,
                       None, Instance)
    return f(p_instance)
def libvlc_retain(p_instance):
    '''Increment the reference count of a libvlc instance; the initial
    reference count is 1 after L{libvlc_new}() returns.
    @param p_instance: the instance to reference.
    '''
    f = _Cfunctions.get('libvlc_retain', None)
    if not f:
        f = _Cfunction('libvlc_retain', ((1,),), None,
                       None, Instance)
    return f(p_instance)
def libvlc_add_intf(p_instance, name):
    '''Try to start a user interface for the libvlc instance.
    @param p_instance: the instance.
    @param name: interface name, or None for default.
    @return: 0 on success, -1 on error.
    '''
    f = _Cfunctions.get('libvlc_add_intf', None)
    if not f:
        f = _Cfunction('libvlc_add_intf', ((1,), (1,),), None,
                       ctypes.c_int, Instance, ctypes.c_char_p)
    return f(p_instance, name)
def libvlc_set_user_agent(p_instance, name, http):
    '''Set the application name; LibVLC passes this as the user agent
    string when a protocol requires it.
    @param p_instance: LibVLC instance.
    @param name: human-readable application name, e.g. "FooBar player 1.2.3".
    @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
    @version: LibVLC 1.1.1 or later.
    '''
    f = _Cfunctions.get('libvlc_set_user_agent', None)
    if not f:
        f = _Cfunction('libvlc_set_user_agent', ((1,), (1,), (1,),), None,
                       None, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return f(p_instance, name, http)
def libvlc_set_app_id(p_instance, id, version, icon):
    '''Set some meta-information about the application.
    See also L{libvlc_set_user_agent}().
    @param p_instance: LibVLC instance.
    @param id: Java-style application identifier, e.g. "com.acme.foobar".
    @param version: application version numbers, e.g. "1.2.3".
    @param icon: application icon name, e.g. "foobar".
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_set_app_id', None)
    if not f:
        f = _Cfunction('libvlc_set_app_id', ((1,), (1,), (1,), (1,),), None,
                       None, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p)
    return f(p_instance, id, version, icon)
def libvlc_get_version():
    '''Retrieve the libvlc version, e.g. "1.1.0-git The Luggage".
    @return: a string containing the libvlc version.
    '''
    f = _Cfunctions.get('libvlc_get_version', None)
    if not f:
        f = _Cfunction('libvlc_get_version', (), None,
                       ctypes.c_char_p)
    return f()
def libvlc_get_compiler():
    '''Retrieve the libvlc compiler version, e.g.
    "gcc version 4.2.3 (Ubuntu 4.2.3-2ubuntu6)".
    @return: a string containing the libvlc compiler version.
    '''
    f = _Cfunctions.get('libvlc_get_compiler', None)
    if not f:
        f = _Cfunction('libvlc_get_compiler', (), None,
                       ctypes.c_char_p)
    return f()
def libvlc_get_changeset():
    '''Retrieve the libvlc changeset, e.g. "aa9bce0bc4".
    @return: a string containing the libvlc changeset.
    '''
    f = _Cfunctions.get('libvlc_get_changeset', None)
    if not f:
        f = _Cfunction('libvlc_get_changeset', (), None,
                       ctypes.c_char_p)
    return f()
def libvlc_free(ptr):
    '''Free a heap allocation returned by a LibVLC function. If you know
    you're using the same underlying C run-time as the LibVLC
    implementation, you can call ANSI C free() directly instead.
    @param ptr: the pointer.
    '''
    f = _Cfunctions.get('libvlc_free', None)
    if not f:
        f = _Cfunction('libvlc_free', ((1,),), None,
                       None, ctypes.c_void_p)
    return f(ptr)
def libvlc_event_attach(p_event_manager, i_event_type, f_callback, user_data):
    '''Register for an event notification.
    @param p_event_manager: the event manager to which you want to attach to. Generally it is obtained by vlc_my_object_event_manager() where my_object is the object you want to listen to.
    @param i_event_type: the desired event to which we want to listen.
    @param f_callback: the function to call when i_event_type occurs.
    @param user_data: user provided data to carry with the event.
    @return: 0 on success, ENOMEM on error.
    '''
    f = _Cfunctions.get('libvlc_event_attach', None)
    if not f:
        f = _Cfunction('libvlc_event_attach', ((1,), (1,), (1,), (1,),), None,
                       ctypes.c_int, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, user_data)
def libvlc_event_detach(p_event_manager, i_event_type, f_callback, p_user_data):
    '''Unregister an event notification.
    @param p_event_manager: the event manager.
    @param i_event_type: the desired event to which we want to unregister.
    @param f_callback: the function to call when i_event_type occurs.
    @param p_user_data: user provided data to carry with the event.
    '''
    f = _Cfunctions.get('libvlc_event_detach', None)
    if not f:
        f = _Cfunction('libvlc_event_detach', ((1,), (1,), (1,), (1,),), None,
                       None, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, p_user_data)
def libvlc_event_type_name(event_type):
    '''Return an event's type name.
    @param event_type: the desired event.
    '''
    f = _Cfunctions.get('libvlc_event_type_name', None)
    if not f:
        f = _Cfunction('libvlc_event_type_name', ((1,),), None,
                       ctypes.c_char_p, ctypes.c_uint)
    return f(event_type)
def libvlc_log_get_context(ctx):
    '''Get debug information about a log message:
    - the name of the VLC module emitting the message,
    - the name of the source code module (i.e. file) and
    - the line number within the source code module.
    The returned module name and file name will be None if unknown; the
    returned line number will similarly be zero if unknown.
    @param ctx: message context (as passed to the @ref libvlc_log_cb callback).
    @return: module module name storage (or None), file source code file name storage (or None), line source code file line number storage (or None).
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_get_context', None)
    if not f:
        f = _Cfunction('libvlc_log_get_context', ((1,), (2,), (2,), (2,),), None,
                       None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint))
    return f(ctx)
def libvlc_log_get_object(ctx, id):
    '''Get meta-information about a log message:
    - the type name of the VLC object emitting the message,
    - the object header if any, and
    - a temporally-unique object identifier.
    This information is mainly meant for B{manual} troubleshooting.
    The returned type name may be "generic" if unknown, but it cannot be
    None. The returned header will be None if unset; in current versions,
    the header is used to distinguish for VLM inputs. The returned object
    ID will be zero if the message is not associated with any VLC object.
    @param ctx: message context (as passed to the @ref libvlc_log_cb callback).
    @return: name object name storage (or None), header object header (or None), line source code file line number storage (or None).
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_get_object', None)
    if not f:
        f = _Cfunction('libvlc_log_get_object', ((1,), (2,), (2,), (1,),), None,
                       None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint))
    return f(ctx, id)
def libvlc_log_unset(p_instance):
    '''Unset the logging callback.
    This function deregisters the logging callback for a LibVLC instance.
    It is rarely needed, as the callback is implicitly unset when the
    instance is destroyed.
    @note: This function will wait for any pending callbacks invocation to
    complete (causing a deadlock if called from within the callback).
    @param p_instance: libvlc instance.
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_unset', None)
    if not f:
        f = _Cfunction('libvlc_log_unset', ((1,),), None,
                       None, Instance)
    return f(p_instance)
def libvlc_log_set(p_instance, cb, data):
    '''Set the logging callback for a LibVLC instance.
    This function is thread-safe: it will wait for any pending callbacks
    invocation to complete.
    @param cb: callback function pointer.
    @param data: opaque data pointer for the callback function. @note Some log messages (especially debug) are emitted by LibVLC while it is being initialized; these messages cannot be captured with this interface. @warning A deadlock may occur if this function is called from the callback.
    @param p_instance: libvlc instance.
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_set', None)
    if not f:
        f = _Cfunction('libvlc_log_set', ((1,), (1,), (1,),), None,
                       None, Instance, LogCb, ctypes.c_void_p)
    return f(p_instance, cb, data)
def libvlc_log_set_file(p_instance, stream):
    '''Set up logging to a file.
    @param p_instance: libvlc instance.
    @param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{libvlc_log_unset}()).
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_set_file', None)
    if not f:
        f = _Cfunction('libvlc_log_set_file', ((1,), (1,),), None,
                       None, Instance, FILE_ptr)
    return f(p_instance, stream)
def libvlc_module_description_list_release(p_list):
    '''Release a list of module descriptions.
    @param p_list: the list to be released.
    '''
    f = _Cfunctions.get('libvlc_module_description_list_release', None)
    if not f:
        f = _Cfunction('libvlc_module_description_list_release', ((1,),), None,
                       None, ctypes.POINTER(ModuleDescription))
    return f(p_list)
def libvlc_audio_filter_list_get(p_instance):
    '''Return a list of available audio filters.
    @param p_instance: libvlc instance.
    @return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, None is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
    '''
    f = _Cfunctions.get('libvlc_audio_filter_list_get', None)
    if not f:
        f = _Cfunction('libvlc_audio_filter_list_get', ((1,),), None,
                       ctypes.POINTER(ModuleDescription), Instance)
    return f(p_instance)
def libvlc_video_filter_list_get(p_instance):
    '''Return a list of available video filters.
    @param p_instance: libvlc instance.
    @return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, None is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
    '''
    f = _Cfunctions.get('libvlc_video_filter_list_get', None)
    if not f:
        f = _Cfunction('libvlc_video_filter_list_get', ((1,),), None,
                       ctypes.POINTER(ModuleDescription), Instance)
    return f(p_instance)
def libvlc_clock():
    '''Return the current time as defined by LibVLC. The unit is the
    microsecond. Time increases monotonically (regardless of time zone
    changes and RTC adjustments). The origin is arbitrary but consistent
    across the whole system (e.g. the system uptime, the time since the
    system was booted).
    @note: On systems that support it, the POSIX monotonic clock is used.
    '''
    f = _Cfunctions.get('libvlc_clock', None)
    if not f:
        f = _Cfunction('libvlc_clock', (), None,
                       ctypes.c_int64)
    return f()
def libvlc_media_discoverer_new(p_inst, psz_name):
    '''Create a media discoverer object by name.
    After this object is created, you should attach to media_list events
    in order to be notified of newly discovered items. You need to call
    L{libvlc_media_discoverer_start}() in order to start the discovery.
    See L{libvlc_media_discoverer_media_list}
    See L{libvlc_media_discoverer_event_manager}
    See L{libvlc_media_discoverer_start}.
    @param p_inst: libvlc instance.
    @param psz_name: service name; use L{libvlc_media_discoverer_list_get}() to get a list of the discoverer names available in this libVLC instance.
    @return: media discover object or None in case of error.
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_new', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_new', ((1,), (1,),), class_result(MediaDiscoverer),
                       ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_inst, psz_name)
def libvlc_media_discoverer_start(p_mdis):
    '''Start media discovery.
    To stop it, call L{libvlc_media_discoverer_stop}() or
    L{libvlc_media_discoverer_list_release}() directly.
    See L{libvlc_media_discoverer_stop}.
    @param p_mdis: media discover object.
    @return: -1 in case of error, 0 otherwise.
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_start', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_start', ((1,),), None,
                       ctypes.c_int, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_stop(p_mdis):
    '''Stop media discovery.
    See L{libvlc_media_discoverer_start}.
    @param p_mdis: media discover object.
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_stop', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_stop', ((1,),), None,
                       None, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_release(p_mdis):
    '''Release a media discover object; if the reference count reaches 0,
    the object is destroyed.
    @param p_mdis: media service discover object.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_release', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_release', ((1,),), None,
                       None, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_media_list(p_mdis):
    '''Return the media list of a media service discover object.
    @param p_mdis: media service discover object.
    @return: list of media items.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_media_list', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_media_list', ((1,),), class_result(MediaList),
                       ctypes.c_void_p, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_is_running(p_mdis):
    '''Query whether a media service discover object is running.
    @param p_mdis: media service discover object.
    @return: true if running, false if not \libvlc_return_bool.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_is_running', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_is_running', ((1,),), None,
                       ctypes.c_int, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_list_get(p_inst, i_cat, ppp_services):
    '''Get media discoverer services by category.
    @param p_inst: libvlc instance.
    @param i_cat: category of services to fetch.
    @param ppp_services: address to store an allocated array of media discoverer services (must be freed with L{libvlc_media_discoverer_list_release}() by the caller) [OUT].
    @return: the number of media discoverer services (0 on error).
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_list_get', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_list_get', ((1,), (1,), (1,),), None,
                       ctypes.c_size_t, Instance, MediaDiscovererCategory, ctypes.POINTER(ctypes.POINTER(MediaDiscovererDescription)))
    return f(p_inst, i_cat, ppp_services)
def libvlc_media_discoverer_list_release(pp_services, i_count):
    '''Release an array of media discoverer services.
    @param pp_services: array to release.
    @param i_count: number of elements in the array.
    @version: LibVLC 3.0.0 and later. See L{libvlc_media_discoverer_list_get}().
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_list_release', None)
    if not f:
        f = _Cfunction('libvlc_media_discoverer_list_release', ((1,), (1,),), None,
                       None, ctypes.POINTER(MediaDiscovererDescription), ctypes.c_size_t)
    return f(pp_services, i_count)
def libvlc_dialog_set_context(p_id, p_context):
    '''Associate an opaque pointer with the dialog id.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_dialog_set_context', None)
    if not f:
        f = _Cfunction('libvlc_dialog_set_context', ((1,), (1,),), None,
                       None, ctypes.c_void_p, ctypes.c_void_p)
    return f(p_id, p_context)
def libvlc_dialog_get_context(p_id):
    '''Return the opaque pointer associated with the dialog id.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_dialog_get_context', None)
    if not f:
        f = _Cfunction('libvlc_dialog_get_context', ((1,),), None,
                       ctypes.c_void_p, ctypes.c_void_p)
    return f(p_id)
def libvlc_dialog_post_login(p_id, psz_username, psz_password, b_store):
    '''Post a login answer.
    After this call, p_id won't be valid anymore.
    See libvlc_dialog_cbs.pf_display_login.
    @param p_id: id of the dialog.
    @param psz_username: valid and non empty string.
    @param psz_password: valid string (can be empty).
    @param b_store: if true, store the credentials.
    @return: 0 on success, or -1 on error.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_dialog_post_login', None)
    if not f:
        f = _Cfunction('libvlc_dialog_post_login', ((1,), (1,), (1,), (1,),), None,
                       ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_bool)
    return f(p_id, psz_username, psz_password, b_store)
def libvlc_dialog_post_action(p_id, i_action):
    '''Post a question answer.
    After this call, p_id won't be valid anymore.
    See libvlc_dialog_cbs.pf_display_question.
    @param p_id: id of the dialog.
    @param i_action: 1 for action1, 2 for action2.
    @return: 0 on success, or -1 on error.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_dialog_post_action', None)
    if not f:
        f = _Cfunction('libvlc_dialog_post_action', ((1,), (1,),), None,
                       ctypes.c_int, ctypes.c_void_p, ctypes.c_int)
    return f(p_id, i_action)
def libvlc_dialog_dismiss(p_id):
    '''Dismiss a dialog.
    After this call, p_id won't be valid anymore.
    See libvlc_dialog_cbs.pf_cancel.
    @param p_id: id of the dialog.
    @return: 0 on success, or -1 on error.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_dialog_dismiss', None)
    if not f:
        f = _Cfunction('libvlc_dialog_dismiss', ((1,),), None,
                       ctypes.c_int, ctypes.c_void_p)
    return f(p_id)
def libvlc_media_library_new(p_instance):
    '''Create a new Media Library object.
    @param p_instance: the libvlc instance.
    @return: a new object or None on error.
    '''
    f = _Cfunctions.get('libvlc_media_library_new', None)
    if not f:
        f = _Cfunction('libvlc_media_library_new', ((1,),), class_result(MediaLibrary),
                       ctypes.c_void_p, Instance)
    return f(p_instance)
def libvlc_media_library_release(p_mlib):
    '''Release a media library object. This function decrements the
    reference count of the media library object; if it reaches 0, the
    object is destroyed.
    @param p_mlib: media library object.
    '''
    f = _Cfunctions.get('libvlc_media_library_release', None)
    if not f:
        f = _Cfunction('libvlc_media_library_release', ((1,),), None,
                       None, MediaLibrary)
    return f(p_mlib)
def libvlc_media_library_retain(p_mlib):
    '''Retain a reference to a media library object. This function will
    increment the reference counting for this object. Use
    L{libvlc_media_library_release}() to decrement the reference count.
    @param p_mlib: media library object.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_library_retain', None)
    if not func:
        func = _Cfunction('libvlc_media_library_retain', ((1,),), None,
                          None, MediaLibrary)
    return func(p_mlib)
def libvlc_media_library_load(p_mlib):
    '''Load media library.
    @param p_mlib: media library object.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_library_load', None)
    if not func:
        func = _Cfunction('libvlc_media_library_load', ((1,),), None,
                          ctypes.c_int, MediaLibrary)
    return func(p_mlib)
def libvlc_media_library_media_list(p_mlib):
    '''Get media library subitems.
    @param p_mlib: media library object.
    @return: media list subitems.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_library_media_list', None)
    if not func:
        func = _Cfunction('libvlc_media_library_media_list', ((1,),), class_result(MediaList),
                          ctypes.c_void_p, MediaLibrary)
    return func(p_mlib)
def libvlc_vlm_release(p_instance):
    '''Release the vlm instance related to the given L{Instance}.
    @param p_instance: the instance.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_release', None)
    if not func:
        func = _Cfunction('libvlc_vlm_release', ((1,),), None,
                          None, Instance)
    return func(p_instance)
def libvlc_vlm_add_broadcast(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
    '''Add a broadcast, with one input.
    @param p_instance: the instance.
    @param psz_name: the name of the new broadcast.
    @param psz_input: the input MRL.
    @param psz_output: the output MRL (the parameter to the "sout" variable).
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new broadcast.
    @param b_loop: Should this broadcast be played in loop ?
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_add_broadcast', None)
    if not func:
        func = _Cfunction('libvlc_vlm_add_broadcast', ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
                          ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_int)
    return func(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop)
def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
    '''Add a vod, with one input.
    @param p_instance: the instance.
    @param psz_name: the name of the new vod media.
    @param psz_input: the input MRL.
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new vod.
    @param psz_mux: the muxer of the vod media.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_add_vod', None)
    if not func:
        func = _Cfunction('libvlc_vlm_add_vod', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int,
                          ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux)
def libvlc_vlm_del_media(p_instance, psz_name):
    '''Delete a media (VOD or broadcast).
    @param p_instance: the instance.
    @param psz_name: the media to delete.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_del_media', None)
    if not func:
        func = _Cfunction('libvlc_vlm_del_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_set_enabled(p_instance, psz_name, b_enabled):
    '''Enable or disable a media (VOD or broadcast).
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param b_enabled: the new status.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_set_enabled', None)
    if not func:
        func = _Cfunction('libvlc_vlm_set_enabled', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, b_enabled)
def libvlc_vlm_set_output(p_instance, psz_name, psz_output):
    '''Set the output for a media.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_output: the output MRL (the parameter to the "sout" variable).
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_set_output', None)
    if not func:
        func = _Cfunction('libvlc_vlm_set_output', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_output)
def libvlc_vlm_set_input(p_instance, psz_name, psz_input):
    '''Set a media's input MRL. This will delete all existing inputs and
    add the specified one.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_input: the input MRL.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_set_input', None)
    if not func:
        func = _Cfunction('libvlc_vlm_set_input', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_input)
def libvlc_vlm_add_input(p_instance, psz_name, psz_input):
    '''Add a media's input MRL. This will add the specified one.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_input: the input MRL.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_add_input', None)
    if not func:
        func = _Cfunction('libvlc_vlm_add_input', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_input)
def libvlc_vlm_set_loop(p_instance, psz_name, b_loop):
    '''Set a media's loop status.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param b_loop: the new status.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_set_loop', None)
    if not func:
        func = _Cfunction('libvlc_vlm_set_loop', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, b_loop)
def libvlc_vlm_set_mux(p_instance, psz_name, psz_mux):
    '''Set a media's vod muxer.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_mux: the new muxer.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_set_mux', None)
    if not func:
        func = _Cfunction('libvlc_vlm_set_mux', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_mux)
def libvlc_vlm_change_media(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
    '''Edit the parameters of a media. This will delete all existing inputs and
    add the specified one.
    @param p_instance: the instance.
    @param psz_name: the name of the new broadcast.
    @param psz_input: the input MRL.
    @param psz_output: the output MRL (the parameter to the "sout" variable).
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new broadcast.
    @param b_loop: Should this broadcast be played in loop ?
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_change_media', None)
    if not func:
        func = _Cfunction('libvlc_vlm_change_media', ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
                          ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_int)
    return func(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop)
def libvlc_vlm_play_media(p_instance, psz_name):
    '''Play the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_play_media', None)
    if not func:
        func = _Cfunction('libvlc_vlm_play_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_stop_media(p_instance, psz_name):
    '''Stop the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_stop_media', None)
    if not func:
        func = _Cfunction('libvlc_vlm_stop_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_pause_media(p_instance, psz_name):
    '''Pause the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_pause_media', None)
    if not func:
        func = _Cfunction('libvlc_vlm_pause_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_seek_media(p_instance, psz_name, f_percentage):
    '''Seek in the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @param f_percentage: the percentage to seek to.
    @return: 0 on success, -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_seek_media', None)
    if not func:
        func = _Cfunction('libvlc_vlm_seek_media', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_float)
    return func(p_instance, psz_name, f_percentage)
def libvlc_vlm_show_media(p_instance, psz_name):
    '''Return information about the named media as a JSON
    string representation.
    This function is mainly intended for debugging use,
    if you want programmatic access to the state of
    a vlm_media_instance_t, please use the corresponding
    libvlc_vlm_get_media_instance_xxx -functions.
    Currently there are no such functions available for
    vlm_media_t though.
    @param p_instance: the instance.
    @param psz_name: the name of the media, if the name is an empty string, all media is described.
    @return: string with information about named media, or None on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_show_media', None)
    if not func:
        func = _Cfunction('libvlc_vlm_show_media', ((1,), (1,),), string_result,
                          ctypes.c_void_p, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_get_media_instance_position(p_instance, psz_name, i_instance):
    '''Get vlm_media instance position by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: position as float or -1. on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_position', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_media_instance_position', ((1,), (1,), (1,),), None,
                          ctypes.c_float, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_time(p_instance, psz_name, i_instance):
    '''Get vlm_media instance time by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: time as integer or -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_time', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_media_instance_time', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_length(p_instance, psz_name, i_instance):
    '''Get vlm_media instance length by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: length of media item or -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_length', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_media_instance_length', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_rate(p_instance, psz_name, i_instance):
    '''Get vlm_media instance playback rate by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: playback rate or -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_rate', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_media_instance_rate', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_title(p_instance, psz_name, i_instance):
    '''Get vlm_media instance title number by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: title as number or -1 on error.
    @bug: will always return 0.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_title', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_media_instance_title', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_chapter(p_instance, psz_name, i_instance):
    '''Get vlm_media instance chapter number by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: chapter as number or -1 on error.
    @bug: will always return 0.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_chapter', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_media_instance_chapter', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_seekable(p_instance, psz_name, i_instance):
    '''Is libvlc instance seekable ?
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: 1 if seekable, 0 if not, -1 if media does not exist.
    @bug: will always return 0.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_seekable', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_media_instance_seekable', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_event_manager(p_instance):
    '''Get libvlc_event_manager from a vlm media.
    The p_event_manager is immutable, so you don't have to hold the lock.
    @param p_instance: a libvlc instance.
    @return: libvlc_event_manager.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_vlm_get_event_manager', None)
    if not func:
        func = _Cfunction('libvlc_vlm_get_event_manager', ((1,),), class_result(EventManager),
                          ctypes.c_void_p, Instance)
    return func(p_instance)
def libvlc_media_new_location(p_instance, psz_mrl):
    '''Create a media with a certain given media resource location,
    for instance a valid URL.
    @note: To refer to a local file with this function,
    the file://... URI syntax B{must} be used (see IETF RFC3986).
    We recommend using L{libvlc_media_new_path}() instead when dealing with
    local files.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param psz_mrl: the media location.
    @return: the newly created media or None on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_new_location', None)
    if not func:
        func = _Cfunction('libvlc_media_new_location', ((1,), (1,),), class_result(Media),
                          ctypes.c_void_p, Instance, ctypes.c_char_p)
    return func(p_instance, psz_mrl)
def libvlc_media_new_path(p_instance, path):
    '''Create a media for a certain file path.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param path: local filesystem path.
    @return: the newly created media or None on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_new_path', None)
    if not func:
        func = _Cfunction('libvlc_media_new_path', ((1,), (1,),), class_result(Media),
                          ctypes.c_void_p, Instance, ctypes.c_char_p)
    return func(p_instance, path)
def libvlc_media_new_fd(p_instance, fd):
    '''Create a media for an already open file descriptor.
    The file descriptor shall be open for reading (or reading and writing).
    Regular file descriptors, pipe read descriptors and character device
    descriptors (including TTYs) are supported on all platforms.
    Block device descriptors are supported where available.
    Directory descriptors are supported on systems that provide fdopendir().
    Sockets are supported on all platforms where they are file descriptors,
    i.e. all except Windows.
    @note: This library will B{not} automatically close the file descriptor
    under any circumstance. Nevertheless, a file descriptor can usually only be
    rendered once in a media player. To render it a second time, the file
    descriptor should probably be rewound to the beginning with lseek().
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param fd: open file descriptor.
    @return: the newly created media or None on error.
    @version: LibVLC 1.1.5 and later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_new_fd', None)
    if not func:
        func = _Cfunction('libvlc_media_new_fd', ((1,), (1,),), class_result(Media),
                          ctypes.c_void_p, Instance, ctypes.c_int)
    return func(p_instance, fd)
def libvlc_media_new_callbacks(instance, open_cb, read_cb, seek_cb, close_cb, opaque):
    '''Create a media with custom callbacks to read the data from.
    @param instance: LibVLC instance.
    @param open_cb: callback to open the custom bitstream input media.
    @param read_cb: callback to read data (must not be None).
    @param seek_cb: callback to seek, or None if seeking is not supported.
    @param close_cb: callback to close the media, or None if unnecessary.
    @param opaque: data pointer for the open callback.
    @return: the newly created media or None on error @note If open_cb is None, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{libvlc_media_release}.
    @version: LibVLC 3.0.0 and later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_new_callbacks', None)
    if not func:
        func = _Cfunction('libvlc_media_new_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,),), class_result(Media),
                          ctypes.c_void_p, Instance, MediaOpenCb, MediaReadCb, MediaSeekCb,
                          MediaCloseCb, ctypes.c_void_p)
    return func(instance, open_cb, read_cb, seek_cb, close_cb, opaque)
def libvlc_media_new_as_node(p_instance, psz_name):
    '''Create a media as an empty node with a given name.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param psz_name: the name of the node.
    @return: the new empty media or None on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_new_as_node', None)
    if not func:
        func = _Cfunction('libvlc_media_new_as_node', ((1,), (1,),), class_result(Media),
                          ctypes.c_void_p, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_media_add_option(p_md, psz_options):
    '''Add an option to the media.
    This option will be used to determine how the media_player will
    read the media. This allows to use VLC's advanced
    reading/streaming options on a per-media basis.
    @note: The options are listed in 'vlc --long-help' from the command line,
    e.g. "-sout-all". Keep in mind that available options and their semantics
    vary across LibVLC versions and builds.
    @warning: Not all options affects L{Media} objects:
    Specifically, due to architectural issues most audio and video options,
    such as text renderer options, have no effects on an individual media.
    These options must be set through L{libvlc_new}() instead.
    @param p_md: the media descriptor.
    @param psz_options: the options (as a string).
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_add_option', None)
    if not func:
        func = _Cfunction('libvlc_media_add_option', ((1,), (1,),), None,
                          None, Media, ctypes.c_char_p)
    return func(p_md, psz_options)
def libvlc_media_add_option_flag(p_md, psz_options, i_flags):
    '''Add an option to the media with configurable flags.
    This option will be used to determine how the media_player will
    read the media. This allows to use VLC's advanced
    reading/streaming options on a per-media basis.
    The options are detailed in vlc --long-help, for instance
    "--sout-all". Note that all options are not usable on medias:
    specifically, due to architectural issues, video-related options
    such as text renderer options cannot be set on a single media. They
    must be set on the whole libvlc instance instead.
    @param p_md: the media descriptor.
    @param psz_options: the options (as a string).
    @param i_flags: the flags for this option.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_add_option_flag', None)
    if not func:
        func = _Cfunction('libvlc_media_add_option_flag', ((1,), (1,), (1,),), None,
                          None, Media, ctypes.c_char_p, ctypes.c_uint)
    return func(p_md, psz_options, i_flags)
def libvlc_media_retain(p_md):
    '''Retain a reference to a media descriptor object (libvlc_media_t). Use
    L{libvlc_media_release}() to decrement the reference count of a
    media descriptor object.
    @param p_md: the media descriptor.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_retain', None)
    if not func:
        func = _Cfunction('libvlc_media_retain', ((1,),), None,
                          None, Media)
    return func(p_md)
def libvlc_media_release(p_md):
    '''Decrement the reference count of a media descriptor object. If the
    reference count is 0, then L{libvlc_media_release}() will release the
    media descriptor object. It will send out an libvlc_MediaFreed event
    to all listeners. If the media descriptor object has been released it
    should not be used again.
    @param p_md: the media descriptor.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_release', None)
    if not func:
        func = _Cfunction('libvlc_media_release', ((1,),), None,
                          None, Media)
    return func(p_md)
def libvlc_media_get_mrl(p_md):
    '''Get the media resource locator (mrl) from a media descriptor object.
    @param p_md: a media descriptor object.
    @return: string with mrl of media descriptor object.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_mrl', None)
    if not func:
        func = _Cfunction('libvlc_media_get_mrl', ((1,),), string_result,
                          ctypes.c_void_p, Media)
    return func(p_md)
def libvlc_media_duplicate(p_md):
    '''Duplicate a media descriptor object.
    @param p_md: a media descriptor object.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_duplicate', None)
    if not func:
        func = _Cfunction('libvlc_media_duplicate', ((1,),), class_result(Media),
                          ctypes.c_void_p, Media)
    return func(p_md)
def libvlc_media_get_meta(p_md, e_meta):
    '''Read the meta of the media.
    If the media has not yet been parsed this will return None.
    See L{libvlc_media_parse}
    See L{libvlc_media_parse_with_options}
    See libvlc_MediaMetaChanged.
    @param p_md: the media descriptor.
    @param e_meta: the meta to read.
    @return: the media's meta.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_meta', None)
    if not func:
        func = _Cfunction('libvlc_media_get_meta', ((1,), (1,),), string_result,
                          ctypes.c_void_p, Media, Meta)
    return func(p_md, e_meta)
def libvlc_media_set_meta(p_md, e_meta, psz_value):
    '''Set the meta of the media (this function will not save the meta, call
    L{libvlc_media_save_meta} in order to save the meta).
    @param p_md: the media descriptor.
    @param e_meta: the meta to write.
    @param psz_value: the media's meta.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_set_meta', None)
    if not func:
        func = _Cfunction('libvlc_media_set_meta', ((1,), (1,), (1,),), None,
                          None, Media, Meta, ctypes.c_char_p)
    return func(p_md, e_meta, psz_value)
def libvlc_media_save_meta(p_md):
    '''Save the meta previously set.
    @param p_md: the media descriptor.
    @return: true if the write operation was successful.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_save_meta', None)
    if not func:
        func = _Cfunction('libvlc_media_save_meta', ((1,),), None,
                          ctypes.c_int, Media)
    return func(p_md)
def libvlc_media_get_state(p_md):
    '''Get current state of media descriptor object. Possible media states are
    libvlc_NothingSpecial=0, libvlc_Opening, libvlc_Playing, libvlc_Paused,
    libvlc_Stopped, libvlc_Ended, libvlc_Error.
    See libvlc_state_t.
    @param p_md: a media descriptor object.
    @return: state of media descriptor object.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_state', None)
    if not func:
        func = _Cfunction('libvlc_media_get_state', ((1,),), None,
                          State, Media)
    return func(p_md)
def libvlc_media_get_stats(p_md, p_stats):
    '''Get the current statistics about the media.
    @param p_md: media descriptor object.
    @param p_stats: structure that contain the statistics about the media (this structure must be allocated by the caller).
    @return: true if the statistics are available, false otherwise \libvlc_return_bool.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_stats', None)
    if not func:
        func = _Cfunction('libvlc_media_get_stats', ((1,), (1,),), None,
                          ctypes.c_int, Media, ctypes.POINTER(MediaStats))
    return func(p_md, p_stats)
def libvlc_media_subitems(p_md):
    '''Get subitems of media descriptor object. This will increment
    the reference count of supplied media descriptor object. Use
    L{libvlc_media_list_release}() to decrement the reference counting.
    @param p_md: media descriptor object.
    @return: list of media descriptor subitems or None.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_subitems', None)
    if not func:
        func = _Cfunction('libvlc_media_subitems', ((1,),), class_result(MediaList),
                          ctypes.c_void_p, Media)
    return func(p_md)
def libvlc_media_event_manager(p_md):
    '''Get event manager from media descriptor object.
    NOTE: this function doesn't increment reference counting.
    @param p_md: a media descriptor object.
    @return: event manager object.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_event_manager', None)
    if not func:
        func = _Cfunction('libvlc_media_event_manager', ((1,),), class_result(EventManager),
                          ctypes.c_void_p, Media)
    return func(p_md)
def libvlc_media_get_duration(p_md):
    '''Get duration (in ms) of media descriptor object item.
    @param p_md: media descriptor object.
    @return: duration of media item or -1 on error.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_duration', None)
    if not func:
        func = _Cfunction('libvlc_media_get_duration', ((1,),), None,
                          ctypes.c_longlong, Media)
    return func(p_md)
def libvlc_media_parse_with_options(p_md, parse_flag, timeout):
    '''Parse the media asynchronously with options.
    This fetches (local or network) art, meta data and/or tracks information.
    This method is the extended version of L{libvlc_media_parse_with_options}().
    To track when this is over you can listen to libvlc_MediaParsedChanged
    event. However if this functions returns an error, you will not receive any
    events.
    It uses a flag to specify parse options (see libvlc_media_parse_flag_t). All
    these flags can be combined. By default, media is parsed if it's a local
    file.
    @note: Parsing can be aborted with L{libvlc_media_parse_stop}().
    See libvlc_MediaParsedChanged
    See L{libvlc_media_get_meta}
    See L{libvlc_media_tracks_get}
    See L{libvlc_media_get_parsed_status}
    See libvlc_media_parse_flag_t.
    @param p_md: media descriptor object.
    @param parse_flag: parse options:
    @param timeout: maximum time allowed to preparse the media. If -1, the default "preparse-timeout" option will be used as a timeout. If 0, it will wait indefinitely. If > 0, the timeout will be used (in milliseconds).
    @return: -1 in case of error, 0 otherwise.
    @version: LibVLC 3.0.0 or later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_parse_with_options', None)
    if not func:
        func = _Cfunction('libvlc_media_parse_with_options', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Media, MediaParseFlag, ctypes.c_int)
    return func(p_md, parse_flag, timeout)
def libvlc_media_parse_stop(p_md):
    '''Stop the parsing of the media
    When the media parsing is stopped, the libvlc_MediaParsedChanged event will
    be sent with the libvlc_media_parsed_status_timeout status.
    See L{libvlc_media_parse_with_options}.
    @param p_md: media descriptor object.
    @version: LibVLC 3.0.0 or later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_parse_stop', None)
    if not func:
        func = _Cfunction('libvlc_media_parse_stop', ((1,),), None,
                          None, Media)
    return func(p_md)
def libvlc_media_get_parsed_status(p_md):
    '''Get Parsed status for media descriptor object.
    See libvlc_MediaParsedChanged
    See libvlc_media_parsed_status_t.
    @param p_md: media descriptor object.
    @return: a value of the libvlc_media_parsed_status_t enum.
    @version: LibVLC 3.0.0 or later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_parsed_status', None)
    if not func:
        func = _Cfunction('libvlc_media_get_parsed_status', ((1,),), None,
                          MediaParsedStatus, Media)
    return func(p_md)
def libvlc_media_set_user_data(p_md, p_new_user_data):
    '''Sets media descriptor's user_data. user_data is specialized data
    accessed by the host application, VLC.framework uses it as a pointer to
    an native object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    @param p_new_user_data: pointer to user data.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_set_user_data', None)
    if not func:
        func = _Cfunction('libvlc_media_set_user_data', ((1,), (1,),), None,
                          None, Media, ctypes.c_void_p)
    return func(p_md, p_new_user_data)
def libvlc_media_get_user_data(p_md):
    '''Get media descriptor's user_data. user_data is specialized data
    accessed by the host application, VLC.framework uses it as a pointer to
    an native object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_user_data', None)
    if not func:
        func = _Cfunction('libvlc_media_get_user_data', ((1,),), None,
                          ctypes.c_void_p, Media)
    return func(p_md)
def libvlc_media_tracks_get(p_md, tracks):
    '''Get media descriptor's elementary streams description
    Note, you need to call L{libvlc_media_parse}() or play the media at least once
    before calling this function.
    Not doing this will result in an empty array.
    @param p_md: media descriptor object.
    @param tracks: address to store an allocated array of Elementary Streams descriptions (must be freed with L{libvlc_media_tracks_release}.
    @return: the number of Elementary Streams (zero on error).
    @version: LibVLC 2.1.0 and later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_tracks_get', None)
    if not func:
        func = _Cfunction('libvlc_media_tracks_get', ((1,), (1,),), None,
                          ctypes.c_uint, Media, ctypes.POINTER(ctypes.POINTER(MediaTrack)))
    return func(p_md, tracks)
def libvlc_media_get_codec_description(i_type, i_codec):
    '''Get codec description from media elementary stream.
    @param i_type: i_type from L{MediaTrack}.
    @param i_codec: i_codec or i_original_fourcc from L{MediaTrack}.
    @return: codec description.
    @version: LibVLC 3.0.0 and later. See L{MediaTrack}.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_codec_description', None)
    if not func:
        func = _Cfunction('libvlc_media_get_codec_description', ((1,), (1,),), None,
                          ctypes.c_char_p, TrackType, ctypes.c_uint32)
    return func(i_type, i_codec)
def libvlc_media_tracks_release(p_tracks, i_count):
    '''Release media descriptor's elementary streams description array.
    @param p_tracks: tracks info array to release.
    @param i_count: number of elements in the array.
    @version: LibVLC 2.1.0 and later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_tracks_release', None)
    if not func:
        func = _Cfunction('libvlc_media_tracks_release', ((1,), (1,),), None,
                          None, ctypes.POINTER(MediaTrack), ctypes.c_uint)
    return func(p_tracks, i_count)
def libvlc_media_get_type(p_md):
    '''Get the media type of the media descriptor object.
    @param p_md: media descriptor object.
    @return: media type.
    @version: LibVLC 3.0.0 and later. See libvlc_media_type_t.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_get_type', None)
    if not func:
        func = _Cfunction('libvlc_media_get_type', ((1,),), None,
                          MediaType, Media)
    return func(p_md)
def libvlc_media_slaves_add(p_md, i_type, i_priority, psz_uri):
    '''Add a slave to the current media.
    A slave is an external input source that may contains an additional subtitle
    track (like a .srt) or an additional audio track (like a .ac3).
    @note: This function must be called before the media is parsed (via
    L{libvlc_media_parse_with_options}()) or before the media is played (via
    L{libvlc_media_player_play}()).
    @param p_md: media descriptor object.
    @param i_type: subtitle or audio.
    @param i_priority: from 0 (low priority) to 4 (high priority).
    @param psz_uri: Uri of the slave (should contain a valid scheme).
    @return: 0 on success, -1 on error.
    @version: LibVLC 3.0.0 and later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_slaves_add', None)
    if not func:
        func = _Cfunction('libvlc_media_slaves_add', ((1,), (1,), (1,), (1,),), None,
                          ctypes.c_int, Media, MediaSlaveType, ctypes.c_int, ctypes.c_char_p)
    return func(p_md, i_type, i_priority, psz_uri)
def libvlc_media_slaves_clear(p_md):
    '''Clear all slaves previously added by L{libvlc_media_slaves_add}() or
    internally.
    @param p_md: media descriptor object.
    @version: LibVLC 3.0.0 and later.
    '''
    # Fetch the cached ctypes wrapper; build and cache it on first use.
    func = _Cfunctions.get('libvlc_media_slaves_clear', None)
    if not func:
        func = _Cfunction('libvlc_media_slaves_clear', ((1,),), None,
                          None, Media)
    return func(p_md)
def libvlc_media_slaves_get(p_md, ppp_slaves):
'''Get a media descriptor's slave list
The list will contain slaves parsed by VLC or previously added by
L{libvlc_media_slaves_add}(). The typical use case of this function is to save
a list of slave in a database for a later use.
@param p_md: media descriptor object.
@param ppp_slaves: address to store an allocated array of slaves (must be freed with L{libvlc_media_slaves_release}()) [OUT].
@return: the number of slaves (zero on error).
@version: LibVLC 3.0.0 and later. See L{libvlc_media_slaves_add}.
'''
f = _Cfunctions.get('libvlc_media_slaves_get', None) or \
_Cfunction('libvlc_media_slaves_get', ((1,), (1,),), None,
ctypes.c_int, Media, ctypes.POINTER(ctypes.POINTER(MediaSlave)))
return f(p_md, ppp_slaves)
def libvlc_media_slaves_release(pp_slaves, i_count):
'''Release a media descriptor's slave list.
@param pp_slaves: slave array to release.
@param i_count: number of elements in the array.
@version: LibVLC 3.0.0 and later.
'''
f = _Cfunctions.get('libvlc_media_slaves_release', None) or \
_Cfunction('libvlc_media_slaves_release', ((1,), (1,),), None,
None, ctypes.POINTER(MediaSlave), ctypes.c_int)
return f(pp_slaves, i_count)
def libvlc_renderer_item_name(p_item):
    '''Get the human readable name of a renderer item.
    @return: the name of the item (can't be None, must *not* be freed).
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_item_name', None)
    if not func:  # build the ctypes prototype on first use, then reuse the cached one
        func = _Cfunction('libvlc_renderer_item_name', ((1,),), None,
                          ctypes.c_char_p, ctypes.c_void_p)
    return func(p_item)
def libvlc_renderer_item_type(p_item):
    '''Get the type (not translated) of a renderer item. For now, the type can only
    be "chromecast" ("upnp", "airplay" may come later).
    @return: the type of the item (can't be None, must *not* be freed).
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_item_type', None)
    if not func:
        func = _Cfunction('libvlc_renderer_item_type', ((1,),), None,
                          ctypes.c_char_p, ctypes.c_void_p)
    return func(p_item)
def libvlc_renderer_item_icon_uri(p_item):
    '''Get the icon uri of a renderer item.
    @return: the uri of the item's icon (can be None, must *not* be freed).
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_item_icon_uri', None)
    if not func:
        func = _Cfunction('libvlc_renderer_item_icon_uri', ((1,),), None,
                          ctypes.c_char_p, ctypes.c_void_p)
    return func(p_item)
def libvlc_renderer_item_flags(p_item):
    '''Get the flags of a renderer item
    See LIBVLC_RENDERER_CAN_AUDIO
    See LIBVLC_RENDERER_CAN_VIDEO.
    @return: bitwise flag: capabilities of the renderer, see.
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_item_flags', None)
    if not func:
        func = _Cfunction('libvlc_renderer_item_flags', ((1,),), None,
                          ctypes.c_int, ctypes.c_void_p)
    return func(p_item)
def libvlc_renderer_discoverer_new(p_inst, psz_name):
    '''Create a renderer discoverer object by name
    After this object is created, you should attach to events in order to be
    notified of the discoverer events.
    You need to call L{libvlc_renderer_discoverer_start}() in order to start the
    discovery.
    See L{libvlc_renderer_discoverer_event_manager}()
    See L{libvlc_renderer_discoverer_start}().
    @param p_inst: libvlc instance.
    @param psz_name: service name; use L{libvlc_renderer_discoverer_list_get}() to get a list of the discoverer names available in this libVLC instance.
    @return: media discover object or None in case of error.
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_discoverer_new', None)
    if not func:
        func = _Cfunction('libvlc_renderer_discoverer_new', ((1,), (1,),), None,
                          ctypes.c_void_p, Instance, ctypes.c_char_p)
    return func(p_inst, psz_name)
def libvlc_renderer_discoverer_release(p_rd):
    '''Release a renderer discoverer object.
    @param p_rd: renderer discoverer object.
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_discoverer_release', None)
    if not func:
        func = _Cfunction('libvlc_renderer_discoverer_release', ((1,),), None,
                          None, ctypes.c_void_p)
    return func(p_rd)
def libvlc_renderer_discoverer_start(p_rd):
    '''Start renderer discovery
    To stop it, call L{libvlc_renderer_discoverer_stop}() or
    L{libvlc_renderer_discoverer_release}() directly.
    See L{libvlc_renderer_discoverer_stop}().
    @param p_rd: renderer discoverer object.
    @return: -1 in case of error, 0 otherwise.
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_discoverer_start', None)
    if not func:
        func = _Cfunction('libvlc_renderer_discoverer_start', ((1,),), None,
                          ctypes.c_int, ctypes.c_void_p)
    return func(p_rd)
def libvlc_renderer_discoverer_stop(p_rd):
    '''Stop renderer discovery.
    See L{libvlc_renderer_discoverer_start}().
    @param p_rd: renderer discoverer object.
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_discoverer_stop', None)
    if not func:
        func = _Cfunction('libvlc_renderer_discoverer_stop', ((1,),), None,
                          None, ctypes.c_void_p)
    return func(p_rd)
def libvlc_renderer_discoverer_event_manager(p_rd):
    '''Get the event manager of the renderer discoverer
    The possible events to attach are @ref libvlc_RendererDiscovererItemAdded
    and @ref libvlc_RendererDiscovererItemDeleted.
    The @ref libvlc_renderer_item_t struct passed to event callbacks is owned by
    VLC, users should take care of copying this struct for their internal usage.
    See libvlc_event_t.u.renderer_discoverer_item_added.item
    See libvlc_event_t.u.renderer_discoverer_item_removed.item.
    @return: a valid event manager (can't fail).
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_renderer_discoverer_event_manager', None)
    if not func:
        func = _Cfunction('libvlc_renderer_discoverer_event_manager', ((1,),), class_result(EventManager),
                          ctypes.c_void_p, ctypes.c_void_p)
    return func(p_rd)
def libvlc_renderer_discoverer_list_get(p_inst, ppp_services):
    '''Get media discoverer services
    See libvlc_renderer_list_release().
    @param p_inst: libvlc instance.
    @param ppp_services: address to store an allocated array of renderer discoverer services (must be freed with libvlc_renderer_list_release() by the caller) [OUT].
    @return: the number of media discoverer services (0 on error).
    @version: LibVLC 3.0.0 and later.
    '''
    func = _Cfunctions.get('libvlc_renderer_discoverer_list_get', None)
    if not func:
        func = _Cfunction('libvlc_renderer_discoverer_list_get', ((1,), (1,),), None,
                          ctypes.c_size_t, Instance, ctypes.POINTER(ctypes.POINTER(RDDescription)))
    return func(p_inst, ppp_services)
def libvlc_renderer_discoverer_list_release(pp_services, i_count):
    '''Release an array of media discoverer services
    See L{libvlc_renderer_discoverer_list_get}().
    @param pp_services: array to release.
    @param i_count: number of elements in the array.
    @version: LibVLC 3.0.0 and later.
    '''
    func = _Cfunctions.get('libvlc_renderer_discoverer_list_release', None)
    if not func:
        func = _Cfunction('libvlc_renderer_discoverer_list_release', ((1,), (1,),), None,
                          None, ctypes.POINTER(RDDescription), ctypes.c_size_t)
    return func(pp_services, i_count)
def libvlc_media_list_new(p_instance):
    '''Create an empty media list.
    @param p_instance: libvlc instance.
    @return: empty media list, or None on error.
    '''
    func = _Cfunctions.get('libvlc_media_list_new', None)
    if not func:  # build the ctypes prototype on first use, then reuse the cached one
        func = _Cfunction('libvlc_media_list_new', ((1,),), class_result(MediaList),
                          ctypes.c_void_p, Instance)
    return func(p_instance)
def libvlc_media_list_release(p_ml):
    '''Release media list created with L{libvlc_media_list_new}().
    @param p_ml: a media list created with L{libvlc_media_list_new}().
    '''
    func = _Cfunctions.get('libvlc_media_list_release', None)
    if not func:
        func = _Cfunction('libvlc_media_list_release', ((1,),), None,
                          None, MediaList)
    return func(p_ml)
def libvlc_media_list_retain(p_ml):
    '''Retain reference to a media list.
    @param p_ml: a media list created with L{libvlc_media_list_new}().
    '''
    func = _Cfunctions.get('libvlc_media_list_retain', None)
    if not func:
        func = _Cfunction('libvlc_media_list_retain', ((1,),), None,
                          None, MediaList)
    return func(p_ml)
def libvlc_media_list_set_media(p_ml, p_md):
    '''Associate media instance with this media list instance.
    If another media instance was present it will be released.
    The L{libvlc_media_list_lock} should NOT be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: media instance to add.
    '''
    func = _Cfunctions.get('libvlc_media_list_set_media', None)
    if not func:
        func = _Cfunction('libvlc_media_list_set_media', ((1,), (1,),), None,
                          None, MediaList, Media)
    return func(p_ml, p_md)
def libvlc_media_list_media(p_ml):
    '''Get media instance from this media list instance. This action will increase
    the refcount on the media instance.
    The L{libvlc_media_list_lock} should NOT be held upon entering this function.
    @param p_ml: a media list instance.
    @return: media instance.
    '''
    func = _Cfunctions.get('libvlc_media_list_media', None)
    if not func:
        func = _Cfunction('libvlc_media_list_media', ((1,),), class_result(Media),
                          ctypes.c_void_p, MediaList)
    return func(p_ml)
def libvlc_media_list_add_media(p_ml, p_md):
    '''Add media instance to media list
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: a media instance.
    @return: 0 on success, -1 if the media list is read-only.
    '''
    func = _Cfunctions.get('libvlc_media_list_add_media', None)
    if not func:
        func = _Cfunction('libvlc_media_list_add_media', ((1,), (1,),), None,
                          ctypes.c_int, MediaList, Media)
    return func(p_ml, p_md)
def libvlc_media_list_insert_media(p_ml, p_md, i_pos):
    '''Insert media instance in media list on a position
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: a media instance.
    @param i_pos: position in array where to insert.
    @return: 0 on success, -1 if the media list is read-only.
    '''
    func = _Cfunctions.get('libvlc_media_list_insert_media', None)
    if not func:
        func = _Cfunction('libvlc_media_list_insert_media', ((1,), (1,), (1,),), None,
                          ctypes.c_int, MediaList, Media, ctypes.c_int)
    return func(p_ml, p_md, i_pos)
def libvlc_media_list_remove_index(p_ml, i_pos):
    '''Remove media instance from media list on a position
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param i_pos: position in array where to insert.
    @return: 0 on success, -1 if the list is read-only or the item was not found.
    '''
    func = _Cfunctions.get('libvlc_media_list_remove_index', None)
    if not func:
        func = _Cfunction('libvlc_media_list_remove_index', ((1,), (1,),), None,
                          ctypes.c_int, MediaList, ctypes.c_int)
    return func(p_ml, i_pos)
def libvlc_media_list_count(p_ml):
    '''Get count on media list items
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @return: number of items in media list.
    '''
    func = _Cfunctions.get('libvlc_media_list_count', None)
    if not func:
        func = _Cfunction('libvlc_media_list_count', ((1,),), None,
                          ctypes.c_int, MediaList)
    return func(p_ml)
def libvlc_media_list_item_at_index(p_ml, i_pos):
    '''List media instance in media list at a position
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param i_pos: position in array where to insert.
    @return: media instance at position i_pos, or None if not found. In case of success, L{libvlc_media_retain}() is called to increase the refcount on the media.
    '''
    func = _Cfunctions.get('libvlc_media_list_item_at_index', None)
    if not func:
        func = _Cfunction('libvlc_media_list_item_at_index', ((1,), (1,),), class_result(Media),
                          ctypes.c_void_p, MediaList, ctypes.c_int)
    return func(p_ml, i_pos)
def libvlc_media_list_index_of_item(p_ml, p_md):
    '''Find index position of List media instance in media list.
    Warning: the function will return the first matched position.
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: media instance.
    @return: position of media instance or -1 if media not found.
    '''
    func = _Cfunctions.get('libvlc_media_list_index_of_item', None)
    if not func:
        func = _Cfunction('libvlc_media_list_index_of_item', ((1,), (1,),), None,
                          ctypes.c_int, MediaList, Media)
    return func(p_ml, p_md)
def libvlc_media_list_is_readonly(p_ml):
    '''This indicates if this media list is read-only from a user point of view.
    @param p_ml: media list instance.
    @return: 1 on readonly, 0 on readwrite \libvlc_return_bool.
    '''
    func = _Cfunctions.get('libvlc_media_list_is_readonly', None)
    if not func:
        func = _Cfunction('libvlc_media_list_is_readonly', ((1,),), None,
                          ctypes.c_int, MediaList)
    return func(p_ml)
def libvlc_media_list_lock(p_ml):
    '''Get lock on media list items.
    @param p_ml: a media list instance.
    '''
    func = _Cfunctions.get('libvlc_media_list_lock', None)
    if not func:
        func = _Cfunction('libvlc_media_list_lock', ((1,),), None,
                          None, MediaList)
    return func(p_ml)
def libvlc_media_list_unlock(p_ml):
    '''Release lock on media list items
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    '''
    func = _Cfunctions.get('libvlc_media_list_unlock', None)
    if not func:
        func = _Cfunction('libvlc_media_list_unlock', ((1,),), None,
                          None, MediaList)
    return func(p_ml)
def libvlc_media_list_event_manager(p_ml):
    '''Get libvlc_event_manager from this media list instance.
    The p_event_manager is immutable, so you don't have to hold the lock.
    @param p_ml: a media list instance.
    @return: libvlc_event_manager.
    '''
    func = _Cfunctions.get('libvlc_media_list_event_manager', None)
    if not func:
        func = _Cfunction('libvlc_media_list_event_manager', ((1,),), class_result(EventManager),
                          ctypes.c_void_p, MediaList)
    return func(p_ml)
def libvlc_media_player_get_fps(p_mi):
    '''Get movie fps rate
    This function is provided for backward compatibility. It cannot deal with
    multiple video tracks. In LibVLC versions prior to 3.0, it would also fail
    if the file format did not convey the frame rate explicitly.
    \deprecated Consider using L{libvlc_media_tracks_get}() instead.
    @param p_mi: the Media Player.
    @return: frames per second (fps) for this playing movie, or 0 if unspecified.
    '''
    func = _Cfunctions.get('libvlc_media_player_get_fps', None)
    if not func:  # build the ctypes prototype on first use, then reuse the cached one
        func = _Cfunction('libvlc_media_player_get_fps', ((1,),), None,
                          ctypes.c_float, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_agl(p_mi, drawable):
    '''\deprecated Use L{libvlc_media_player_set_nsobject}() instead.
    '''
    func = _Cfunctions.get('libvlc_media_player_set_agl', None)
    if not func:
        func = _Cfunction('libvlc_media_player_set_agl', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_uint32)
    return func(p_mi, drawable)
def libvlc_media_player_get_agl(p_mi):
    '''\deprecated Use L{libvlc_media_player_get_nsobject}() instead.
    '''
    func = _Cfunctions.get('libvlc_media_player_get_agl', None)
    if not func:
        func = _Cfunction('libvlc_media_player_get_agl', ((1,),), None,
                          ctypes.c_uint32, MediaPlayer)
    return func(p_mi)
def libvlc_track_description_release(p_track_description):
    '''\deprecated Use L{libvlc_track_description_list_release}() instead.
    '''
    func = _Cfunctions.get('libvlc_track_description_release', None)
    if not func:
        func = _Cfunction('libvlc_track_description_release', ((1,),), None,
                          None, ctypes.POINTER(TrackDescription))
    return func(p_track_description)
def libvlc_video_get_height(p_mi):
    '''Get current video height.
    \deprecated Use L{libvlc_video_get_size}() instead.
    @param p_mi: the media player.
    @return: the video pixel height or 0 if not applicable.
    '''
    func = _Cfunctions.get('libvlc_video_get_height', None)
    if not func:
        func = _Cfunction('libvlc_video_get_height', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_video_get_width(p_mi):
    '''Get current video width.
    \deprecated Use L{libvlc_video_get_size}() instead.
    @param p_mi: the media player.
    @return: the video pixel width or 0 if not applicable.
    '''
    func = _Cfunctions.get('libvlc_video_get_width', None)
    if not func:
        func = _Cfunction('libvlc_video_get_width', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_video_get_title_description(p_mi):
    '''Get the description of available titles.
    @param p_mi: the media player.
    @return: list containing description of available titles. It must be freed with L{libvlc_track_description_list_release}().
    '''
    func = _Cfunctions.get('libvlc_video_get_title_description', None)
    if not func:
        func = _Cfunction('libvlc_video_get_title_description', ((1,),), None,
                          ctypes.POINTER(TrackDescription), MediaPlayer)
    return func(p_mi)
def libvlc_video_get_chapter_description(p_mi, i_title):
    '''Get the description of available chapters for specific title.
    @param p_mi: the media player.
    @param i_title: selected title.
    @return: list containing description of available chapter for title i_title. It must be freed with L{libvlc_track_description_list_release}().
    '''
    func = _Cfunctions.get('libvlc_video_get_chapter_description', None)
    if not func:
        func = _Cfunction('libvlc_video_get_chapter_description', ((1,), (1,),), None,
                          ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int)
    return func(p_mi, i_title)
def libvlc_video_set_subtitle_file(p_mi, psz_subtitle):
    '''Set new video subtitle file.
    \deprecated Use L{libvlc_media_player_add_slave}() instead.
    @param p_mi: the media player.
    @param psz_subtitle: new video subtitle file.
    @return: the success status (boolean).
    '''
    func = _Cfunctions.get('libvlc_video_set_subtitle_file', None)
    if not func:
        func = _Cfunction('libvlc_video_set_subtitle_file', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_char_p)
    return func(p_mi, psz_subtitle)
def libvlc_audio_output_device_count(p_instance, psz_audio_output):
    '''Backward compatibility stub. Do not use in new code.
    \deprecated Use L{libvlc_audio_output_device_list_get}() instead.
    @return: always 0.
    '''
    func = _Cfunctions.get('libvlc_audio_output_device_count', None)
    if not func:  # build the ctypes prototype on first use, then reuse the cached one
        func = _Cfunction('libvlc_audio_output_device_count', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_audio_output)
def libvlc_audio_output_device_longname(p_instance, psz_output, i_device):
    '''Backward compatibility stub. Do not use in new code.
    \deprecated Use L{libvlc_audio_output_device_list_get}() instead.
    @return: always None.
    '''
    func = _Cfunctions.get('libvlc_audio_output_device_longname', None)
    if not func:
        func = _Cfunction('libvlc_audio_output_device_longname', ((1,), (1,), (1,),), string_result,
                          ctypes.c_void_p, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_output, i_device)
def libvlc_audio_output_device_id(p_instance, psz_audio_output, i_device):
    '''Backward compatibility stub. Do not use in new code.
    \deprecated Use L{libvlc_audio_output_device_list_get}() instead.
    @return: always None.
    '''
    func = _Cfunctions.get('libvlc_audio_output_device_id', None)
    if not func:
        func = _Cfunction('libvlc_audio_output_device_id', ((1,), (1,), (1,),), string_result,
                          ctypes.c_void_p, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_audio_output, i_device)
def libvlc_media_parse(p_md):
    '''Parse a media.
    This fetches (local) art, meta data and tracks information.
    The method is synchronous.
    \deprecated This function could block indefinitely.
    Use L{libvlc_media_parse_with_options}() instead
    See L{libvlc_media_parse_with_options}
    See L{libvlc_media_get_meta}
    See L{libvlc_media_get_tracks_info}.
    @param p_md: media descriptor object.
    '''
    func = _Cfunctions.get('libvlc_media_parse', None)
    if not func:  # build the ctypes prototype on first use, then reuse the cached one
        func = _Cfunction('libvlc_media_parse', ((1,),), None,
                          None, Media)
    return func(p_md)
def libvlc_media_parse_async(p_md):
    '''Parse a media.
    This fetches (local) art, meta data and tracks information.
    The method is the asynchronous of L{libvlc_media_parse}().
    To track when this is over you can listen to libvlc_MediaParsedChanged
    event. However if the media was already parsed you will not receive this
    event.
    \deprecated You can't be sure to receive the libvlc_MediaParsedChanged
    event (you can wait indefinitely for this event).
    Use L{libvlc_media_parse_with_options}() instead
    See L{libvlc_media_parse}
    See libvlc_MediaParsedChanged
    See L{libvlc_media_get_meta}
    See L{libvlc_media_get_tracks_info}.
    @param p_md: media descriptor object.
    '''
    func = _Cfunctions.get('libvlc_media_parse_async', None)
    if not func:
        func = _Cfunction('libvlc_media_parse_async', ((1,),), None,
                          None, Media)
    return func(p_md)
def libvlc_media_is_parsed(p_md):
    '''Return true is the media descriptor object is parsed
    \deprecated This can return true in case of failure.
    Use L{libvlc_media_get_parsed_status}() instead
    See libvlc_MediaParsedChanged.
    @param p_md: media descriptor object.
    @return: true if media object has been parsed otherwise it returns false \libvlc_return_bool.
    '''
    func = _Cfunctions.get('libvlc_media_is_parsed', None)
    if not func:
        func = _Cfunction('libvlc_media_is_parsed', ((1,),), None,
                          ctypes.c_int, Media)
    return func(p_md)
def libvlc_media_get_tracks_info(p_md):
    '''Get media descriptor's elementary streams description
    Note, you need to call L{libvlc_media_parse}() or play the media at least once
    before calling this function.
    Not doing this will result in an empty array.
    \deprecated Use L{libvlc_media_tracks_get}() instead.
    @param p_md: media descriptor object.
    @param tracks: address to store an allocated array of Elementary Streams descriptions (must be freed by the caller) [OUT].
    @return: the number of Elementary Streams.
    '''
    func = _Cfunctions.get('libvlc_media_get_tracks_info', None)
    if not func:
        # the (2,) flag marks the second argument as an output parameter
        func = _Cfunction('libvlc_media_get_tracks_info', ((1,), (2,),), None,
                          ctypes.c_int, Media, ctypes.POINTER(ctypes.c_void_p))
    return func(p_md)
def libvlc_media_discoverer_new_from_name(p_inst, psz_name):
    '''\deprecated Use L{libvlc_media_discoverer_new}() and L{libvlc_media_discoverer_start}().
    '''
    func = _Cfunctions.get('libvlc_media_discoverer_new_from_name', None)
    if not func:
        func = _Cfunction('libvlc_media_discoverer_new_from_name', ((1,), (1,),), class_result(MediaDiscoverer),
                          ctypes.c_void_p, Instance, ctypes.c_char_p)
    return func(p_inst, psz_name)
def libvlc_media_discoverer_localized_name(p_mdis):
    '''Get media service discover object its localized name.
    \deprecated Useless, use L{libvlc_media_discoverer_list_get}() to get the
    longname of the service discovery.
    @param p_mdis: media discover object.
    @return: localized name or None if the media_discoverer is not started.
    '''
    func = _Cfunctions.get('libvlc_media_discoverer_localized_name', None)
    if not func:
        func = _Cfunction('libvlc_media_discoverer_localized_name', ((1,),), string_result,
                          ctypes.c_void_p, MediaDiscoverer)
    return func(p_mdis)
def libvlc_media_discoverer_event_manager(p_mdis):
    '''Get event manager from media service discover object.
    \deprecated Useless, media_discoverer events are only triggered when calling
    L{libvlc_media_discoverer_start}() and L{libvlc_media_discoverer_stop}().
    @param p_mdis: media service discover object.
    @return: event manager object.
    '''
    func = _Cfunctions.get('libvlc_media_discoverer_event_manager', None)
    if not func:
        func = _Cfunction('libvlc_media_discoverer_event_manager', ((1,),), class_result(EventManager),
                          ctypes.c_void_p, MediaDiscoverer)
    return func(p_mdis)
def libvlc_wait(p_instance):
    '''Waits until an interface causes the instance to exit.
    You should start at least one interface first, using L{libvlc_add_intf}().
    @param p_instance: the instance @warning This function wastes one thread doing basically nothing. libvlc_set_exit_handler() should be used instead.
    '''
    func = _Cfunctions.get('libvlc_wait', None)
    if not func:  # build the ctypes prototype on first use, then reuse the cached one
        func = _Cfunction('libvlc_wait', ((1,),), None,
                          None, Instance)
    return func(p_instance)
def libvlc_get_log_verbosity(p_instance):
    '''Always returns minus one.
    This function is only provided for backward compatibility.
    @param p_instance: ignored.
    @return: always -1.
    '''
    func = _Cfunctions.get('libvlc_get_log_verbosity', None)
    if not func:
        func = _Cfunction('libvlc_get_log_verbosity', ((1,),), None,
                          ctypes.c_uint, Instance)
    return func(p_instance)
def libvlc_set_log_verbosity(p_instance, level):
    '''This function does nothing.
    It is only provided for backward compatibility.
    @param p_instance: ignored.
    @param level: ignored.
    '''
    func = _Cfunctions.get('libvlc_set_log_verbosity', None)
    if not func:
        func = _Cfunction('libvlc_set_log_verbosity', ((1,), (1,),), None,
                          None, Instance, ctypes.c_uint)
    return func(p_instance, level)
def libvlc_log_open(p_instance):
    '''This function does nothing useful.
    It is only provided for backward compatibility.
    @param p_instance: libvlc instance.
    @return: an unique pointer or None on error.
    '''
    func = _Cfunctions.get('libvlc_log_open', None)
    if not func:
        func = _Cfunction('libvlc_log_open', ((1,),), None,
                          Log_ptr, Instance)
    return func(p_instance)
def libvlc_log_close(p_log):
    '''Frees memory allocated by L{libvlc_log_open}().
    @param p_log: libvlc log instance or None.
    '''
    func = _Cfunctions.get('libvlc_log_close', None)
    if not func:
        func = _Cfunction('libvlc_log_close', ((1,),), None,
                          None, Log_ptr)
    return func(p_log)
def libvlc_log_count(p_log):
    '''Always returns zero.
    This function is only provided for backward compatibility.
    @param p_log: ignored.
    @return: always zero.
    '''
    func = _Cfunctions.get('libvlc_log_count', None)
    if not func:
        func = _Cfunction('libvlc_log_count', ((1,),), None,
                          ctypes.c_uint, Log_ptr)
    return func(p_log)
def libvlc_log_clear(p_log):
    '''This function does nothing.
    It is only provided for backward compatibility.
    @param p_log: ignored.
    '''
    func = _Cfunctions.get('libvlc_log_clear', None)
    if not func:
        func = _Cfunction('libvlc_log_clear', ((1,),), None,
                          None, Log_ptr)
    return func(p_log)
def libvlc_log_get_iterator(p_log):
    '''This function does nothing useful.
    It is only provided for backward compatibility.
    @param p_log: ignored.
    @return: an unique pointer or None on error or if the parameter was None.
    '''
    func = _Cfunctions.get('libvlc_log_get_iterator', None)
    if not func:
        func = _Cfunction('libvlc_log_get_iterator', ((1,),), class_result(LogIterator),
                          ctypes.c_void_p, Log_ptr)
    return func(p_log)
def libvlc_log_iterator_free(p_iter):
    '''Frees memory allocated by L{libvlc_log_get_iterator}().
    @param p_iter: libvlc log iterator or None.
    '''
    func = _Cfunctions.get('libvlc_log_iterator_free', None)
    if not func:
        func = _Cfunction('libvlc_log_iterator_free', ((1,),), None,
                          None, LogIterator)
    return func(p_iter)
def libvlc_log_iterator_has_next(p_iter):
    '''Always returns zero.
    This function is only provided for backward compatibility.
    @param p_iter: ignored.
    @return: always zero.
    '''
    func = _Cfunctions.get('libvlc_log_iterator_has_next', None)
    if not func:
        func = _Cfunction('libvlc_log_iterator_has_next', ((1,),), None,
                          ctypes.c_int, LogIterator)
    return func(p_iter)
def libvlc_log_iterator_next(p_iter, p_buf):
    '''Always returns None.
    This function is only provided for backward compatibility.
    @param p_iter: libvlc log iterator or None.
    @param p_buf: ignored.
    @return: always None.
    '''
    func = _Cfunctions.get('libvlc_log_iterator_next', None)
    if not func:
        func = _Cfunction('libvlc_log_iterator_next', ((1,), (1,),), None,
                          ctypes.POINTER(LogMessage), LogIterator, ctypes.POINTER(LogMessage))
    return func(p_iter, p_buf)
def libvlc_playlist_play(p_instance, i_id, i_options, ppsz_options):
    '''Start playing (if there is any item in the playlist).
    Additionnal playlist item options can be specified for addition to the
    item before it is played.
    @param p_instance: the playlist instance.
    @param i_id: the item to play. If this is a negative number, the next item will be selected. Otherwise, the item with the given ID will be played.
    @param i_options: the number of options to add to the item.
    @param ppsz_options: the options to add to the item.
    '''
    func = _Cfunctions.get('libvlc_playlist_play', None)
    if not func:
        func = _Cfunction('libvlc_playlist_play', ((1,), (1,), (1,), (1,),), None,
                          None, Instance, ctypes.c_int, ctypes.c_int, ListPOINTER(ctypes.c_char_p))
    return func(p_instance, i_id, i_options, ppsz_options)
def libvlc_media_player_new(p_libvlc_instance):
    '''Create an empty Media Player object.
    @param p_libvlc_instance: the libvlc instance in which the Media Player should be created.
    @return: a new media player object, or None on error.
    '''
    func = _Cfunctions.get('libvlc_media_player_new', None)
    if not func:  # build the ctypes prototype on first use, then reuse the cached one
        func = _Cfunction('libvlc_media_player_new', ((1,),), class_result(MediaPlayer),
                          ctypes.c_void_p, Instance)
    return func(p_libvlc_instance)
def libvlc_media_player_new_from_media(p_md):
    '''Create a Media Player object from a Media.
    @param p_md: the media. Afterwards the p_md can be safely destroyed.
    @return: a new media player object, or None on error.
    '''
    func = _Cfunctions.get('libvlc_media_player_new_from_media', None)
    if not func:
        func = _Cfunction('libvlc_media_player_new_from_media', ((1,),), class_result(MediaPlayer),
                          ctypes.c_void_p, Media)
    return func(p_md)
def libvlc_media_player_release(p_mi):
    '''Release a media_player after use
    Decrement the reference count of a media player object. If the
    reference count is 0, then L{libvlc_media_player_release}() will
    release the media player object. If the media player object
    has been released, then it should not be used again.
    @param p_mi: the Media Player to free.
    '''
    func = _Cfunctions.get('libvlc_media_player_release', None)
    if not func:
        func = _Cfunction('libvlc_media_player_release', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_retain(p_mi):
    '''Retain a reference to a media player object. Use
    L{libvlc_media_player_release}() to decrement reference count.
    @param p_mi: media player object.
    '''
    func = _Cfunctions.get('libvlc_media_player_retain', None)
    if not func:
        func = _Cfunction('libvlc_media_player_retain', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_media(p_mi, p_md):
    '''Set the media that will be used by the media_player. If any,
    previous md will be released.
    @param p_mi: the Media Player.
    @param p_md: the Media. Afterwards the p_md can be safely destroyed.
    '''
    func = _Cfunctions.get('libvlc_media_player_set_media', None)
    if not func:
        func = _Cfunction('libvlc_media_player_set_media', ((1,), (1,),), None,
                          None, MediaPlayer, Media)
    return func(p_mi, p_md)
def libvlc_media_player_get_media(p_mi):
    '''Get the media used by the media_player.
    @param p_mi: the Media Player.
    @return: the media associated with p_mi, or None if no media is associated.
    '''
    func = _Cfunctions.get('libvlc_media_player_get_media', None)
    if not func:
        func = _Cfunction('libvlc_media_player_get_media', ((1,),), class_result(Media),
                          ctypes.c_void_p, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_event_manager(p_mi):
    '''Get the Event Manager from which the media player send event.
    @param p_mi: the Media Player.
    @return: the event manager associated with p_mi.
    '''
    func = _Cfunctions.get('libvlc_media_player_event_manager', None)
    if not func:
        func = _Cfunction('libvlc_media_player_event_manager', ((1,),), class_result(EventManager),
                          ctypes.c_void_p, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_is_playing(p_mi):
    '''is_playing.
    @param p_mi: the Media Player.
    @return: 1 if the media player is playing, 0 otherwise \libvlc_return_bool.
    '''
    func = _Cfunctions.get('libvlc_media_player_is_playing', None)
    if not func:
        func = _Cfunction('libvlc_media_player_is_playing', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_play(p_mi):
    '''Play.
    @param p_mi: the Media Player.
    @return: 0 if playback started (and was already started), or -1 on error.
    '''
    func = _Cfunctions.get('libvlc_media_player_play', None)
    if not func:
        func = _Cfunction('libvlc_media_player_play', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_pause(mp, do_pause):
    '''Pause or resume (no effect if there is no media).
    @param mp: the Media Player.
    @param do_pause: play/resume if zero, pause if non-zero.
    @version: LibVLC 1.1.1 or later.
    '''
    func = _Cfunctions.get('libvlc_media_player_set_pause', None)
    if not func:
        func = _Cfunction('libvlc_media_player_set_pause', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_int)
    return func(mp, do_pause)
def libvlc_media_player_pause(p_mi):
    '''Toggle pause (no effect if there is no media).
    @param p_mi: the Media Player.
    '''
    func = _Cfunctions.get('libvlc_media_player_pause', None)
    if not func:
        func = _Cfunction('libvlc_media_player_pause', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_stop(p_mi):
    '''Stop (no effect if there is no media).
    @param p_mi: the Media Player.
    '''
    func = _Cfunctions.get('libvlc_media_player_stop', None)
    if not func:
        func = _Cfunction('libvlc_media_player_stop', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_renderer(p_mi, p_item):
    '''Set a renderer to the media player
    @note: must be called before the first call of L{libvlc_media_player_play}() to
    take effect.
    See L{libvlc_renderer_discoverer_new}.
    @param p_mi: the Media Player.
    @param p_item: an item discovered by L{libvlc_renderer_discoverer_start}().
    @return: 0 on success, -1 on error.
    @version: LibVLC 3.0.0 or later.
    '''
    func = _Cfunctions.get('libvlc_media_player_set_renderer', None)
    if not func:
        func = _Cfunction('libvlc_media_player_set_renderer', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_void_p)
    return func(p_mi, p_item)
def libvlc_video_set_callbacks(mp, lock, unlock, display, opaque):
    '''Set callbacks and private data to render decoded video to a custom area
    in memory.
    Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
    to configure the decoded format.
    @warning: Rendering video into custom memory buffers is considerably less
    efficient than rendering in a custom window as normal.
    For optimal perfomances, VLC media player renders into a custom window, and
    does not use this function and associated callbacks. It is B{highly
    recommended} that other LibVLC-based application do likewise.
    To embed video in a window, use libvlc_media_player_set_xid() or equivalent
    depending on the operating system.
    If window embedding does not fit the application use case, then a custom
    LibVLC video output display plugin is required to maintain optimal video
    rendering performances.
    The following limitations affect performance:
    - Hardware video decoding acceleration will either be disabled completely,
    or require (relatively slow) copy from video/DSP memory to main memory.
    - Sub-pictures (subtitles, on-screen display, etc.) must be blent into the
    main picture by the CPU instead of the GPU.
    - Depending on the video format, pixel format conversion, picture scaling,
    cropping and/or picture re-orientation, must be performed by the CPU
    instead of the GPU.
    - Memory copying is required between LibVLC reference picture buffers and
    application buffers (between lock and unlock callbacks).
    @param mp: the media player.
    @param lock: callback to lock video memory (must not be None).
    @param unlock: callback to unlock video memory (or None if not needed).
    @param display: callback to display video (or None if not needed).
    @param opaque: private pointer for the three callbacks (as first parameter).
    @version: LibVLC 1.1.1 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_callbacks', None) or \
        _Cfunction('libvlc_video_set_callbacks', ((1,), (1,), (1,), (1,), (1,),), None,
                    None, MediaPlayer, VideoLockCb, VideoUnlockCb, VideoDisplayCb, ctypes.c_void_p)
    return f(mp, lock, unlock, display, opaque)
def libvlc_video_set_format(mp, chroma, width, height, pitch):
    '''Set decoded video chroma and dimensions.
    This only works in combination with L{libvlc_video_set_callbacks}(),
    and is mutually exclusive with L{libvlc_video_set_format_callbacks}().
    @param mp: the media player.
    @param chroma: a four-characters string identifying the chroma (e.g. "RV32" or "YUYV").
    @param width: pixel width.
    @param height: pixel height.
    @param pitch: line pitch (in bytes).
    @version: LibVLC 1.1.1 or later.
    @bug: All pixel planes are expected to have the same pitch. To use the YCbCr color space with chrominance subsampling, consider using L{libvlc_video_set_format_callbacks}() instead.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_format', None) or \
        _Cfunction('libvlc_video_set_format', ((1,), (1,), (1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint)
    return f(mp, chroma, width, height, pitch)
def libvlc_video_set_format_callbacks(mp, setup, cleanup):
    '''Set decoded video chroma and dimensions. This only works in combination with
    L{libvlc_video_set_callbacks}().
    @param mp: the media player.
    @param setup: callback to select the video format (cannot be None).
    @param cleanup: callback to release any allocated resources (or None).
    @version: LibVLC 2.0.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_format_callbacks', None) or \
        _Cfunction('libvlc_video_set_format_callbacks', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, VideoFormatCb, VideoCleanupCb)
    return f(mp, setup, cleanup)
def libvlc_media_player_set_nsobject(p_mi, drawable):
    r'''Set the NSView handler where the media player should render its video output.
    Use the vout called "macosx".
    The drawable is an NSObject that follow the VLCOpenGLVideoViewEmbedding
    protocol:
    @code.m
    \@protocol VLCOpenGLVideoViewEmbedding <NSObject>
    - (void)addVoutSubview:(NSView *)view;
    - (void)removeVoutSubview:(NSView *)view;
    \@end
    @endcode
    Or it can be an NSView object.
    If you want to use it along with Qt see the QMacCocoaViewContainer. Then
    the following code should work:
    @code.mm
    NSView *video = [[NSView alloc] init];
    QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent);
    L{libvlc_media_player_set_nsobject}(mp, video);
    [video release];
    @endcode
    You can find a live example in VLCVideoView in VLCKit.framework.
    @param p_mi: the Media Player.
    @param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_nsobject', None) or \
        _Cfunction('libvlc_media_player_set_nsobject', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_void_p)
    return f(p_mi, drawable)
def libvlc_media_player_get_nsobject(p_mi):
    '''Get the NSView handler previously set with L{libvlc_media_player_set_nsobject}().
    @param p_mi: the Media Player.
    @return: the NSView handler or 0 if none where set.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_nsobject', None) or \
        _Cfunction('libvlc_media_player_get_nsobject', ((1,),), None,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_set_xwindow(p_mi, drawable):
    '''Set an X Window System drawable where the media player should render its
    video output. The call takes effect when the playback starts. If it is
    already started, it might need to be stopped before changes apply.
    If LibVLC was built without X11 output support, then this function has no
    effects.
    By default, LibVLC will capture input events on the video rendering area.
    Use L{libvlc_video_set_mouse_input}() and L{libvlc_video_set_key_input}() to
    disable that and deliver events to the parent window / to the application
    instead. By design, the X11 protocol delivers input events to only one
    recipient.
    @warning
    The application must call the XInitThreads() function from Xlib before
    L{libvlc_new}(), and before any call to XOpenDisplay() directly or via any
    other library. Failure to call XInitThreads() will seriously impede LibVLC
    performance. Calling XOpenDisplay() before XInitThreads() will eventually
    crash the process. That is a limitation of Xlib.
    @param p_mi: media player.
    @param drawable: X11 window ID @note The specified identifier must correspond to an existing Input/Output class X11 window. Pixmaps are B{not} currently supported. The default X11 server is assumed, i.e. that specified in the DISPLAY environment variable. @warning LibVLC can deal with invalid X11 handle errors, however some display drivers (EGL, GLX, VA and/or VDPAU) can unfortunately not. Thus the window handle must remain valid until playback is stopped, otherwise the process may abort or crash.
    @bug No more than one window handle per media player instance can be specified. If the media has multiple simultaneously active video tracks, extra tracks will be rendered into external windows beyond the control of the application.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_xwindow', None) or \
        _Cfunction('libvlc_media_player_set_xwindow', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint32)
    return f(p_mi, drawable)
def libvlc_media_player_get_xwindow(p_mi):
    '''Get the X Window System window identifier previously set with
    L{libvlc_media_player_set_xwindow}(). Note that this will return the identifier
    even if VLC is not currently using it (for instance if it is playing an
    audio-only input).
    @param p_mi: the Media Player.
    @return: an X window ID, or 0 if none where set.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_xwindow', None) or \
        _Cfunction('libvlc_media_player_get_xwindow', ((1,),), None,
                    ctypes.c_uint32, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_set_hwnd(p_mi, drawable):
    '''Set a Win32/Win64 API window handle (HWND) where the media player should
    render its video output. If LibVLC was built without Win32/Win64 API output
    support, then this has no effects.
    @param p_mi: the Media Player.
    @param drawable: windows handle of the drawable.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_hwnd', None) or \
        _Cfunction('libvlc_media_player_set_hwnd', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_void_p)
    return f(p_mi, drawable)
def libvlc_media_player_get_hwnd(p_mi):
    '''Get the Windows API window handle (HWND) previously set with
    L{libvlc_media_player_set_hwnd}(). The handle will be returned even if LibVLC
    is not currently outputting any video to it.
    @param p_mi: the Media Player.
    @return: a window handle or None if there are none.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_hwnd', None) or \
        _Cfunction('libvlc_media_player_get_hwnd', ((1,),), None,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_set_android_context(p_mi, p_awindow_handler):
    '''Set the android context.
    @param p_mi: the media player.
    @param p_awindow_handler: org.videolan.libvlc.AWindow jobject owned by the org.videolan.libvlc.MediaPlayer class from the libvlc-android project.
    @version: LibVLC 3.0.0 and later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_android_context', None) or \
        _Cfunction('libvlc_media_player_set_android_context', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_void_p)
    return f(p_mi, p_awindow_handler)
def libvlc_media_player_set_evas_object(p_mi, p_evas_object):
    '''Set the EFL Evas Object.
    @param p_mi: the media player.
    @param p_evas_object: a valid EFL Evas Object (Evas_Object).
    @return: -1 if an error was detected, 0 otherwise.
    @version: LibVLC 3.0.0 and later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_evas_object', None) or \
        _Cfunction('libvlc_media_player_set_evas_object', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_void_p)
    return f(p_mi, p_evas_object)
def libvlc_audio_set_callbacks(mp, play, pause, resume, flush, drain, opaque):
    '''Sets callbacks and private data for decoded audio.
    Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
    to configure the decoded audio format.
    @note: The audio callbacks override any other audio output mechanism.
    If the callbacks are set, LibVLC will B{not} output audio in any way.
    @param mp: the media player.
    @param play: callback to play audio samples (must not be None).
    @param pause: callback to pause playback (or None to ignore).
    @param resume: callback to resume playback (or None to ignore).
    @param flush: callback to flush audio buffers (or None to ignore).
    @param drain: callback to drain audio buffers (or None to ignore).
    @param opaque: private pointer for the audio callbacks (as first parameter).
    @version: LibVLC 2.0.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_audio_set_callbacks', None) or \
        _Cfunction('libvlc_audio_set_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                    None, MediaPlayer, AudioPlayCb, AudioPauseCb, AudioResumeCb, AudioFlushCb, AudioDrainCb, ctypes.c_void_p)
    return f(mp, play, pause, resume, flush, drain, opaque)
def libvlc_audio_set_volume_callback(mp, set_volume):
    '''Set callbacks and private data for decoded audio. This only works in
    combination with L{libvlc_audio_set_callbacks}().
    Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
    to configure the decoded audio format.
    @param mp: the media player.
    @param set_volume: callback to apply audio volume, or None to apply volume in software.
    @version: LibVLC 2.0.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_audio_set_volume_callback', None) or \
        _Cfunction('libvlc_audio_set_volume_callback', ((1,), (1,),), None,
                    None, MediaPlayer, AudioSetVolumeCb)
    return f(mp, set_volume)
def libvlc_audio_set_format_callbacks(mp, setup, cleanup):
    '''Sets decoded audio format via callbacks.
    This only works in combination with L{libvlc_audio_set_callbacks}().
    @param mp: the media player.
    @param setup: callback to select the audio format (cannot be None).
    @param cleanup: callback to release any allocated resources (or None).
    @version: LibVLC 2.0.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_audio_set_format_callbacks', None) or \
        _Cfunction('libvlc_audio_set_format_callbacks', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, AudioSetupCb, AudioCleanupCb)
    return f(mp, setup, cleanup)
def libvlc_audio_set_format(mp, format, rate, channels):
    '''Sets a fixed decoded audio format.
    This only works in combination with L{libvlc_audio_set_callbacks}(),
    and is mutually exclusive with L{libvlc_audio_set_format_callbacks}().
    @param mp: the media player.
    @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
    @param rate: sample rate (expressed in Hz).
    @param channels: channels count.
    @version: LibVLC 2.0.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_audio_set_format', None) or \
        _Cfunction('libvlc_audio_set_format', ((1,), (1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint)
    return f(mp, format, rate, channels)
def libvlc_media_player_get_length(p_mi):
    '''Get the current movie length (in ms).
    @param p_mi: the Media Player.
    @return: the movie length (in ms), or -1 if there is no media.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_length', None) or \
        _Cfunction('libvlc_media_player_get_length', ((1,),), None,
                    ctypes.c_longlong, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_get_time(p_mi):
    '''Get the current movie time (in ms).
    @param p_mi: the Media Player.
    @return: the movie time (in ms), or -1 if there is no media.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_time', None) or \
        _Cfunction('libvlc_media_player_get_time', ((1,),), None,
                    ctypes.c_longlong, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_set_time(p_mi, i_time):
    '''Set the movie time (in ms). This has no effect if no media is being played.
    Not all formats and protocols support this.
    @param p_mi: the Media Player.
    @param i_time: the movie time (in ms).
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_time', None) or \
        _Cfunction('libvlc_media_player_set_time', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_longlong)
    return f(p_mi, i_time)
def libvlc_media_player_get_position(p_mi):
    '''Get movie position as percentage between 0.0 and 1.0.
    @param p_mi: the Media Player.
    @return: movie position, or -1. in case of error.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_position', None) or \
        _Cfunction('libvlc_media_player_get_position', ((1,),), None,
                    ctypes.c_float, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_set_position(p_mi, f_pos):
    '''Set movie position as percentage between 0.0 and 1.0.
    This has no effect if playback is not enabled.
    This might not work depending on the underlying input format and protocol.
    @param p_mi: the Media Player.
    @param f_pos: the position.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_position', None) or \
        _Cfunction('libvlc_media_player_set_position', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_float)
    return f(p_mi, f_pos)
def libvlc_media_player_set_chapter(p_mi, i_chapter):
    '''Set movie chapter (if applicable).
    @param p_mi: the Media Player.
    @param i_chapter: chapter number to play.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_chapter', None) or \
        _Cfunction('libvlc_media_player_set_chapter', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_chapter)
def libvlc_media_player_get_chapter(p_mi):
    '''Get movie chapter.
    @param p_mi: the Media Player.
    @return: chapter number currently playing, or -1 if there is no media.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_chapter', None) or \
        _Cfunction('libvlc_media_player_get_chapter', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_get_chapter_count(p_mi):
    '''Get movie chapter count.
    @param p_mi: the Media Player.
    @return: number of chapters in movie, or -1.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_chapter_count', None) or \
        _Cfunction('libvlc_media_player_get_chapter_count', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_will_play(p_mi):
    r'''Is the player able to play.
    @param p_mi: the Media Player.
    @return: boolean \libvlc_return_bool.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_will_play', None) or \
        _Cfunction('libvlc_media_player_will_play', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_get_chapter_count_for_title(p_mi, i_title):
    '''Get title chapter count.
    @param p_mi: the Media Player.
    @param i_title: title.
    @return: number of chapters in title, or -1.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_chapter_count_for_title', None) or \
        _Cfunction('libvlc_media_player_get_chapter_count_for_title', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_title)
def libvlc_media_player_set_title(p_mi, i_title):
    '''Set movie title.
    @param p_mi: the Media Player.
    @param i_title: title number to play.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_title', None) or \
        _Cfunction('libvlc_media_player_set_title', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_title)
def libvlc_media_player_get_title(p_mi):
    '''Get movie title.
    @param p_mi: the Media Player.
    @return: title number currently playing, or -1.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_title', None) or \
        _Cfunction('libvlc_media_player_get_title', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_get_title_count(p_mi):
    '''Get movie title count.
    @param p_mi: the Media Player.
    @return: title number count, or -1.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_title_count', None) or \
        _Cfunction('libvlc_media_player_get_title_count', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_previous_chapter(p_mi):
    '''Set previous chapter (if applicable).
    @param p_mi: the Media Player.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_previous_chapter', None) or \
        _Cfunction('libvlc_media_player_previous_chapter', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_next_chapter(p_mi):
    '''Set next chapter (if applicable).
    @param p_mi: the Media Player.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_next_chapter', None) or \
        _Cfunction('libvlc_media_player_next_chapter', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_get_rate(p_mi):
    '''Get the requested movie play rate.
    @warning: Depending on the underlying media, the requested rate may be
    different from the real playback rate.
    @param p_mi: the Media Player.
    @return: movie play rate.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_rate', None) or \
        _Cfunction('libvlc_media_player_get_rate', ((1,),), None,
                    ctypes.c_float, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_set_rate(p_mi, rate):
    '''Set movie play rate.
    @param p_mi: the Media Player.
    @param rate: movie play rate to set.
    @return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol).
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_rate', None) or \
        _Cfunction('libvlc_media_player_set_rate', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_float)
    return f(p_mi, rate)
def libvlc_media_player_get_state(p_mi):
    '''Get current movie state.
    @param p_mi: the Media Player.
    @return: the current state of the media player (playing, paused, ...) See libvlc_state_t.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_get_state', None) or \
        _Cfunction('libvlc_media_player_get_state', ((1,),), None,
                    State, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_has_vout(p_mi):
    '''How many video outputs does this media player have?
    @param p_mi: the media player.
    @return: the number of video outputs.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_has_vout', None) or \
        _Cfunction('libvlc_media_player_has_vout', ((1,),), None,
                    ctypes.c_uint, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_is_seekable(p_mi):
    r'''Is this media player seekable?
    @param p_mi: the media player.
    @return: true if the media player can seek \libvlc_return_bool.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_is_seekable', None) or \
        _Cfunction('libvlc_media_player_is_seekable', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_can_pause(p_mi):
    r'''Can this media player be paused?
    @param p_mi: the media player.
    @return: true if the media player can pause \libvlc_return_bool.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_can_pause', None) or \
        _Cfunction('libvlc_media_player_can_pause', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_program_scrambled(p_mi):
    r'''Check if the current program is scrambled.
    @param p_mi: the media player.
    @return: true if the current program is scrambled \libvlc_return_bool.
    @version: LibVLC 2.2.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_program_scrambled', None) or \
        _Cfunction('libvlc_media_player_program_scrambled', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_next_frame(p_mi):
    '''Display the next frame (if supported).
    @param p_mi: the media player.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_next_frame', None) or \
        _Cfunction('libvlc_media_player_next_frame', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_navigate(p_mi, navigate):
    '''Navigate through DVD Menu.
    @param p_mi: the Media Player.
    @param navigate: the Navigation mode.
    @version: libVLC 2.0.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_navigate', None) or \
        _Cfunction('libvlc_media_player_navigate', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, navigate)
def libvlc_media_player_set_video_title_display(p_mi, position, timeout):
    '''Set if, and how, the video title will be shown when media is played.
    @param p_mi: the media player.
    @param position: position at which to display the title, or libvlc_position_disable to prevent the title from being displayed.
    @param timeout: title display timeout in milliseconds (ignored if libvlc_position_disable).
    @version: libVLC 2.1.0 or later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_set_video_title_display', None) or \
        _Cfunction('libvlc_media_player_set_video_title_display', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, Position, ctypes.c_int)
    return f(p_mi, position, timeout)
def libvlc_media_player_add_slave(p_mi, i_type, psz_uri, b_select):
    '''Add a slave to the current media player.
    @note: If the player is playing, the slave will be added directly. This call
    will also update the slave list of the attached L{Media}.
    @param p_mi: the media player.
    @param i_type: subtitle or audio.
    @param psz_uri: Uri of the slave (should contain a valid scheme).
    @param b_select: True if this slave should be selected when it's loaded.
    @return: 0 on success, -1 on error.
    @version: LibVLC 3.0.0 and later. See L{libvlc_media_slaves_add}.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_media_player_add_slave', None) or \
        _Cfunction('libvlc_media_player_add_slave', ((1,), (1,), (1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, MediaSlaveType, ctypes.c_char_p, ctypes.c_bool)
    return f(p_mi, i_type, psz_uri, b_select)
def libvlc_track_description_list_release(p_track_description):
    '''Release (free) L{TrackDescription}.
    @param p_track_description: the structure to release.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_track_description_list_release', None) or \
        _Cfunction('libvlc_track_description_list_release', ((1,),), None,
                    None, ctypes.POINTER(TrackDescription))
    return f(p_track_description)
def libvlc_toggle_fullscreen(p_mi):
    '''Toggle fullscreen status on non-embedded video outputs.
    @warning: The same limitations applies to this function
    as to L{libvlc_set_fullscreen}().
    @param p_mi: the media player.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_toggle_fullscreen', None) or \
        _Cfunction('libvlc_toggle_fullscreen', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_set_fullscreen(p_mi, b_fullscreen):
    '''Enable or disable fullscreen.
    @warning: With most window managers, only a top-level windows can be in
    full-screen mode. Hence, this function will not operate properly if
    L{libvlc_media_player_set_xwindow}() was used to embed the video in a
    non-top-level window. In that case, the embedding window must be reparented
    to the root window B{before} fullscreen mode is enabled. You will want
    to reparent it back to its normal parent when disabling fullscreen.
    @param p_mi: the media player.
    @param b_fullscreen: boolean for fullscreen status.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_set_fullscreen', None) or \
        _Cfunction('libvlc_set_fullscreen', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, b_fullscreen)
def libvlc_get_fullscreen(p_mi):
    r'''Get current fullscreen status.
    @param p_mi: the media player.
    @return: the fullscreen status (boolean) \libvlc_return_bool.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_get_fullscreen', None) or \
        _Cfunction('libvlc_get_fullscreen', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_key_input(p_mi, on):
    '''Enable or disable key press events handling, according to the LibVLC hotkeys
    configuration. By default and for historical reasons, keyboard events are
    handled by the LibVLC video widget.
    @note: On X11, there can be only one subscriber for key press and mouse
    click events per window. If your application has subscribed to those events
    for the X window ID of the video widget, then LibVLC will not be able to
    handle key presses and mouse clicks in any case.
    @warning: This function is only implemented for X11 and Win32 at the moment.
    @param p_mi: the media player.
    @param on: true to handle key press events, false to ignore them.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_key_input', None) or \
        _Cfunction('libvlc_video_set_key_input', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, on)
def libvlc_video_set_mouse_input(p_mi, on):
    '''Enable or disable mouse click events handling. By default, those events are
    handled. This is needed for DVD menus to work, as well as a few video
    filters such as "puzzle".
    See L{libvlc_video_set_key_input}().
    @warning: This function is only implemented for X11 and Win32 at the moment.
    @param p_mi: the media player.
    @param on: true to handle mouse click events, false to ignore them.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_mouse_input', None) or \
        _Cfunction('libvlc_video_set_mouse_input', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, on)
def libvlc_video_get_size(p_mi, num):
    '''Get the pixel dimensions of a video.
    @param p_mi: media player.
    @param num: number of the video (starting from, and most commonly 0).
    @return: px pixel width, py pixel height.
    '''
    # Use the cached ctypes binding if available, else create it now.
    # The (2,) flags mark the two c_uint pointers as output parameters.
    f = _Cfunctions.get('libvlc_video_get_size', None) or \
        _Cfunction('libvlc_video_get_size', ((1,), (1,), (2,), (2,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
    return f(p_mi, num)
def libvlc_video_get_cursor(p_mi, num):
    '''Get the mouse pointer coordinates over a video.
    Coordinates are expressed in terms of the decoded video resolution,
    B{not} in terms of pixels on the screen/viewport (to get the latter,
    you can query your windowing system directly).
    Either of the coordinates may be negative or larger than the corresponding
    dimension of the video, if the cursor is outside the rendering area.
    @warning: The coordinates may be out-of-date if the pointer is not located
    on the video rendering area. LibVLC does not track the pointer if it is
    outside of the video widget.
    @note: LibVLC does not support multiple pointers (it does of course support
    multiple input devices sharing the same pointer) at the moment.
    @param p_mi: media player.
    @param num: number of the video (starting from, and most commonly 0).
    @return: px abscissa, py ordinate.
    '''
    # Use the cached ctypes binding if available, else create it now.
    # The (2,) flags mark the two c_int pointers as output parameters.
    f = _Cfunctions.get('libvlc_video_get_cursor', None) or \
        _Cfunction('libvlc_video_get_cursor', ((1,), (1,), (2,), (2,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
    return f(p_mi, num)
def libvlc_video_get_scale(p_mi):
    '''Get the current video scaling factor.
    See also L{libvlc_video_set_scale}().
    @param p_mi: the media player.
    @return: the currently configured zoom factor, or 0. if the video is set to fit to the output window/drawable automatically.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_get_scale', None) or \
        _Cfunction('libvlc_video_get_scale', ((1,),), None,
                    ctypes.c_float, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_scale(p_mi, f_factor):
    '''Set the video scaling factor. That is the ratio of the number of pixels on
    screen to the number of pixels in the original decoded video in each
    dimension. Zero is a special value; it will adjust the video to the output
    window/drawable (in windowed mode) or the entire screen.
    Note that not all video outputs support scaling.
    @param p_mi: the media player.
    @param f_factor: the scaling factor, or zero.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_scale', None) or \
        _Cfunction('libvlc_video_set_scale', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_float)
    return f(p_mi, f_factor)
def libvlc_video_get_aspect_ratio(p_mi):
    '''Get current video aspect ratio.
    @param p_mi: the media player.
    @return: the video aspect ratio or None if unspecified (the result must be released with free() or L{libvlc_free}()).
    '''
    # Use the cached ctypes binding if available, else create it now.
    # string_result converts the returned C string and frees the C buffer.
    f = _Cfunctions.get('libvlc_video_get_aspect_ratio', None) or \
        _Cfunction('libvlc_video_get_aspect_ratio', ((1,),), string_result,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_aspect_ratio(p_mi, psz_aspect):
    '''Set new video aspect ratio.
    @param p_mi: the media player.
    @param psz_aspect: new video aspect-ratio or None to reset to default @note Invalid aspect ratios are ignored.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_aspect_ratio', None) or \
        _Cfunction('libvlc_video_set_aspect_ratio', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_aspect)
def libvlc_video_new_viewpoint():
    '''Create a video viewpoint structure.
    @return: video viewpoint or None (the result must be released with free() or L{libvlc_free}()).
    @version: LibVLC 3.0.0 and later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_new_viewpoint', None) or \
        _Cfunction('libvlc_video_new_viewpoint', (), None,
                    VideoViewpoint)
    return f()
def libvlc_video_update_viewpoint(p_mi, p_viewpoint, b_absolute):
    '''Update the video viewpoint information.
    @note: It is safe to call this function before the media player is started.
    @param p_mi: the media player.
    @param p_viewpoint: video viewpoint allocated via L{libvlc_video_new_viewpoint}().
    @param b_absolute: if true replace the old viewpoint with the new one. If false, increase/decrease it.
    @return: -1 in case of error, 0 otherwise @note the values are set asynchronously, it will be used by the next frame displayed.
    @version: LibVLC 3.0.0 and later.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_update_viewpoint', None) or \
        _Cfunction('libvlc_video_update_viewpoint', ((1,), (1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, VideoViewpoint, ctypes.c_bool)
    return f(p_mi, p_viewpoint, b_absolute)
def libvlc_video_get_spu(p_mi):
    '''Get current video subtitle.
    @param p_mi: the media player.
    @return: the video subtitle selected, or -1 if none.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_get_spu', None) or \
        _Cfunction('libvlc_video_get_spu', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_spu_count(p_mi):
    '''Get the number of available video subtitles.
    @param p_mi: the media player.
    @return: the number of available video subtitles.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_get_spu_count', None) or \
        _Cfunction('libvlc_video_get_spu_count', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_spu_description(p_mi):
    '''Get the description of available video subtitles.
    @param p_mi: the media player.
    @return: list containing description of available video subtitles. It must be freed with L{libvlc_track_description_list_release}().
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_get_spu_description', None) or \
        _Cfunction('libvlc_video_get_spu_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
def libvlc_video_set_spu(p_mi, i_spu):
    '''Set new video subtitle.
    @param p_mi: the media player.
    @param i_spu: video subtitle track to select (i_id from track description).
    @return: 0 on success, -1 if out of range.
    '''
    # Use the cached ctypes binding if available, else create it now.
    f = _Cfunctions.get('libvlc_video_set_spu', None) or \
        _Cfunction('libvlc_video_set_spu', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_spu)
def libvlc_video_get_spu_delay(p_mi):
    '''Return the current subtitle delay. Positive values mean subtitles are
    displayed later, negative values earlier.
    @param p_mi: media player.
    @return: time (in microseconds) the display of subtitles is being delayed.
    @version: LibVLC 2.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_video_get_spu_delay', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_spu_delay', ((1,),), None,
                       ctypes.c_int64, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_spu_delay(p_mi, i_delay):
    '''Adjust the subtitle display delay. Positive values result in subtitles
    being displayed later, while negative values will result in subtitles being
    displayed earlier.
    The subtitle delay will be reset to zero each time the media changes.
    @param p_mi: media player.
    @param i_delay: time (in microseconds) the display of subtitles should be delayed.
    @return: 0 on success, -1 on error.
    @version: LibVLC 2.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_video_set_spu_delay', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_spu_delay', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_int64)
    return f(p_mi, i_delay)
def libvlc_media_player_get_full_title_descriptions(p_mi, titles):
    '''Retrieve the full description of the available titles.
    @param p_mi: the media player.
    @param titles: address to store an allocated array of title descriptions (must be freed with L{libvlc_title_descriptions_release}() by the caller) [OUT].
    @return: the number of titles (-1 on error).
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_full_title_descriptions', None)
    if f is None:
        f = _Cfunction('libvlc_media_player_get_full_title_descriptions', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.POINTER(ctypes.POINTER(TitleDescription)))
    return f(p_mi, titles)
def libvlc_title_descriptions_release(p_titles, i_count):
    '''Free an array of title descriptions.
    @param p_titles: title description array to release.
    @param i_count: number of title descriptions to release.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_title_descriptions_release', None)
    if f is None:
        f = _Cfunction('libvlc_title_descriptions_release', ((1,), (1,),), None,
                       None, ctypes.POINTER(TitleDescription), ctypes.c_uint)
    return f(p_titles, i_count)
def libvlc_media_player_get_full_chapter_descriptions(p_mi, i_chapters_of_title, pp_chapters):
    '''Retrieve the full description of the available chapters.
    @param p_mi: the media player.
    @param i_chapters_of_title: index of the title to query for chapters (uses current title if set to -1).
    @param pp_chapters: address to store an allocated array of chapter descriptions (must be freed with L{libvlc_chapter_descriptions_release}() by the caller) [OUT].
    @return: the number of chapters (-1 on error).
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_full_chapter_descriptions', None)
    if f is None:
        f = _Cfunction('libvlc_media_player_get_full_chapter_descriptions', ((1,), (1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_int, ctypes.POINTER(ctypes.POINTER(ChapterDescription)))
    return f(p_mi, i_chapters_of_title, pp_chapters)
def libvlc_chapter_descriptions_release(p_chapters, i_count):
    '''Free an array of chapter descriptions.
    @param p_chapters: chapter description array to release.
    @param i_count: number of chapter descriptions to release.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_chapter_descriptions_release', None)
    if f is None:
        f = _Cfunction('libvlc_chapter_descriptions_release', ((1,), (1,),), None,
                       None, ctypes.POINTER(ChapterDescription), ctypes.c_uint)
    return f(p_chapters, i_count)
def libvlc_video_get_crop_geometry(p_mi):
    '''Return the current crop filter geometry.
    @param p_mi: the media player.
    @return: the crop filter geometry or None if unset.
    '''
    f = _Cfunctions.get('libvlc_video_get_crop_geometry', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_crop_geometry', ((1,),), string_result,
                       ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_crop_geometry(p_mi, psz_geometry):
    '''Apply a new crop filter geometry.
    @param p_mi: the media player.
    @param psz_geometry: new crop filter geometry (None to unset).
    '''
    f = _Cfunctions.get('libvlc_video_set_crop_geometry', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_crop_geometry', ((1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_geometry)
def libvlc_video_get_teletext(p_mi):
    '''Return the teletext page currently requested.
    @param p_mi: the media player.
    @return: the current teletext page requested.
    '''
    f = _Cfunctions.get('libvlc_video_get_teletext', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_teletext', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_teletext(p_mi, i_page):
    '''Request a new teletext page to retrieve.
    @param p_mi: the media player.
    @param i_page: teletext page number requested.
    '''
    f = _Cfunctions.get('libvlc_video_set_teletext', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_page)
def libvlc_toggle_teletext(p_mi):
    '''Toggle the teletext transparent status on the video output.
    @param p_mi: the media player.
    '''
    f = _Cfunctions.get('libvlc_toggle_teletext', None)
    if f is None:
        f = _Cfunction('libvlc_toggle_teletext', ((1,),), None,
                       None, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_track_count(p_mi):
    '''Return how many video tracks are available.
    @param p_mi: media player.
    @return: the number of available video tracks (int).
    '''
    f = _Cfunctions.get('libvlc_video_get_track_count', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_track_count', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_track_description(p_mi):
    '''Retrieve the descriptions of the available video tracks.
    @param p_mi: media player.
    @return: list with description of available video tracks, or None on error. It must be freed with L{libvlc_track_description_list_release}().
    '''
    f = _Cfunctions.get('libvlc_video_get_track_description', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_track_description', ((1,),), None,
                       ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
def libvlc_video_get_track(p_mi):
    '''Return the currently active video track.
    @param p_mi: media player.
    @return: the video track ID (int) or -1 if no active input.
    '''
    f = _Cfunctions.get('libvlc_video_get_track', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_track', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_track(p_mi, i_track):
    '''Select a video track.
    @param p_mi: media player.
    @param i_track: the track ID (i_id field from track description).
    @return: 0 on success, -1 if out of range.
    '''
    f = _Cfunctions.get('libvlc_video_set_track', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_track', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_track)
def libvlc_video_take_snapshot(p_mi, num, psz_filepath, i_width, i_height):
    '''Save a snapshot of the current video window to a file.
    If i_width AND i_height is 0, original size is used.
    If i_width XOR i_height is 0, original aspect-ratio is preserved.
    @param p_mi: media player instance.
    @param num: number of video output (typically 0 for the first/only one).
    @param psz_filepath: the path where to save the screenshot to.
    @param i_width: the snapshot's width.
    @param i_height: the snapshot's height.
    @return: 0 on success, -1 if the video was not found.
    '''
    f = _Cfunctions.get('libvlc_video_take_snapshot', None)
    if f is None:
        f = _Cfunction('libvlc_video_take_snapshot', ((1,), (1,), (1,), (1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int)
    return f(p_mi, num, psz_filepath, i_width, i_height)
def libvlc_video_set_deinterlace(p_mi, psz_mode):
    '''Enable or disable the deinterlace filter.
    @param p_mi: libvlc media player.
    @param psz_mode: type of deinterlace filter, None to disable.
    '''
    f = _Cfunctions.get('libvlc_video_set_deinterlace', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_deinterlace', ((1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_mode)
def libvlc_video_get_marquee_int(p_mi, option):
    '''Read an integer marquee option value.
    @param p_mi: libvlc media player.
    @param option: marq option to get, see libvlc_video_marquee_int_option_t.
    '''
    f = _Cfunctions.get('libvlc_video_get_marquee_int', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_marquee_int', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_get_marquee_string(p_mi, option):
    '''Read a string marquee option value.
    @param p_mi: libvlc media player.
    @param option: marq option to get, see libvlc_video_marquee_string_option_t.
    '''
    f = _Cfunctions.get('libvlc_video_get_marquee_string', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_marquee_string', ((1,), (1,),), string_result,
                       ctypes.c_void_p, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_marquee_int(p_mi, option, i_val):
    '''Enable, disable or set an integer marquee option.
    Setting libvlc_marquee_Enable has the side effect of enabling (arg !0)
    or disabling (arg 0) the marq filter.
    @param p_mi: libvlc media player.
    @param option: marq option to set, see libvlc_video_marquee_int_option_t.
    @param i_val: marq option value.
    '''
    f = _Cfunctions.get('libvlc_video_set_marquee_int', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_marquee_int', ((1,), (1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, i_val)
def libvlc_video_set_marquee_string(p_mi, option, psz_text):
    '''Assign a marquee string option.
    @param p_mi: libvlc media player.
    @param option: marq option to set, see libvlc_video_marquee_string_option_t.
    @param psz_text: marq option value.
    '''
    f = _Cfunctions.get('libvlc_video_set_marquee_string', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_marquee_string', ((1,), (1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
    return f(p_mi, option, psz_text)
def libvlc_video_get_logo_int(p_mi, option):
    '''Read an integer logo option.
    @param p_mi: libvlc media player instance.
    @param option: logo option to get, values of libvlc_video_logo_option_t.
    '''
    f = _Cfunctions.get('libvlc_video_get_logo_int', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_logo_int', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_logo_int(p_mi, option, value):
    '''Assign an integer logo option. Options that take a different type value
    are ignored.
    Passing libvlc_logo_enable as option value has the side effect of
    starting (arg !0) or stopping (arg 0) the logo filter.
    @param p_mi: libvlc media player instance.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param value: logo option value.
    '''
    f = _Cfunctions.get('libvlc_video_set_logo_int', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_logo_int', ((1,), (1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, value)
def libvlc_video_set_logo_string(p_mi, option, psz_value):
    '''Assign a string logo option. Options that take a different type value
    are ignored.
    @param p_mi: libvlc media player instance.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param psz_value: logo option value.
    '''
    f = _Cfunctions.get('libvlc_video_set_logo_string', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_logo_string', ((1,), (1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
    return f(p_mi, option, psz_value)
def libvlc_video_get_adjust_int(p_mi, option):
    '''Read an integer adjust option.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_get_adjust_int', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_adjust_int', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_adjust_int(p_mi, option, value):
    '''Assign an integer adjust option. Options that take a different type value
    are ignored.
    Passing libvlc_adjust_enable as option value has the side effect of
    starting (arg !0) or stopping (arg 0) the adjust filter.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_set_adjust_int', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_adjust_int', ((1,), (1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, value)
def libvlc_video_get_adjust_float(p_mi, option):
    '''Read a float adjust option.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_get_adjust_float', None)
    if f is None:
        f = _Cfunction('libvlc_video_get_adjust_float', ((1,), (1,),), None,
                       ctypes.c_float, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_adjust_float(p_mi, option, value):
    '''Assign a float adjust option. Options that take a different type value
    are ignored.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_set_adjust_float', None)
    if f is None:
        f = _Cfunction('libvlc_video_set_adjust_float', ((1,), (1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_uint, ctypes.c_float)
    return f(p_mi, option, value)
def libvlc_audio_output_list_get(p_instance):
    '''Retrieve the list of available audio output modules.
    @param p_instance: libvlc instance.
    @return: list of available audio outputs. It must be freed with L{libvlc_audio_output_list_release}(). In case of error, None is returned.
    '''
    f = _Cfunctions.get('libvlc_audio_output_list_get', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_list_get', ((1,),), None,
                       ctypes.POINTER(AudioOutput), Instance)
    return f(p_instance)
def libvlc_audio_output_list_release(p_list):
    '''Free a list of available audio output modules.
    @param p_list: list with audio outputs for release.
    '''
    f = _Cfunctions.get('libvlc_audio_output_list_release', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_list_release', ((1,),), None,
                       None, ctypes.POINTER(AudioOutput))
    return f(p_list)
def libvlc_audio_output_set(p_mi, psz_name):
    '''Select an audio output module.
    @note: Any change will take effect only after playback is stopped and
    restarted. Audio output cannot be changed while playing.
    @param p_mi: media player.
    @param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
    @return: 0 if function succeeded, -1 on error.
    '''
    f = _Cfunctions.get('libvlc_audio_output_set', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_set', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_name)
def libvlc_audio_output_device_enum(mp):
    '''Enumerate potential audio output devices,
    see L{libvlc_audio_output_device_set}().
    @note: Not all audio outputs support enumerating devices.
    The audio output may be functional even if the list is empty (None).
    @note: The list may not be exhaustive.
    @warning: Some audio output devices in the list might not actually work in
    some circumstances. By default, it is recommended to not specify any
    explicit audio device.
    @param mp: media player.
    @return: A None-terminated linked list of potential audio output devices. It must be freed with L{libvlc_audio_output_device_list_release}().
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_enum', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_device_enum', ((1,),), None,
                       ctypes.POINTER(AudioOutputDevice), MediaPlayer)
    return f(mp)
def libvlc_audio_output_device_list_get(p_instance, aout):
    '''Retrieve a list of audio output devices for a given audio output module,
    see L{libvlc_audio_output_device_set}().
    @note: Not all audio outputs support this. In particular, an empty (None)
    list of devices does B{not} imply that the specified audio output does
    not work.
    @note: The list might not be exhaustive.
    @warning: Some audio output devices in the list might not actually work in
    some circumstances. By default, it is recommended to not specify any
    explicit audio device.
    @param p_instance: libvlc instance.
    @param aout: audio output name (as returned by L{libvlc_audio_output_list_get}()).
    @return: A None-terminated linked list of potential audio output devices. It must be freed with L{libvlc_audio_output_device_list_release}().
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_list_get', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_device_list_get', ((1,), (1,),), None,
                       ctypes.POINTER(AudioOutputDevice), Instance, ctypes.c_char_p)
    return f(p_instance, aout)
def libvlc_audio_output_device_list_release(p_list):
    '''Free a list of available audio output devices.
    @param p_list: list with audio outputs for release.
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_list_release', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_device_list_release', ((1,),), None,
                       None, ctypes.POINTER(AudioOutputDevice))
    return f(p_list)
def libvlc_audio_output_device_set(mp, module, device_id):
    '''Configure an explicit audio output device.
    If the module parameter is None, audio output will be moved to the device
    specified by the device identifier string immediately. This is the
    recommended usage.
    A list of adequate potential device strings can be obtained with
    L{libvlc_audio_output_device_enum}().
    However passing None is supported in LibVLC version 2.2.0 and later only;
    in earlier versions, this function would have no effects when the module
    parameter was None.
    If the module parameter is not None, the device parameter of the
    corresponding audio output, if it exists, will be set to the specified
    string. Note that some audio output modules do not have such a parameter
    (notably MMDevice and PulseAudio).
    A list of adequate potential device strings can be obtained with
    L{libvlc_audio_output_device_list_get}().
    @note: This function does not select the specified audio output plugin.
    L{libvlc_audio_output_set}() is used for that purpose.
    @warning: The syntax for the device parameter depends on the audio output.
    Some audio output modules require further parameters (e.g. a channels map
    in the case of ALSA).
    @param mp: media player.
    @param module: If None, current audio output module. if non-None, name of audio output module.
    @param device_id: device identifier string.
    @return: Nothing. Errors are ignored (this is a design bug).
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_set', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_device_set', ((1,), (1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_char_p, ctypes.c_char_p)
    return f(mp, module, device_id)
def libvlc_audio_output_device_get(mp):
    '''Return the current audio output device identifier.
    This complements L{libvlc_audio_output_device_set}().
    @warning: The initial value for the current audio output device identifier
    may not be set or may be some unknown value. A LibVLC application should
    compare this value against the known device identifiers (e.g. those that
    were previously retrieved by a call to L{libvlc_audio_output_device_enum} or
    L{libvlc_audio_output_device_list_get}) to find the current audio output device.
    It is possible that the selected audio output device changes (an external
    change) without a call to L{libvlc_audio_output_device_set}. That may make this
    method unsuitable to use if a LibVLC application is attempting to track
    dynamic audio device changes as they happen.
    @param mp: media player.
    @return: the current audio output device identifier None if no device is selected or in case of error (the result must be released with free() or L{libvlc_free}()).
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_get', None)
    if f is None:
        f = _Cfunction('libvlc_audio_output_device_get', ((1,),), None,
                       ctypes.c_char_p, MediaPlayer)
    return f(mp)
def libvlc_audio_toggle_mute(p_mi):
    '''Toggle the mute status.
    @param p_mi: media player @warning Toggling mute atomically is not always possible: On some platforms, other processes can mute the VLC audio playback stream asynchronously. Thus, there is a small race condition where toggling will not work. See also the limitations of L{libvlc_audio_set_mute}().
    '''
    f = _Cfunctions.get('libvlc_audio_toggle_mute', None)
    if f is None:
        f = _Cfunction('libvlc_audio_toggle_mute', ((1,),), None,
                       None, MediaPlayer)
    return f(p_mi)
def libvlc_audio_get_mute(p_mi):
    '''Return the current mute status.
    @param p_mi: media player.
    @return: the mute status (boolean) if defined, -1 if undefined/unapplicable.
    '''
    f = _Cfunctions.get('libvlc_audio_get_mute', None)
    if f is None:
        f = _Cfunction('libvlc_audio_get_mute', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_audio_set_mute(p_mi, status):
    '''Change the mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
    '''
    f = _Cfunctions.get('libvlc_audio_set_mute', None)
    if f is None:
        f = _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)
def libvlc_audio_get_volume(p_mi):
    '''Return the current software audio volume.
    @param p_mi: media player.
    @return: the software volume in percents (0 = mute, 100 = nominal / 0dB).
    '''
    f = _Cfunctions.get('libvlc_audio_get_volume', None)
    if f is None:
        f = _Cfunction('libvlc_audio_get_volume', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_audio_set_volume(p_mi, i_volume):
    '''Change the current software audio volume.
    @param p_mi: media player.
    @param i_volume: the volume in percents (0 = mute, 100 = 0dB).
    @return: 0 if the volume was set, -1 if it was out of range.
    '''
    f = _Cfunctions.get('libvlc_audio_set_volume', None)
    if f is None:
        f = _Cfunction('libvlc_audio_set_volume', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_volume)
def libvlc_audio_get_track_count(p_mi):
    '''Return how many audio tracks are available.
    @param p_mi: media player.
    @return: the number of available audio tracks (int), or -1 if unavailable.
    '''
    f = _Cfunctions.get('libvlc_audio_get_track_count', None)
    if f is None:
        f = _Cfunction('libvlc_audio_get_track_count', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_audio_get_track_description(p_mi):
    '''Retrieve the descriptions of the available audio tracks.
    @param p_mi: media player.
    @return: list with description of available audio tracks, or None. It must be freed with L{libvlc_track_description_list_release}().
    '''
    f = _Cfunctions.get('libvlc_audio_get_track_description', None)
    if f is None:
        f = _Cfunction('libvlc_audio_get_track_description', ((1,),), None,
                       ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
def libvlc_audio_get_track(p_mi):
    '''Return the currently active audio track.
    @param p_mi: media player.
    @return: the audio track ID or -1 if no active input.
    '''
    f = _Cfunctions.get('libvlc_audio_get_track', None)
    if f is None:
        f = _Cfunction('libvlc_audio_get_track', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_audio_set_track(p_mi, i_track):
    '''Select the current audio track.
    @param p_mi: media player.
    @param i_track: the track ID (i_id field from track description).
    @return: 0 on success, -1 on error.
    '''
    f = _Cfunctions.get('libvlc_audio_set_track', None)
    if f is None:
        f = _Cfunction('libvlc_audio_set_track', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_track)
def libvlc_audio_get_channel(p_mi):
    '''Return the current audio channel.
    @param p_mi: media player.
    @return: the audio channel, see libvlc_audio_output_channel_t.
    '''
    f = _Cfunctions.get('libvlc_audio_get_channel', None)
    if f is None:
        f = _Cfunction('libvlc_audio_get_channel', ((1,),), None,
                       ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_audio_set_channel(p_mi, channel):
    '''Select the current audio channel.
    @param p_mi: media player.
    @param channel: the audio channel, see libvlc_audio_output_channel_t.
    @return: 0 on success, -1 on error.
    '''
    f = _Cfunctions.get('libvlc_audio_set_channel', None)
    if f is None:
        f = _Cfunction('libvlc_audio_set_channel', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, channel)
def libvlc_audio_get_delay(p_mi):
    '''Return the current audio delay.
    @param p_mi: media player.
    @return: the audio delay (microseconds).
    @version: LibVLC 1.1.1 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_get_delay', None)
    if f is None:
        f = _Cfunction('libvlc_audio_get_delay', ((1,),), None,
                       ctypes.c_int64, MediaPlayer)
    return f(p_mi)
def libvlc_audio_set_delay(p_mi, i_delay):
    '''Change the current audio delay. The audio delay will be reset to zero each time the media changes.
    @param p_mi: media player.
    @param i_delay: the audio delay (microseconds).
    @return: 0 on success, -1 on error.
    @version: LibVLC 1.1.1 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_set_delay', None)
    if f is None:
        f = _Cfunction('libvlc_audio_set_delay', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_int64)
    return f(p_mi, i_delay)
def libvlc_audio_equalizer_get_preset_count():
    '''Return the number of equalizer presets.
    @return: number of presets.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_get_preset_count', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_get_preset_count', (), None,
                       ctypes.c_uint)
    return f()
def libvlc_audio_equalizer_get_preset_name(u_index):
    '''Return the name of a particular equalizer preset.
    This name can be used, for example, to prepare a preset label or menu in a user
    interface.
    @param u_index: index of the preset, counting from zero.
    @return: preset name, or None if there is no such preset.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_get_preset_name', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_get_preset_name', ((1,),), None,
                       ctypes.c_char_p, ctypes.c_uint)
    return f(u_index)
def libvlc_audio_equalizer_get_band_count():
    '''Return the number of distinct frequency bands for an equalizer.
    @return: number of frequency bands.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_get_band_count', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_get_band_count', (), None,
                       ctypes.c_uint)
    return f()
def libvlc_audio_equalizer_get_band_frequency(u_index):
    '''Return a particular equalizer band frequency.
    This value can be used, for example, to create a label for an equalizer band control
    in a user interface.
    @param u_index: index of the band, counting from zero.
    @return: equalizer band frequency (Hz), or -1 if there is no such band.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_get_band_frequency', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_get_band_frequency', ((1,),), None,
                       ctypes.c_float, ctypes.c_uint)
    return f(u_index)
def libvlc_audio_equalizer_new():
    '''Allocate a new default equalizer, with all frequency values zeroed.
    The new equalizer can subsequently be applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The returned handle should be freed via L{libvlc_audio_equalizer_release}() when
    it is no longer needed.
    @return: opaque equalizer handle, or None on error.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_new', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_new', (), None,
                       ctypes.c_void_p)
    return f()
def libvlc_audio_equalizer_new_from_preset(u_index):
    '''Allocate a new equalizer, with initial frequency values copied from an existing
    preset.
    The new equalizer can subsequently be applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The returned handle should be freed via L{libvlc_audio_equalizer_release}() when
    it is no longer needed.
    @param u_index: index of the preset, counting from zero.
    @return: opaque equalizer handle, or None on error.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_new_from_preset', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_new_from_preset', ((1,),), None,
                       ctypes.c_void_p, ctypes.c_uint)
    return f(u_index)
def libvlc_audio_equalizer_release(p_equalizer):
    '''Free a previously created equalizer instance.
    The equalizer was previously created by using L{libvlc_audio_equalizer_new}() or
    L{libvlc_audio_equalizer_new_from_preset}().
    It is safe to invoke this method with a None p_equalizer parameter for no effect.
    @param p_equalizer: opaque equalizer handle, or None.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_release', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_release', ((1,),), None,
                       None, ctypes.c_void_p)
    return f(p_equalizer)
def libvlc_audio_equalizer_set_preamp(p_equalizer, f_preamp):
    '''Assign a new pre-amplification value to an equalizer.
    The new equalizer settings are subsequently applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The supplied amplification value will be clamped to the -20.0 to +20.0 range.
    @param p_equalizer: valid equalizer handle, must not be None.
    @param f_preamp: preamp value (-20.0 to 20.0 Hz).
    @return: zero on success, -1 on error.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_set_preamp', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_set_preamp', ((1,), (1,),), None,
                       ctypes.c_int, ctypes.c_void_p, ctypes.c_float)
    return f(p_equalizer, f_preamp)
def libvlc_audio_equalizer_get_preamp(p_equalizer):
    '''Return the current pre-amplification value from an equalizer.
    @param p_equalizer: valid equalizer handle, must not be None.
    @return: preamp value (Hz).
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_get_preamp', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_get_preamp', ((1,),), None,
                       ctypes.c_float, ctypes.c_void_p)
    return f(p_equalizer)
def libvlc_audio_equalizer_set_amp_at_index(p_equalizer, f_amp, u_band):
    '''Assign a new amplification value to a particular equalizer frequency band.
    The new equalizer settings are subsequently applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The supplied amplification value will be clamped to the -20.0 to +20.0 range.
    @param p_equalizer: valid equalizer handle, must not be None.
    @param f_amp: amplification value (-20.0 to 20.0 Hz).
    @param u_band: index, counting from zero, of the frequency band to set.
    @return: zero on success, -1 on error.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_set_amp_at_index', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_set_amp_at_index', ((1,), (1,), (1,),), None,
                       ctypes.c_int, ctypes.c_void_p, ctypes.c_float, ctypes.c_uint)
    return f(p_equalizer, f_amp, u_band)
def libvlc_audio_equalizer_get_amp_at_index(p_equalizer, u_band):
    '''Return the amplification value for a particular equalizer frequency band.
    @param p_equalizer: valid equalizer handle, must not be None.
    @param u_band: index, counting from zero, of the frequency band to get.
    @return: amplification value (Hz); NaN if there is no such frequency band.
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_equalizer_get_amp_at_index', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_get_amp_at_index', ((1,), (1,),), None,
                       ctypes.c_float, ctypes.c_void_p, ctypes.c_uint)
    return f(p_equalizer, u_band)
def libvlc_media_player_set_equalizer(p_mi, p_equalizer):
    '''Apply new equalizer settings to a media player.
    The equalizer is first created by invoking L{libvlc_audio_equalizer_new}() or
    L{libvlc_audio_equalizer_new_from_preset}().
    It is possible to apply new equalizer settings to a media player whether the media
    player is currently playing media or not.
    Invoking this method will immediately apply the new equalizer settings to the audio
    output of the currently playing media if there is any.
    If there is no currently playing media, the new equalizer settings will be applied
    later if and when new media is played.
    Equalizer settings will automatically be applied to subsequently played media.
    To disable the equalizer for a media player invoke this method passing None for the
    p_equalizer parameter.
    The media player does not keep a reference to the supplied equalizer so it is safe
    for an application to release the equalizer reference any time after this method
    returns.
    @param p_mi: opaque media player handle.
    @param p_equalizer: opaque equalizer handle, or None to disable the equalizer for this media player.
    @return: zero on success, -1 on error.
    @version: LibVLC 2.2.0 or later.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_player_set_equalizer', None) or \
        _Cfunction('libvlc_media_player_set_equalizer', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_void_p)
    return f(p_mi, p_equalizer)
def libvlc_media_player_get_role(p_mi):
    '''Gets the media role.
    @param p_mi: media player.
    @return: the media player role (\ref libvlc_media_player_role_t).
    @version: LibVLC 3.0.0 and later.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_player_get_role', None) or \
        _Cfunction('libvlc_media_player_get_role', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_set_role(p_mi, role):
    '''Sets the media role.
    @param p_mi: media player.
    @param role: the media player role (\ref libvlc_media_player_role_t).
    @return: 0 on success, -1 on error.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_player_set_role', None) or \
        _Cfunction('libvlc_media_player_set_role', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, role)
def libvlc_media_list_player_new(p_instance):
    '''Create new media_list_player.
    @param p_instance: libvlc instance.
    @return: media list player instance or None on error.
    '''
    # class_result(MediaListPlayer) wraps the returned raw pointer in a
    # MediaListPlayer object (or None for a NULL pointer).
    f = _Cfunctions.get('libvlc_media_list_player_new', None) or \
        _Cfunction('libvlc_media_list_player_new', ((1,),), class_result(MediaListPlayer),
                    ctypes.c_void_p, Instance)
    return f(p_instance)
def libvlc_media_list_player_release(p_mlp):
    '''Release a media_list_player after use
    Decrement the reference count of a media player object. If the
    reference count is 0, then L{libvlc_media_list_player_release}() will
    release the media player object. If the media player object
    has been released, then it should not be used again.
    @param p_mlp: media list player instance.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_release', None) or \
        _Cfunction('libvlc_media_list_player_release', ((1,),), None,
                    None, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_retain(p_mlp):
    '''Retain a reference to a media player list object. Use
    L{libvlc_media_list_player_release}() to decrement reference count.
    @param p_mlp: media player list object.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_retain', None) or \
        _Cfunction('libvlc_media_list_player_retain', ((1,),), None,
                    None, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_event_manager(p_mlp):
    '''Return the event manager of this media_list_player.
    @param p_mlp: media list player instance.
    @return: the event manager.
    '''
    # class_result(EventManager) wraps the raw pointer into an EventManager.
    f = _Cfunctions.get('libvlc_media_list_player_event_manager', None) or \
        _Cfunction('libvlc_media_list_player_event_manager', ((1,),), class_result(EventManager),
                    ctypes.c_void_p, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_set_media_player(p_mlp, p_mi):
    '''Replace media player in media_list_player with this instance.
    @param p_mlp: media list player instance.
    @param p_mi: media player instance.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_set_media_player', None) or \
        _Cfunction('libvlc_media_list_player_set_media_player', ((1,), (1,),), None,
                    None, MediaListPlayer, MediaPlayer)
    return f(p_mlp, p_mi)
def libvlc_media_list_player_get_media_player(p_mlp):
    '''Get media player of the media_list_player instance.
    @param p_mlp: media list player instance.
    @return: media player instance @note the caller is responsible for releasing the returned instance.
    '''
    # class_result(MediaPlayer) wraps the raw pointer into a MediaPlayer.
    f = _Cfunctions.get('libvlc_media_list_player_get_media_player', None) or \
        _Cfunction('libvlc_media_list_player_get_media_player', ((1,),), class_result(MediaPlayer),
                    ctypes.c_void_p, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_set_media_list(p_mlp, p_mlist):
    '''Set the media list associated with the player.
    @param p_mlp: media list player instance.
    @param p_mlist: list of media.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_set_media_list', None) or \
        _Cfunction('libvlc_media_list_player_set_media_list', ((1,), (1,),), None,
                    None, MediaListPlayer, MediaList)
    return f(p_mlp, p_mlist)
def libvlc_media_list_player_play(p_mlp):
    '''Play media list.
    @param p_mlp: media list player instance.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_play', None) or \
        _Cfunction('libvlc_media_list_player_play', ((1,),), None,
                    None, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_pause(p_mlp):
    '''Toggle pause (or resume) media list.
    @param p_mlp: media list player instance.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_pause', None) or \
        _Cfunction('libvlc_media_list_player_pause', ((1,),), None,
                    None, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_is_playing(p_mlp):
    '''Is media list playing?
    @param p_mlp: media list player instance.
    @return: true for playing and false for not playing \libvlc_return_bool.
    '''
    # Returns a C int (0/1), not a Python bool.
    f = _Cfunctions.get('libvlc_media_list_player_is_playing', None) or \
        _Cfunction('libvlc_media_list_player_is_playing', ((1,),), None,
                    ctypes.c_int, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_get_state(p_mlp):
    '''Get current libvlc_state of media list player.
    @param p_mlp: media list player instance.
    @return: libvlc_state_t for media list player.
    '''
    # Return type is the State enum wrapper, not a raw int.
    f = _Cfunctions.get('libvlc_media_list_player_get_state', None) or \
        _Cfunction('libvlc_media_list_player_get_state', ((1,),), None,
                    State, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_play_item_at_index(p_mlp, i_index):
    '''Play media list item at position index.
    @param p_mlp: media list player instance.
    @param i_index: index in media list to play.
    @return: 0 upon success -1 if the item wasn't found.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_play_item_at_index', None) or \
        _Cfunction('libvlc_media_list_player_play_item_at_index', ((1,), (1,),), None,
                    ctypes.c_int, MediaListPlayer, ctypes.c_int)
    return f(p_mlp, i_index)
def libvlc_media_list_player_play_item(p_mlp, p_md):
    '''Play the given media item.
    @param p_mlp: media list player instance.
    @param p_md: the media instance.
    @return: 0 upon success, -1 if the media is not part of the media list.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_play_item', None) or \
        _Cfunction('libvlc_media_list_player_play_item', ((1,), (1,),), None,
                    ctypes.c_int, MediaListPlayer, Media)
    return f(p_mlp, p_md)
def libvlc_media_list_player_stop(p_mlp):
    '''Stop playing media list.
    @param p_mlp: media list player instance.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_stop', None) or \
        _Cfunction('libvlc_media_list_player_stop', ((1,),), None,
                    None, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_next(p_mlp):
    '''Play next item from media list.
    @param p_mlp: media list player instance.
    @return: 0 upon success -1 if there is no next item.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_next', None) or \
        _Cfunction('libvlc_media_list_player_next', ((1,),), None,
                    ctypes.c_int, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_previous(p_mlp):
    '''Play previous item from media list.
    @param p_mlp: media list player instance.
    @return: 0 upon success -1 if there is no previous item.
    '''
    # Lazily bound native call; cached in _Cfunctions after first use.
    f = _Cfunctions.get('libvlc_media_list_player_previous', None) or \
        _Cfunction('libvlc_media_list_player_previous', ((1,),), None,
                    ctypes.c_int, MediaListPlayer)
    return f(p_mlp)
def libvlc_media_list_player_set_playback_mode(p_mlp, e_mode):
    '''Sets the playback mode for the playlist.
    @param p_mlp: media list player instance.
    @param e_mode: playback mode specification.
    '''
    # e_mode is a PlaybackMode enum value (default/loop/repeat).
    f = _Cfunctions.get('libvlc_media_list_player_set_playback_mode', None) or \
        _Cfunction('libvlc_media_list_player_set_playback_mode', ((1,), (1,),), None,
                    None, MediaListPlayer, PlaybackMode)
    return f(p_mlp, e_mode)
# 5 function(s) blacklisted:
# libvlc_audio_output_get_device_type
# libvlc_audio_output_set_device_type
# libvlc_dialog_set_callbacks
# libvlc_printerr
# libvlc_set_exit_handler
# 52 function(s) not wrapped as methods:
# libvlc_audio_equalizer_get_amp_at_index
# libvlc_audio_equalizer_get_band_count
# libvlc_audio_equalizer_get_band_frequency
# libvlc_audio_equalizer_get_preamp
# libvlc_audio_equalizer_get_preset_count
# libvlc_audio_equalizer_get_preset_name
# libvlc_audio_equalizer_new
# libvlc_audio_equalizer_new_from_preset
# libvlc_audio_equalizer_release
# libvlc_audio_equalizer_set_amp_at_index
# libvlc_audio_equalizer_set_preamp
# libvlc_audio_output_device_list_release
# libvlc_audio_output_list_release
# libvlc_chapter_descriptions_release
# libvlc_clearerr
# libvlc_clock
# libvlc_dialog_dismiss
# libvlc_dialog_get_context
# libvlc_dialog_post_action
# libvlc_dialog_post_login
# libvlc_dialog_set_context
# libvlc_event_type_name
# libvlc_free
# libvlc_get_changeset
# libvlc_get_compiler
# libvlc_get_version
# libvlc_log_clear
# libvlc_log_close
# libvlc_log_count
# libvlc_log_get_context
# libvlc_log_get_iterator
# libvlc_log_get_object
# libvlc_media_discoverer_list_release
# libvlc_media_get_codec_description
# libvlc_media_slaves_release
# libvlc_media_tracks_release
# libvlc_module_description_list_release
# libvlc_new
# libvlc_renderer_discoverer_event_manager
# libvlc_renderer_discoverer_list_release
# libvlc_renderer_discoverer_release
# libvlc_renderer_discoverer_start
# libvlc_renderer_discoverer_stop
# libvlc_renderer_item_flags
# libvlc_renderer_item_icon_uri
# libvlc_renderer_item_name
# libvlc_renderer_item_type
# libvlc_title_descriptions_release
# libvlc_track_description_list_release
# libvlc_track_description_release
# libvlc_video_new_viewpoint
# libvlc_vprinterr
# Start of footer.py #
# Backward compatibility
def callbackmethod(callback):
    """Now obsolete @callbackmethod decorator."""
    # Identity decorator kept only so code written against older versions of
    # these bindings (which required @callbackmethod) still imports and runs.
    return callback
# libvlc_free is not present in some versions of libvlc. If it is not
# in the library, then emulate it by calling libc.free
if not hasattr(dll, 'libvlc_free'):
    # need to find the free function in the C runtime. This is
    # platform specific.
    # For Linux and MacOSX
    libc_path = find_library('c')
    if libc_path:
        libc = ctypes.CDLL(libc_path)
        libvlc_free = libc.free
    else:
        # On win32, it is impossible to guess the proper lib to call
        # (msvcrt, mingw...). Just ignore the call: it will memleak,
        # but not prevent to run the application.
        def libvlc_free(p):
            pass

    # ensure argtypes is right, because default type of int won't
    # work on 64-bit systems
    # (applies to both the libc.free and the no-op fallback above)
    libvlc_free.argtypes = [ ctypes.c_void_p ]
# Version functions
def _dot2int(v):
'''(INTERNAL) Convert 'i.i.i[.i]' str to int.
'''
t = [int(i) for i in v.split('.')]
if len(t) == 3:
t.append(0)
elif len(t) != 4:
raise ValueError('"i.i.i[.i]": %r' % (v,))
if min(t) < 0 or max(t) > 255:
raise ValueError('[0..255]: %r' % (v,))
i = t.pop(0)
while t:
i = (i << 8) + t.pop(0)
return i
def hex_version():
    """Return the version of these bindings in hex or 0 if unavailable.
    """
    # __version__ may be absent in hand-built copies of this module
    # (NameError) or malformed (ValueError from _dot2int); either way
    # the caller just gets 0.
    try:
        v = __version__
        return _dot2int(v)
    except (NameError, ValueError):
        return 0
def libvlc_hex_version():
    """Return the libvlc version in hex or 0 if unavailable.
    """
    # libvlc_get_version() returns e.g. b'3.0.8 Vetinari'; keep only the
    # leading dotted-number token and pack it.  A malformed version string
    # yields 0 instead of raising.
    try:
        version_word = bytes_to_str(libvlc_get_version()).split()[0]
        return _dot2int(version_word)
    except ValueError:
        return 0
def debug_callback(event, *args, **kwds):
    '''Example callback, useful for debugging.
    '''
    # Build one comma-separated summary line: the event type first, then
    # positional args, then keyword args in sorted 'key=value' form.
    parts = ['event %s' % (event.type,)]
    parts.extend(str(a) for a in args)
    parts.extend(sorted('%s=%s' % kv for kv in kwds.items()))
    print('Debug callback (%s)' % ', '.join(parts))
if __name__ == '__main__':
    # Interactive demo / smoke test: plays the movie named on the command
    # line and maps single keystrokes to player commands.

    try:
        from msvcrt import getch  # Windows: unbuffered single-key reader
    except ImportError:
        import termios
        import tty

        def getch():  # getchar(), getc(stdin) #PYCHOK flake
            # POSIX fallback: switch the tty to raw mode, read exactly one
            # character, then always restore the saved terminal attributes.
            fd = sys.stdin.fileno()
            old = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                ch = sys.stdin.read(1)
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old)
            return ch

    def end_callback(event):
        # Attached to MediaPlayerEndReached: quit the demo when playback ends.
        print('End of media stream (event %s)' % event.type)
        sys.exit(0)

    echo_position = False  # toggled by the 'p' keybinding below

    def pos_callback(event, player):
        # Attached to MediaPlayerPositionChanged: echo position when enabled.
        if echo_position:
            sys.stdout.write('\r%s to %.2f%% (%.2f%%)' % (event.type,
                             event.u.new_position * 100,
                             player.get_position() * 100))
            sys.stdout.flush()

    def print_version():
        """Print version of this vlc.py and of the libvlc"""
        try:
            print('Build date: %s (%#x)' % (build_date, hex_version()))
            print('LibVLC version: %s (%#x)' % (bytes_to_str(libvlc_get_version()), libvlc_hex_version()))
            print('LibVLC compiler: %s' % bytes_to_str(libvlc_get_compiler()))
            if plugin_path:
                print('Plugin path: %s' % plugin_path)
        except:
            print('Error: %s' % sys.exc_info()[1])

    if sys.argv[1:] and '-h' not in sys.argv[1:] and '--help' not in sys.argv[1:]:
        # Last argument is the movie path; the rest are VLC options.
        movie = os.path.expanduser(sys.argv.pop())
        if not os.access(movie, os.R_OK):
            print('Error: %s file not readable' % movie)
            sys.exit(1)
        # Need --sub-source=marq in order to use marquee below
        instance = Instance(["--sub-source=marq"] + sys.argv[1:])
        try:
            media = instance.media_new(movie)
        except (AttributeError, NameError) as e:
            # Typically a bindings/libvlc version mismatch.
            print('%s: %s (%s %s vs LibVLC %s)' % (e.__class__.__name__, e,
                                                   sys.argv[0], __version__,
                                                   libvlc_get_version()))
            sys.exit(1)
        player = instance.media_player_new()
        player.set_media(media)
        player.play()

        # Some marquee examples. Marquee requires '--sub-source marq' in the
        # Instance() call above, see <http://www.videolan.org/doc/play-howto/en/ch04.html>
        player.video_set_marquee_int(VideoMarqueeOption.Enable, 1)
        player.video_set_marquee_int(VideoMarqueeOption.Size, 24)  # pixels
        player.video_set_marquee_int(VideoMarqueeOption.Position, Position.Bottom)
        if False:  # only one marquee can be specified
            player.video_set_marquee_int(VideoMarqueeOption.Timeout, 5000)  # millisec, 0==forever
            t = media.get_mrl()  # movie
        else:  # update marquee text periodically
            player.video_set_marquee_int(VideoMarqueeOption.Timeout, 0)  # millisec, 0==forever
            player.video_set_marquee_int(VideoMarqueeOption.Refresh, 1000)  # millisec (or sec?)
            ##t = '$L / $D or $P at $T'
            t = '%Y-%m-%d %H:%M:%S'
        player.video_set_marquee_string(VideoMarqueeOption.Text, str_to_bytes(t))

        # Some event manager examples. Note, the callback can be any Python
        # callable and does not need to be decorated. Optionally, specify
        # any number of positional and/or keyword arguments to be passed
        # to the callback (in addition to the first one, an Event instance).
        event_manager = player.event_manager()
        event_manager.event_attach(EventType.MediaPlayerEndReached, end_callback)
        event_manager.event_attach(EventType.MediaPlayerPositionChanged, pos_callback, player)

        def mspf():
            """Milliseconds per frame"""
            # Falls back to 25 fps when the player reports 0 (unknown).
            return int(1000 // (player.get_fps() or 25))

        def print_info():
            """Print information about the media"""
            try:
                print_version()
                media = player.get_media()
                print('State: %s' % player.get_state())
                print('Media: %s' % bytes_to_str(media.get_mrl()))
                print('Track: %s/%s' % (player.video_get_track(), player.video_get_track_count()))
                print('Current time: %s/%s' % (player.get_time(), media.get_duration()))
                print('Position: %s' % player.get_position())
                print('FPS: %s (%d ms)' % (player.get_fps(), mspf()))
                print('Rate: %s' % player.get_rate())
                print('Video size: %s' % str(player.video_get_size(0)))  # num=0
                print('Scale: %s' % player.video_get_scale())
                print('Aspect ratio: %s' % player.video_get_aspect_ratio())
                #print('Window:' % player.get_hwnd()
            except Exception:
                print('Error: %s' % sys.exc_info()[1])

        def sec_forward():
            """Go forward one sec"""
            player.set_time(player.get_time() + 1000)

        def sec_backward():
            """Go backward one sec"""
            player.set_time(player.get_time() - 1000)

        def frame_forward():
            """Go forward one frame"""
            player.set_time(player.get_time() + mspf())

        def frame_backward():
            """Go backward one frame"""
            player.set_time(player.get_time() - mspf())

        def print_help():
            """Print help"""
            # The first docstring line of each bound callable doubles as
            # its help text.
            print('Single-character commands:')
            for k, m in sorted(keybindings.items()):
                m = (m.__doc__ or m.__name__).splitlines()[0]
                print(' %s: %s.' % (k, m.rstrip('.')))
            print('0-9: go to that fraction of the movie')

        def quit_app():
            """Stop and exit"""
            sys.exit(0)

        def toggle_echo_position():
            """Toggle echoing of media position"""
            global echo_position
            echo_position = not echo_position

        # Single-keystroke dispatch table.
        keybindings = {
            ' ': player.pause,
            '+': sec_forward,
            '-': sec_backward,
            '.': frame_forward,
            ',': frame_backward,
            'f': player.toggle_fullscreen,
            'i': print_info,
            'p': toggle_echo_position,
            'q': quit_app,
            '?': print_help,
            }

        print('Press q to quit, ? to get help.%s' % os.linesep)
        while True:
            k = getch()
            print('> %s' % k)
            if k in keybindings:
                keybindings[k]()
            elif k.isdigit():
                # jump to fraction of the movie.
                player.set_position(float('0.'+k))

    else:
        print('Usage: %s [options] <movie_filename>' % sys.argv[0])
        print('Once launched, type ? for help.')
        print('')
        print_version()
| jcherfils/Previoo | vlc.py | Python | gpl-3.0 | 355,425 |
"""engine.SCons.Platform.hpux
Platform-specific initialization for HP-UX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/hpux.py 5357 2011/09/09 21:31:03 bdeegan"
import posix
def generate(env):
    """Add HP-UX-specific construction variables to the Environment.

    Delegates the baseline POSIX setup, then raises the command-line
    length limit to what HP-UX allows.
    """
    # NOTE(review): `import posix` at module level resolves to
    # SCons.Platform.posix via Python 2 implicit relative imports,
    # not the stdlib posix module -- confirm before porting to Python 3.
    posix.generate(env)
    #Based on HP-UX11i: ARG_MAX=2048000 - 3000 for environment expansion
    env['MAXLINELENGTH'] = 2045000
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib64/scons-2.1.0/SCons/Platform/hpux.py | Python | gpl-2.0 | 1,777 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import datetime
from socket import timeout as socket_timeout
from kombu import Connection, Producer, Exchange
from pytz import timezone
from mozillapulse.utils import time_to_string
from pyLibrary.debugs import constants
from pyLibrary import jsons
from pyLibrary.debugs.exceptions import Except
from pyLibrary.debugs.logs import Log
from pyLibrary.dot import wrap, coalesce, Dict, set_default
from pyLibrary.meta import use_settings
from pyLibrary.thread.threads import Thread
from mozillapulse.consumers import GenericConsumer
class Consumer(Thread):
    """Pulse consumer thread.

    Subscribes to a Pulse exchange and either pushes every payload onto
    ``target_queue`` (for fast digesters) or calls ``target`` with each
    payload (for slow digesters).  Exactly one of the two must be given.
    """

    @use_settings
    def __init__(
        self,
        exchange,  # name of the Pulse exchange
        topic,  # message name pattern to subscribe to ('#' is wildcard)
        target=None,  # WILL BE CALLED WITH PULSE PAYLOADS AND ack() IF COMPLETED WITHOUT EXCEPTION
        target_queue=None,  # (aka self.queue) WILL BE FILLED WITH PULSE PAYLOADS
        host='pulse.mozilla.org',  # url to connect,
        port=5671,  # tcp port
        user=None,
        password=None,
        vhost="/",
        start=0,  # USED AS STARTING POINT FOR ASSIGNING THE _meta.count ATTRIBUTE
        ssl=True,
        applabel=None,
        heartbeat=False,  # True to also get the Pulse heartbeat message
        durable=False,  # True to keep queue after shutdown
        serializer='json',
        broker_timezone='GMT',
        settings=None
    ):
        self.target_queue = target_queue
        self.pulse_target = target
        # `== None` (not `is None`) is deliberate: pyLibrary's Null object
        # compares equal to None but is a distinct instance.
        if (target_queue == None and target == None) or (target_queue != None and target != None):
            Log.error("Expecting a queue (for fast digesters) or a target (for slow digesters)")

        Thread.__init__(self, name="Pulse consumer for " + settings.exchange, target=self._worker)
        self.settings = settings
        settings.callback = self._got_result
        settings.user = coalesce(settings.user, settings.username)
        # BUG FIX: was `settings.applable` (typo), which silently discarded
        # an explicitly supplied `applabel` argument.
        settings.applabel = coalesce(settings.applabel, settings.queue, settings.queue_name)
        settings.topic = topic

        self.pulse = ModifiedGenericConsumer(settings, connect=True, **settings)
        self.count = coalesce(start, 0)
        self.start()

    def _got_result(self, data, message):
        """Pulse callback: number the payload and dispatch it, ack on success."""
        data = wrap(data)
        data._meta.count = self.count
        self.count += 1

        if self.settings.debug:
            Log.note("{{data}}", data=data)
        if self.target_queue != None:
            try:
                self.target_queue.add(data)
                message.ack()
            except Exception as e:
                e = Except.wrap(e)
                if not self.target_queue.closed:  # EXPECTED TO HAPPEN, THIS THREAD MAY HAVE BEEN AWAY FOR A WHILE
                    raise e
        else:
            try:
                self.pulse_target(data)
                message.ack()
            except Exception as e:
                Log.warning("Problem processing pulse (see `data` in structured log)", data=data, cause=e)

    def _worker(self, please_stop):
        """Thread body: keep listening on Pulse until asked to stop."""
        def disconnect():
            try:
                self.target_queue.close()
                Log.note("stop put into queue")
            except Exception:
                # best-effort close; the queue may already be gone
                pass

            self.pulse.disconnect()
            Log.note("pulse listener was given a disconnect()")

        please_stop.on_go(disconnect)

        while not please_stop:
            try:
                self.pulse.listen()
            except Exception as e:
                if not please_stop:
                    Log.warning("pulse had problem", e)
        Log.note("pulse listener is done")

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Shut down the listener and its queue, then delegate to Thread."""
        Log.note("clean pulse exit")
        self.please_stop.go()
        try:
            self.target_queue.close()
            Log.note("stop put into queue")
        except Exception:
            # best-effort close during teardown
            pass

        try:
            self.pulse.disconnect()
        except Exception as e:
            Log.warning("Can not disconnect during pulse exit, ignoring", e)
        Thread.__exit__(self, exc_type, exc_val, exc_tb)
class Publisher(object):
    """
    Mimic GenericPublisher https://github.com/bhearsum/mozillapulse/blob/master/mozillapulse/publishers.py
    """

    @use_settings
    def __init__(
        self,
        exchange,  # name of the Pulse exchange
        host='pulse.mozilla.org',  # url to connect,
        port=5671,  # tcp port
        user=None,
        password=None,
        vhost="/",
        start=0,  # USED AS STARTING POINT FOR ASSIGNING THE _meta.count ATTRIBUTE
        ssl=True,
        applabel=None,
        heartbeat=False,  # True to also get the Pulse heartbeat message
        durable=False,  # True to keep queue after shutdown
        serializer='json',
        broker_timezone='GMT',
        settings=None
    ):
        self.settings = settings
        self.connection = None
        self.count = 0

    def connect(self):
        """Open the AMQP connection, unless one is already open."""
        if self.connection:
            return
        s = self.settings
        self.connection = Connection(
            hostname=s.host,
            port=s.port,
            userid=s.user,
            password=s.password,
            virtual_host=s.vhost,
            ssl=s.ssl
        )

    def disconnect(self):
        """Release the AMQP connection, if any."""
        conn, self.connection = self.connection, None
        if conn:
            conn.release()

    def send(self, topic, message):
        """Publishes a pulse message to the proper exchange."""
        if not message:
            Log.error("Expecting a message")

        message._prepare()

        if not self.connection:
            self.connect()

        # The message is actually a simple envelope format with a payload and
        # some metadata.
        envelope = Dict(
            payload=message.data,
            _meta=set_default({
                'exchange': self.settings.exchange,
                'routing_key': message.routing_key,
                'serializer': self.settings.serializer,
                'sent': time_to_string(datetime.datetime.now(timezone(self.settings.broker_timezone))),
                'count': self.count
            }, message.metadata)
        )

        Producer(
            channel=self.connection,
            exchange=Exchange(self.settings.exchange, type='topic'),
            routing_key=topic
        ).publish(jsons.scrub(envelope), serializer=self.settings.serializer)
        self.count += 1
class ModifiedGenericConsumer(GenericConsumer):
    """GenericConsumer variant that survives socket timeouts.

    The stock ``_drain_events_loop`` lets ``socket.timeout`` propagate;
    here it is caught, logged, and the connection is dropped so the
    surrounding ``listen()`` loop can reconnect and resume.
    """

    def _drain_events_loop(self):
        while True:
            try:
                self.connection.drain_events(timeout=self.timeout)
            # MODERNIZED: `except X, e` is Python-2-only syntax; `as` works
            # on Python 2.6+ and Python 3.
            except socket_timeout as e:
                Log.warning("timeout! Restarting pulse consumer.", cause=e)
                try:
                    self.disconnect()
                except Exception as f:
                    Log.warning("Problem with disconnect()", cause=f)
                break
| mozilla/ChangeDetector | pyLibrary/env/pulse.py | Python | mpl-2.0 | 7,334 |
import socket
import sys
# NOTE: Python 2 script (uses `print >>file` syntax).
# Single-threaded TCP echo server: accepts one client at a time and echoes
# everything it receives back to the sender.
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Bind the socket to the address given on the command line
server_name = sys.argv[1]
server_address = (server_name, 10000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# backlog of 1: at most one pending connection while serving another
sock.listen(1)

while True:
    print >>sys.stderr, 'waiting for a connection'
    connection, client_address = sock.accept()
    try:
        print >>sys.stderr, 'client connected:', client_address
        while True:
            # 16-byte chunks; an empty read means the client closed.
            data = connection.recv(16)
            print >>sys.stderr, 'received "%s"' % data
            if data:
                connection.sendall(data)
            else:
                break
    finally:
        # always release the client socket, even on error
        connection.close()
from feature_extraction.pre_processing.filter_precedent import precendent_directory_cleaner
def run(command_list):
    """Entry point for the pre-processing stage.

    Thin delegation: forwards the command-line argument list to the
    precedent directory cleaner, which performs the actual filtering.
    """
    precendent_directory_cleaner.run(command_list)
| Cyberjusticelab/JusticeAI | src/ml_service/feature_extraction/pre_processing/pre_processing_driver.py | Python | mit | 168 |
# -*- coding: utf-8 -*-
##
## This file is part of Flask-Registry
## Copyright (C) 2013 CERN.
##
## Flask-Registry is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Flask-Registry is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Flask-Registry; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
"""
Flask-Registry extension
"""
import six
from werkzeug.utils import import_string
from flask import current_app, has_app_context
from flask_registry import RegistryProxy, RegistryBase, RegistryError
from flask_registry.registries.core import ModuleRegistry
class ModuleDiscoveryRegistry(ModuleRegistry):
    """
    Python module registry with discover capabilities.

    The registry will discover modules with a given name from packages
    specified in a ``PackageRegistry``.

    Example::

        app.config['PACKAGES'] = ['invenio.modules.*', ...]
        app.config['PACKAGES_VIEWS_EXCLUDE'] = ['invenio.modules.oldstuff']
        app.extensions['registry']['packages'] = PackageRegistry()
        app.extensions['registry']['views'] = DiscoverRegistry('views')
        app.extensions['registry']['views'].discover(app)
    """

    def __init__(self, module_name, registry_namespace=None, with_setup=False,
                 silent=False):
        """
        :param module_name: Name of module to look for in packages
        :param registry_namespace: Name (or registry instance) of the registry
            containing the package registry. Defaults to ``packages``.
        :param with_setup: Call ``setup`` and ``teardown`` functions on module.
        :param silent: If True, ignore modules that fail to import.
        """
        self.module_name = module_name
        self.silent = silent
        if registry_namespace is not None and \
           isinstance(registry_namespace, (RegistryProxy, RegistryBase)):
            self.registry_namespace = registry_namespace.namespace
        else:
            self.registry_namespace = registry_namespace or 'packages'
        # Setup config variable prefix, e.g. 'packages' -> 'PACKAGES'.
        # BUG FIX: str.upper()/str.replace() return new strings; the previous
        # code called them and discarded the results, so the prefix was never
        # normalized and lookups did not match the documented
        # '<NAMESPACE>_<MODULE>_EXCLUDE' config-variable naming.
        self.cfg_var_prefix = self.registry_namespace.upper().replace('.', '_')
        super(ModuleDiscoveryRegistry, self).__init__(with_setup=with_setup)

    def discover(self, app=None, *args, **kwargs):
        """
        Discover modules.

        Specific modules can be excluded with the configuration variable
        ``<NAMESPACE>_<MODULE_NAME>_EXCLUDE`` (e.g ``PACKAGES_VIEWS_EXCLUDE``).
        The namespace name is capitalized and has dots replaced by underscore.

        :param app: Flask application; defaults to the current app context or
            an application bound at construction time.
        :raises RegistryError: if no Flask application can be determined.
        """
        if app is None and has_app_context():
            app = current_app
        if app is None and hasattr(self, 'app'):
            app = getattr(self, 'app')
        if app is None:
            # BUG FIX: the exception was instantiated but never raised,
            # letting execution continue and fail later with AttributeError.
            raise RegistryError("You must provide a Flask application.")

        blacklist = app.config.get(
            '%s_%s_EXCLUDE' % (self.cfg_var_prefix, self.module_name.upper()),
            []
        )

        for pkg in app.extensions['registry'][self.registry_namespace]:
            if not isinstance(pkg, six.string_types):
                pkg = pkg.__name__
            if pkg in blacklist:
                continue
            self._discover_module(pkg, app)

    def _discover_module(self, pkg, app):
        """Import ``<pkg>.<module_name>`` and register it, logging failures."""
        import_str = pkg + '.' + self.module_name

        try:
            module = import_string(import_str, self.silent)
            self.register(module)
        except ImportError:
            # The package simply does not provide this module -- not an error.
            pass
        except Exception as e:
            import traceback
            traceback.print_exc()
            app.logger.error('Could not import: "%s: %s', import_str, str(e))
class ModuleAutoDiscoveryRegistry(ModuleDiscoveryRegistry):
    """Module discovery registry that runs discovery at construction time.

    Unlike :class:`ModuleDiscoveryRegistry`, no explicit ``discover()``
    call is needed; the bound application is also remembered for any
    later re-discovery.
    """
    def __init__(self, module_name, app=None, *args, **kwargs):
        """
        :param module_name: Name of module to look for in packages.
        :param app: Flask application used for (and stored for) discovery.
        """
        super(ModuleAutoDiscoveryRegistry, self).__init__(
            module_name, *args, **kwargs
        )
        self.app = app
        self.discover(app=app)
| jirikuncar/flask-registry | flask_registry/registries/modulediscovery.py | Python | gpl-2.0 | 4,906 |
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Register all ModelAdmin classes from installed apps with the admin site.
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'djangster.views.home', name='home'),
    url(r'^create-site/$', 'djangster.views.create_site', name='create_site'),
    url(r'^create-site/success/$', 'djangster.views.create_site_success', name='create_site_success'),
    url(r'^about/$', 'djangster.views.about', name='about'),
    url(r'^features/$', 'djangster.views.features', name='features'),
    url(r'^price-list/$', 'djangster.views.price_list', name='price_list'),
    url(r'^work/$', 'djangster.views.work', name='work'),

    # user views
    url(r'^logout/$', 'djangster.user_views.logout_view', name='logout'),
    url(r'^login/$', 'djangster.user_views.login_view', name='login'),
    url(r'^register/$', 'djangster.user_views.register_view', name='register'),
    url(r'^get_login_buttons/$', 'djangster.user_views.get_login_buttons', name='get_login_buttons'),

    # url(r'^djangster/', include('djangster.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)

# NOTE(review): static files are wired up twice below -- once via the explicit
# django.views.static.serve route and again via the static() helper. The
# static() helper is a no-op unless DEBUG is on; serving static files through
# Django is for development only.
urlpatterns += patterns('', (r'^static/(?P<path>.*)$', 'django.views.static.serve',
                             {'document_root': settings.STATIC_ROOT, 'show_indexes': True}))

urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
"""URL configuration for the pyday_alarms app."""
from django.conf.urls import url
from django.conf.urls import patterns  # noqa: F401 -- unused since the dead patterns() block was removed
from pyday_alarms import views

app_name = 'pyday_alarms'

urlpatterns = [
    # Alarm dashboard / management view.
    url(r'^alarms/$', views.AlarmView.as_view(), name='alarms'),
]
# Removed a commented-out (triple-quoted) legacy ``patterns()`` block that
# referenced pyday_social_network views; it was a no-op string statement.
| 6desislava6/PyDay | pyday_alarms/urls.py | Python | mit | 336 |
# -*- coding: utf-8 -*-
"""In-memory representation of cluster state.
This module implements a data-structure used to keep
track of the state of a cluster of workers and the tasks
it is working on (by consuming events).
For every event consumed the state is updated,
so the state represents the state of the cluster
at the time of the last event.
Snapshots (:mod:`celery.events.snapshot`) can be used to
take "pictures" of this state at regular intervals
to for example, store that in a database.
"""
from __future__ import absolute_import, unicode_literals
import bisect
import sys
import threading
from collections import Callable, defaultdict
from datetime import datetime
from decimal import Decimal
from itertools import islice
from operator import itemgetter
from time import time
from weakref import WeakSet, ref
from kombu.clocks import timetuple
from kombu.utils.objects import cached_property
from celery import states
from celery.five import items, python_2_unicode_compatible, values
from celery.utils.functional import LRUCache, memoize, pass1
from celery.utils.log import get_logger
__all__ = ('Worker', 'Task', 'State', 'heartbeat_expires')
# pylint: disable=redefined-outer-name
# We cache globals and attribute lookups, so disable this warning.
# pylint: disable=too-many-function-args
# For some reason pylint thinks ._event is a method, when it's a property.
#: Set if running PyPy
PYPY = hasattr(sys, 'pypy_version_info')
#: The window (in percentage) is added to the workers heartbeat
#: frequency. If the time between updates exceeds this window,
#: then the worker is considered to be offline.
HEARTBEAT_EXPIRE_WINDOW = 200
#: Max drift between event timestamp and time of event received
#: before we alert that clocks may be unsynchronized.
HEARTBEAT_DRIFT_MAX = 16
DRIFT_WARNING = """\
Substantial drift from %s may mean clocks are out of sync. Current drift is
%s seconds. [orig: %s recv: %s]
"""
logger = get_logger(__name__)
# Bound method cached as a module-level shortcut (used in hot paths).
warn = logger.warning
#: repr() templates for State, Worker and Task.
R_STATE = '<State: events={0.event_count} tasks={0.task_count}>'
R_WORKER = '<Worker: {0.hostname} ({0.status_string} clock:{0.clock})'
R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'
#: Mapping of task event names to task state.
TASK_EVENT_TO_STATE = {
    'sent': states.PENDING,
    'received': states.RECEIVED,
    'started': states.STARTED,
    'failed': states.FAILURE,
    'retried': states.RETRY,
    'succeeded': states.SUCCESS,
    'revoked': states.REVOKED,
    'rejected': states.REJECTED,
}
class CallableDefaultdict(defaultdict):
    """:class:`~collections.defaultdict` that can also be called.

    Kept for backwards compatibility: ``State.tasks_by_type`` and
    ``State.tasks_by_worker`` used to be methods but are now indexes,
    so the mapping itself must stay subscriptable::

        >>> add_tasks = state.tasks_by_type['proj.tasks.add']

    while still supporting the legacy method-call form::

        >>> add_tasks = list(state.tasks_by_type(
        ...     'proj.tasks.add', reverse=True))
    """

    def __init__(self, fun, *args, **kwargs):
        super(CallableDefaultdict, self).__init__(*args, **kwargs)
        self.fun = fun

    def __call__(self, *args, **kwargs):
        # Calling the mapping delegates to the wrapped function.
        return self.fun(*args, **kwargs)


Callable.register(CallableDefaultdict)  # noqa: E305
@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
def _warn_drift(hostname, drift, local_received, timestamp):
    """Log a clock-drift warning for *hostname*.

    Memoized on the hostname so each node is only warned about once.
    """
    received_at = datetime.fromtimestamp(local_received)
    sent_at = datetime.fromtimestamp(timestamp)
    warn(DRIFT_WARNING, hostname, drift, received_at, sent_at)
def heartbeat_expires(timestamp, freq=60,
                      expire_window=HEARTBEAT_EXPIRE_WINDOW,
                      Decimal=Decimal, float=float, isinstance=isinstance):
    """Return time when heartbeat expires."""
    # Some json implementations deserialize numbers as decimal.Decimal,
    # which cannot be mixed with float arithmetic -- normalize both first.
    if isinstance(freq, Decimal):
        freq = float(freq)
    if isinstance(timestamp, Decimal):
        timestamp = float(timestamp)
    # expire_window is a percentage of the heartbeat frequency.
    return timestamp + (freq * (expire_window / 1e2))
def _depickle_task(cls, fields):
    """Reconstruct a task instance from its fields (used by ``Task.__reduce__``)."""
    return cls(**fields)
def with_unique_field(attr):
    """Class decorator installing ``__eq__``/``__ne__``/``__hash__``.

    Instances compare equal iff their *attr* attributes are equal, and
    they hash the same as their *attr* value.  Comparison with objects
    of a different class returns :const:`NotImplemented`.
    """
    def _install(cls):
        def __eq__(self, other):
            if not isinstance(other, self.__class__):
                return NotImplemented
            return getattr(self, attr) == getattr(other, attr)

        def __ne__(self, other):
            outcome = self.__eq__(other)
            if outcome is NotImplemented:
                return True
            return not outcome

        def __hash__(self):
            return hash(getattr(self, attr))

        cls.__eq__ = __eq__
        cls.__ne__ = __ne__
        cls.__hash__ = __hash__
        return cls
    return _install
@with_unique_field('hostname')
@python_2_unicode_compatible
class Worker(object):
    """Worker State."""
    #: Maximum number of heartbeat timestamps kept per worker.
    heartbeat_max = 4
    #: Percentage window added to the heartbeat frequency before the
    #: worker is considered offline (see :func:`heartbeat_expires`).
    expire_window = HEARTBEAT_EXPIRE_WINDOW
    _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock',
               'active', 'processed', 'loadavg', 'sw_ident',
               'sw_ver', 'sw_sys')
    if not PYPY:  # pragma: no cover
        __slots__ = _fields + ('event', '__dict__', '__weakref__')
    def __init__(self, hostname=None, pid=None, freq=60,
                 heartbeats=None, clock=0, active=None, processed=None,
                 loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None):
        """Initialize worker state from the fields of a worker event."""
        self.hostname = hostname
        self.pid = pid
        self.freq = freq
        self.heartbeats = [] if heartbeats is None else heartbeats
        self.clock = clock or 0
        self.active = active
        self.processed = processed
        self.loadavg = loadavg
        self.sw_ident = sw_ident
        self.sw_ver = sw_ver
        self.sw_sys = sw_sys
        # Per-instance closure; avoids attribute lookups in the hot path.
        self.event = self._create_event_handler()
    def __reduce__(self):
        # Pickle support (the ``event`` closure itself is not picklable).
        return self.__class__, (self.hostname, self.pid, self.freq,
                                self.heartbeats, self.clock, self.active,
                                self.processed, self.loadavg, self.sw_ident,
                                self.sw_ver, self.sw_sys)
    def _create_event_handler(self):
        """Build the closure used to consume worker events.

        Frequently-used attributes/builtins are bound to locals for speed.
        """
        _set = object.__setattr__
        hbmax = self.heartbeat_max
        heartbeats = self.heartbeats
        hb_pop = self.heartbeats.pop
        hb_append = self.heartbeats.append
        def event(type_, timestamp=None,
                  local_received=None, fields=None,
                  max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, int=int,
                  insort=bisect.insort, len=len):
            # Every event field becomes an attribute on the worker.
            fields = fields or {}
            for k, v in items(fields):
                _set(self, k, v)
            if type_ == 'offline':
                heartbeats[:] = []
            else:
                if not local_received or not timestamp:
                    return
                drift = abs(int(local_received) - int(timestamp))
                if drift > max_drift:
                    _warn_drift(self.hostname, drift,
                                local_received, timestamp)
                if local_received:  # pragma: no cover
                    hearts = len(heartbeats)
                    # Keep at most ``heartbeat_max`` entries, in sorted order.
                    if hearts > hbmax - 1:
                        hb_pop(0)
                    if hearts and local_received > heartbeats[-1]:
                        hb_append(local_received)
                    else:
                        insort(heartbeats, local_received)
        return event
    def update(self, f, **kw):
        """Update attributes from mapping *f* (optionally merged with **kw)."""
        for k, v in items(dict(f, **kw) if kw else f):
            setattr(self, k, v)
    def __repr__(self):
        return R_WORKER.format(self)
    @property
    def status_string(self):
        return 'ONLINE' if self.alive else 'OFFLINE'
    @property
    def heartbeat_expires(self):
        # NOTE(review): raises IndexError if heartbeats is empty; callers
        # are expected to check ``alive``/``heartbeats`` first.
        return heartbeat_expires(self.heartbeats[-1],
                                 self.freq, self.expire_window)
    @property
    def alive(self, nowfun=time):
        # ``nowfun`` default-binds time() at class creation (micro-opt).
        return bool(self.heartbeats and nowfun() < self.heartbeat_expires)
    @property
    def id(self):
        return '{0.hostname}.{0.pid}'.format(self)
@with_unique_field('uuid')
@python_2_unicode_compatible
class Task(object):
    """Task State."""

    # All event-provided fields default to None at the class level.
    name = received = sent = started = succeeded = failed = retried = \
        revoked = rejected = args = kwargs = eta = expires = retries = \
        worker = result = exception = timestamp = runtime = traceback = \
        exchange = routing_key = root_id = parent_id = client = None
    state = states.PENDING
    clock = 0
    _fields = (
        'uuid', 'name', 'state', 'received', 'sent', 'started', 'rejected',
        'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs',
        'eta', 'expires', 'retries', 'worker', 'result', 'exception',
        'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key',
        'clock', 'client', 'root', 'root_id', 'parent', 'parent_id',
        'children',
    )
    if not PYPY:  # pragma: no cover
        __slots__ = ('__dict__', '__weakref__')
    #: How to merge out of order events.
    #: Disorder is detected by logical ordering (e.g., :event:`task-received`
    #: must've happened before a :event:`task-failed` event).
    #:
    #: A merge rule consists of a state and a list of fields to keep from
    #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args
    #: fields are always taken from the RECEIVED state, and any values for
    #: these fields received before or after is simply ignored.
    merge_rules = {
        states.RECEIVED: (
            # BUGFIX: 'root_id' and 'retries' were previously missing the
            # separating comma, silently concatenating into the bogus field
            # name 'root_idretries' so neither field was kept on merge.
            'name', 'args', 'kwargs', 'parent_id',
            'root_id', 'retries', 'eta', 'expires',
        ),
    }
    #: meth:`info` displays these fields by default.
    _info_fields = (
        'args', 'kwargs', 'retries', 'result', 'eta', 'runtime',
        'expires', 'exception', 'exchange', 'routing_key',
        'root_id', 'parent_id',
    )

    def __init__(self, uuid=None, cluster_state=None, children=None, **kwargs):
        """Initialize task state, resolving known child tasks weakly."""
        self.uuid = uuid
        self.cluster_state = cluster_state
        if self.cluster_state is not None:
            self.children = WeakSet(
                self.cluster_state.tasks.get(task_id)
                for task_id in children or ()
                if task_id in self.cluster_state.tasks
            )
        else:
            self.children = WeakSet()
        self._serializer_handlers = {
            'children': self._serializable_children,
            'root': self._serializable_root,
            'parent': self._serializable_parent,
        }
        if kwargs:
            self.__dict__.update(kwargs)

    def event(self, type_, timestamp=None, local_received=None, fields=None,
              precedence=states.precedence, items=items,
              setattr=setattr, task_event_to_state=TASK_EVENT_TO_STATE.get,
              RETRY=states.RETRY):
        """Consume a single task event, merging out-of-order events."""
        fields = fields or {}
        # using .get is faster than catching KeyError in this case.
        state = task_event_to_state(type_)
        if state is not None:
            # sets, for example, self.succeeded to the timestamp.
            setattr(self, type_, timestamp)
        else:
            state = type_.upper()  # custom state
        # note that precedence here is reversed
        # see implementation in celery.states.state.__lt__
        if state != RETRY and self.state != RETRY and \
                precedence(state) > precedence(self.state):
            # this state logically happens-before the current state, so merge.
            keep = self.merge_rules.get(state)
            if keep is not None:
                fields = {
                    k: v for k, v in items(fields) if k in keep
                }
        else:
            fields.update(state=state, timestamp=timestamp)
        # update current state with info from this event.
        self.__dict__.update(fields)

    def info(self, fields=None, extra=()):
        """Information about this task suitable for on-screen display."""
        # NOTE: ``extra`` default changed from a mutable ``[]`` to ``()``;
        # it is only iterated, so behavior is unchanged.
        fields = self._info_fields if fields is None else fields

        def _keys():
            for key in list(fields) + list(extra):
                value = getattr(self, key, None)
                if value is not None:
                    yield key, value

        return dict(_keys())

    def __repr__(self):
        return R_TASK.format(self)

    def as_dict(self):
        """Return task state as a serializable dict (weak refs flattened)."""
        get = object.__getattribute__
        handler = self._serializer_handlers.get
        return {
            k: handler(k, pass1)(get(self, k)) for k in self._fields
        }

    def _serializable_children(self, value):
        return [task.id for task in self.children]

    def _serializable_root(self, value):
        return self.root_id

    def _serializable_parent(self, value):
        return self.parent_id

    def __reduce__(self):
        return _depickle_task, (self.__class__, self.as_dict())

    @property
    def id(self):
        return self.uuid

    @property
    def origin(self):
        return self.client if self.worker is None else self.worker.id

    @property
    def ready(self):
        return self.state in states.READY_STATES

    @cached_property
    def parent(self):
        # issue github.com/mher/flower/issues/648
        try:
            return self.parent_id and self.cluster_state.tasks[self.parent_id]
        except KeyError:
            return None

    @cached_property
    def root(self):
        # issue github.com/mher/flower/issues/648
        try:
            return self.root_id and self.cluster_state.tasks[self.root_id]
        except KeyError:
            return None
class State(object):
    """Records clusters state."""
    Worker = Worker
    Task = Task
    #: Total number of events consumed.
    event_count = 0
    #: Number of tasks seen (counted on task-received).
    task_count = 0
    # Each task typically produces a handful of events
    # (PENDING->RECEIVED->STARTED->final), so the event heap may grow
    # to ``max_tasks_in_memory * heap_multiplier`` entries.
    heap_multiplier = 4
    def __init__(self, callback=None,
                 workers=None, tasks=None, taskheap=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000,
                 on_node_join=None, on_node_leave=None,
                 tasks_by_type=None, tasks_by_worker=None):
        """Initialize cluster state with bounded LRU worker/task caches."""
        self.event_callback = callback
        self.workers = (LRUCache(max_workers_in_memory)
                        if workers is None else workers)
        self.tasks = (LRUCache(max_tasks_in_memory)
                      if tasks is None else tasks)
        self._taskheap = [] if taskheap is None else taskheap
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.on_node_join = on_node_join
        self.on_node_leave = on_node_leave
        self._mutex = threading.Lock()
        self.handlers = {}
        self._seen_types = set()
        self._tasks_to_resolve = {}
        self.rebuild_taskheap()
        # type: Mapping[TaskName, WeakSet[Task]]
        self.tasks_by_type = CallableDefaultdict(
            self._tasks_by_type, WeakSet)
        self.tasks_by_type.update(
            _deserialize_Task_WeakSet_Mapping(tasks_by_type, self.tasks))
        # type: Mapping[Hostname, WeakSet[Task]]
        self.tasks_by_worker = CallableDefaultdict(
            self._tasks_by_worker, WeakSet)
        self.tasks_by_worker.update(
            _deserialize_Task_WeakSet_Mapping(tasks_by_worker, self.tasks))
    @cached_property
    def _event(self):
        # Lazily built optimized dispatcher (see _create_dispatcher).
        return self._create_dispatcher()
    def freeze_while(self, fun, *args, **kwargs):
        """Call *fun* while holding the mutex; optionally clear state after."""
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()
    def clear_tasks(self, ready=True):
        """Thread-safe version of :meth:`_clear_tasks`."""
        with self._mutex:
            return self._clear_tasks(ready)
    def _clear_tasks(self, ready=True):
        # When ``ready`` is true, keep tasks that are still in progress.
        if ready:
            in_progress = {
                uuid: task for uuid, task in self.itertasks()
                if task.state not in states.READY_STATES
            }
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []
    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0
    def clear(self, ready=True):
        """Thread-safe version of :meth:`_clear`."""
        with self._mutex:
            return self._clear(ready)
    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Returns:
            Tuple: of ``(worker, was_created)`` pairs.
        """
        try:
            worker = self.workers[hostname]
            if kwargs:
                worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = self.Worker(
                hostname, **kwargs)
            return worker, True
    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], False
        except KeyError:
            task = self.tasks[uuid] = self.Task(uuid, cluster_state=self)
            return task, True
    def event(self, event):
        """Process a single event (thread-safe)."""
        with self._mutex:
            return self._event(event)
    def task_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['task', type_])))[0]
    def worker_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['worker', type_])))[0]
    def _create_dispatcher(self):
        """Build the event-dispatch closure (hot path, heavily optimized)."""
        # noqa: C901
        # pylint: disable=too-many-statements
        # This code is highly optimized, but not for reusability.
        get_handler = self.handlers.__getitem__
        event_callback = self.event_callback
        wfields = itemgetter('hostname', 'timestamp', 'local_received')
        tfields = itemgetter('uuid', 'hostname', 'timestamp',
                             'local_received', 'clock')
        taskheap = self._taskheap
        th_append = taskheap.append
        th_pop = taskheap.pop
        # Removing events from task heap is an O(n) operation,
        # so easier to just account for the common number of events
        # for each task (PENDING->RECEIVED->STARTED->final)
        #: an O(n) operation
        max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier
        add_type = self._seen_types.add
        on_node_join, on_node_leave = self.on_node_join, self.on_node_leave
        tasks, Task = self.tasks, self.Task
        workers, Worker = self.workers, self.Worker
        # avoid updating LRU entry at getitem
        get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__
        get_task_by_type_set = self.tasks_by_type.__getitem__
        get_task_by_worker_set = self.tasks_by_worker.__getitem__
        def _event(event,
                   timetuple=timetuple, KeyError=KeyError,
                   insort=bisect.insort, created=True):
            self.event_count += 1
            if event_callback:
                event_callback(self, event)
            group, _, subject = event['type'].partition('-')
            try:
                handler = get_handler(group)
            except KeyError:
                pass
            else:
                return handler(subject, event), subject
            if group == 'worker':
                try:
                    hostname, timestamp, local_received = wfields(event)
                except KeyError:
                    pass
                else:
                    is_offline = subject == 'offline'
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        if is_offline:
                            worker, created = Worker(hostname), False
                        else:
                            worker = workers[hostname] = Worker(hostname)
                    worker.event(subject, timestamp, local_received, event)
                    if on_node_join and (created or subject == 'online'):
                        on_node_join(worker)
                    if on_node_leave and is_offline:
                        on_node_leave(worker)
                        workers.pop(hostname, None)
                    return (worker, created), subject
            elif group == 'task':
                (uuid, hostname, timestamp,
                 local_received, clock) = tfields(event)
                # task-sent event is sent by client, not worker
                is_client_event = subject == 'sent'
                try:
                    task, task_created = get_task(uuid), False
                except KeyError:
                    task = tasks[uuid] = Task(uuid, cluster_state=self)
                    task_created = True
                if is_client_event:
                    task.client = hostname
                else:
                    try:
                        worker = get_worker(hostname)
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)
                    task.worker = worker
                    if worker is not None and local_received:
                        worker.event(None, local_received, timestamp)
                origin = hostname if is_client_event else worker.id
                # remove oldest event if exceeding the limit.
                heaps = len(taskheap)
                if heaps + 1 > max_events_in_heap:
                    th_pop(0)
                # most events will be dated later than the previous.
                timetup = timetuple(clock, timestamp, origin, ref(task))
                if heaps and timetup > taskheap[-1]:
                    th_append(timetup)
                else:
                    insort(taskheap, timetup)
                if subject == 'received':
                    self.task_count += 1
                task.event(subject, timestamp, local_received, event)
                task_name = task.name
                if task_name is not None:
                    add_type(task_name)
                    if task_created:  # add to tasks_by_type index
                        get_task_by_type_set(task_name).add(task)
                        get_task_by_worker_set(hostname).add(task)
                if task.parent_id:
                    try:
                        parent_task = self.tasks[task.parent_id]
                    except KeyError:
                        self._add_pending_task_child(task)
                    else:
                        parent_task.children.add(task)
                try:
                    _children = self._tasks_to_resolve.pop(uuid)
                except KeyError:
                    pass
                else:
                    task.children.update(_children)
                return (task, task_created), subject
        return _event
    def _add_pending_task_child(self, task):
        """Remember *task* until its (not yet seen) parent task arrives."""
        try:
            ch = self._tasks_to_resolve[task.parent_id]
        except KeyError:
            ch = self._tasks_to_resolve[task.parent_id] = WeakSet()
        ch.add(task)
    def rebuild_taskheap(self, timetuple=timetuple):
        """Rebuild the sorted event heap from the current task set."""
        heap = self._taskheap[:] = [
            timetuple(t.clock, t.timestamp, t.origin, ref(t))
            for t in values(self.tasks)
        ]
        heap.sort()
    def itertasks(self, limit=None):
        """Iterate over ``(uuid, Task)`` pairs, up to *limit* items."""
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break
    def tasks_by_time(self, limit=None, reverse=True):
        """Generator yielding tasks ordered by time.

        Yields:
            Tuples of ``(uuid, Task)``.
        """
        _heap = self._taskheap
        if reverse:
            _heap = reversed(_heap)
        seen = set()
        for evtup in islice(_heap, 0, limit):
            task = evtup[3]()
            if task is not None:
                uuid = task.uuid
                if uuid not in seen:
                    yield uuid, task
                    seen.add(uuid)
    tasks_by_timestamp = tasks_by_time
    def _tasks_by_type(self, name, limit=None, reverse=True):
        """Get all tasks by type.

        This is slower than accessing :attr:`tasks_by_type`,
        but will be ordered by time.

        Returns:
            Generator: giving ``(uuid, Task)`` pairs.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse)
             if task.name == name),
            0, limit,
        )
    def _tasks_by_worker(self, hostname, limit=None, reverse=True):
        """Get all tasks by worker.

        Slower than accessing :attr:`tasks_by_worker`, but ordered by time.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse)
             if task.worker.hostname == hostname),
            0, limit,
        )
    def task_types(self):
        """Return a list of all seen task types."""
        return sorted(self._seen_types)
    def alive_workers(self):
        """Return a list of (seemingly) alive workers."""
        return (w for w in values(self.workers) if w.alive)
    def __repr__(self):
        return R_STATE.format(self)
    def __reduce__(self):
        return self.__class__, (
            self.event_callback, self.workers, self.tasks, None,
            self.max_workers_in_memory, self.max_tasks_in_memory,
            self.on_node_join, self.on_node_leave,
            _serialize_Task_WeakSet_Mapping(self.tasks_by_type),
            _serialize_Task_WeakSet_Mapping(self.tasks_by_worker),
        )
def _serialize_Task_WeakSet_Mapping(mapping):
    """Flatten ``{name: WeakSet[Task]}`` into a picklable ``{name: [ids]}``."""
    serialized = {}
    for name, tasks in items(mapping):
        serialized[name] = [task.id for task in tasks]
    return serialized
def _deserialize_Task_WeakSet_Mapping(mapping, tasks):
    """Rebuild ``{name: WeakSet[Task]}`` from ``{name: [ids]}``.

    Ids no longer present in *tasks* are silently dropped.
    """
    deserialized = {}
    for name, ids in items(mapping or {}):
        deserialized[name] = WeakSet(tasks[i] for i in ids if i in tasks)
    return deserialized
| kawamon/hue | desktop/core/ext-py/celery-4.2.1/celery/events/state.py | Python | apache-2.0 | 25,942 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
See README :)
"""
from __future__ import print_function
import argparse
import cgi
import difflib
import tempfile
import os
import re
import sys
import json
from itertools import chain
import concurrent.futures
import pip_api
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
from packaging.version import parse
# Python 2/3 compatibility shims for urllib and input().
if sys.version_info >= (3,):
    from urllib.request import urlopen
    from urllib.error import HTTPError
    from urllib.parse import urljoin
else:
    # NOTE(review): the Py2 branch never imports HTTPError (it lives in
    # urllib2 there) -- _download's except clause would NameError on Py2.
    from urllib import urlopen
    from urlparse import urljoin
    input = raw_input  # noqa
if sys.version_info < (2, 7, 9):
    import warnings
    warnings.warn(
        "In Python 2.7.9, the built-in urllib.urlopen() got upgraded "
        "so that it, by default, does HTTPS certificate verification. "
        "All prior versions do not. That means you run the risk of "
        "downloading from a server that claims (man-in-the-middle "
        "attack) to be https://pypi.python.org but actually is not. "
        "Consider upgrading your version of Python."
    )
DEFAULT_ALGORITHM = "sha256"
#: Package index base URL; overridable via the INDEX_URL env var.
DEFAULT_INDEX_URL = os.environ.get("INDEX_URL", "https://pypi.org/")
assert DEFAULT_INDEX_URL
MAX_WORKERS = None
if sys.version_info >= (3, 4) and sys.version_info < (3, 5):
    # Python 3.4 is an odd duck. It's the first Python 3 version that had
    # concurrent.futures.ThreadPoolExecutor built in. (Python 2.7 needs a
    # backport from PyPI)
    # However, in Python 3.4 the max_workers (first and only argument) needs
    # to be set. In version > 3.4 the max_workers argument can be None and
    # it will itself figure it out by figuring out the systems number of
    # CPUs and then multiplying that number by 5.
    # So, exclusively for 3.4 we have to set this to some integer.
    # Python 3.4 is small so it's not important that it's the perfect amount.
    MAX_WORKERS = 5
# hashin relies on pip's hash-checking mode, introduced in pip 8.
major_pip_version = int(pip_api.version().split(".")[0])
if major_pip_version < 8:
    raise ImportError("hashin only works with pip 8.x or greater")
class PackageError(Exception):
    """Generic error while downloading or processing a package."""
    pass
class NoVersionsError(Exception):
    """When there are no valid versions found."""
class PackageNotFoundError(Exception):
    """When the package can't be found on pypi.org."""
def _verbose(*args):
    """Print *args* joined by spaces, prefixed with '* ' (verbose output)."""
    print("* " + " ".join(args))
def _download(url, binary=False):
    """Download *url* and return its body.

    Returns bytes when *binary* is true, otherwise text decoded with the
    charset from the Content-Type header (default utf-8).  Follows an
    explicit Location header once (recursively).

    Raises:
        PackageNotFoundError: on HTTP 404.
        PackageError: on any other non-200 response.
    """
    try:
        r = urlopen(url)
    except HTTPError as exception:
        status_code = exception.getcode()
        if status_code == 404:
            raise PackageNotFoundError(url)
        raise PackageError("Download error. {0} on {1}".format(status_code, url))
    # Note that urlopen will, by default, follow redirects.
    status_code = r.getcode()
    if 301 <= status_code < 400:
        # NOTE(review): cgi.parse_header is deprecated (removed in 3.13);
        # consider email.message for header parsing when dropping Py2.
        location, _ = cgi.parse_header(r.headers.get("location", ""))
        if not location:
            raise PackageError(
                "No 'Location' header on {0} ({1})".format(url, status_code)
            )
        return _download(location)
    elif status_code == 404:
        raise PackageNotFoundError(url)
    elif status_code != 200:
        raise PackageError("Download error. {0} on {1}".format(status_code, url))
    if binary:
        return r.read()
    _, params = cgi.parse_header(r.headers.get("Content-Type", ""))
    encoding = params.get("charset", "utf-8")
    return r.read().decode(encoding)
def run(specs, requirements_file, *args, **kwargs):
    """Entry point: hash *specs* into *requirements_file*.

    If *specs* is empty, every pinned (``==``) requirement already in the
    file is re-processed, and the previously pinned versions are passed
    along via ``kwargs['previous_versions']``.
    """
    if not specs:  # then, assume all in the requirements file
        # Matches lines that pin a version with '=='.
        regex = re.compile(r"(^|\n|\n\r).*==")
        specs = []
        previous_versions = {}
        with open(requirements_file) as f:
            for line in f:
                if regex.search(line) and not line.lstrip().startswith("#"):
                    req = Requirement(line.split("\\")[0])
                    # Deliberately strip the specifier (aka. the version)
                    version = req.specifier
                    req.specifier = None
                    specs.append(str(req))
                    previous_versions[str(req)] = version
        kwargs["previous_versions"] = previous_versions
    if isinstance(specs, str):
        specs = [specs]
    return run_packages(specs, requirements_file, *args, **kwargs)
def _explode_package_spec(spec):
restriction = None
if ";" in spec:
spec, restriction = [x.strip() for x in spec.split(";", 1)]
if "==" in spec:
package, version = spec.split("==")
else:
assert ">" not in spec and "<" not in spec
package, version = spec, None
return package, version, restriction
def run_packages(
    specs,
    file,
    algorithm,
    python_versions=None,
    verbose=False,
    include_prereleases=False,
    dry_run=False,
    previous_versions=None,
    interactive=False,
    synchronous=False,
    index_url=DEFAULT_INDEX_URL,
):
    """Fetch hashes for every spec and amend the requirements *file*.

    Returns 0 on success (or nothing to do), 1 when the user aborts an
    interactive session.  With ``dry_run`` the unified diff is printed
    instead of writing the file.
    """
    assert index_url
    assert isinstance(specs, list), type(specs)
    all_new_lines = []
    first_interactive = True
    yes_to_all = False
    lookup_memory = {}
    # Warm the lookup cache concurrently when handling several packages.
    if not synchronous and len(specs) > 1:
        pre_download_packages(
            lookup_memory, specs, verbose=verbose, index_url=index_url
        )
    for spec in specs:
        package, version, restriction = _explode_package_spec(spec)
        # It's important to keep a track of what the package was called before
        # so that if we have to amend the requirements file, we know what to
        # look for before.
        previous_name = package
        # The 'previous_versions' dict is based on the old names. So figure
        # out what the previous version was *before* the new/"correct" name
        # is figured out.
        previous_version = previous_versions.get(package) if previous_versions else None
        req = Requirement(package)
        data = get_package_hashes(
            package=req.name,
            version=version,
            verbose=verbose,
            python_versions=python_versions,
            algorithm=algorithm,
            include_prereleases=include_prereleases,
            lookup_memory=lookup_memory,
            index_url=index_url,
        )
        package = data["package"]
        # We need to keep this `req` instance for the sake of turning it into a string
        # the correct way. But, the name might actually be wrong. Suppose the user
        # asked for "Django" but on PyPI it's actually called "django", then we want
        # correct that.
        # We do that by modifying only the `name` part of the `Requirement` instance.
        req.name = package
        if previous_versions is None:
            # Need to be smart here. It's a little counter-intuitive.
            # If no previous_versions was supplied that has an implied the fact;
            # the user was explicit about what they want to install.
            # The name it was called in the old requirements file doesn't matter.
            previous_name = package
        new_version_specifier = SpecifierSet("=={}".format(data["version"]))
        if previous_version:
            # We have some form of previous version and a new version.
            # If they' already equal, just skip this one.
            if previous_version == new_version_specifier:
                continue
        if interactive:
            try:
                response = interactive_upgrade_request(
                    package,
                    previous_version,
                    new_version_specifier,
                    print_header=first_interactive,
                    force_yes=yes_to_all,
                )
                first_interactive = False
                if response == "NO":
                    continue
                elif response == "ALL":
                    # If you ever answer "all" to the update question, we don't want
                    # stop showing the interactive prompt but we don't need to
                    # ask any questions any more. This way, you get to see the
                    # upgrades that are going to happen.
                    yes_to_all = True
                elif response == "QUIT":
                    return 1
            except KeyboardInterrupt:
                return 1
        maybe_restriction = "" if not restriction else "; {0}".format(restriction)
        new_lines = "{0}=={1}{2} \\\n".format(req, data["version"], maybe_restriction)
        padding = " " * 4
        # One '--hash=' continuation line per release file.
        for i, release in enumerate(data["hashes"]):
            new_lines += "{0}--hash={1}:{2}".format(padding, algorithm, release["hash"])
            if i != len(data["hashes"]) - 1:
                new_lines += " \\"
            new_lines += "\n"
        all_new_lines.append((package, previous_name, new_lines))
    if not all_new_lines:
        # This can happen if you use 'interactive' and said no to everything or
        # if every single package you listed already has the latest version.
        return 0
    with open(file) as f:
        old_requirements = f.read()
    requirements = amend_requirements_content(old_requirements, all_new_lines)
    if dry_run:
        if verbose:
            _verbose("Dry run, not editing ", file)
        print(
            "".join(
                difflib.unified_diff(
                    old_requirements.splitlines(True),
                    requirements.splitlines(True),
                    fromfile="Old",
                    tofile="New",
                )
            )
        )
    else:
        with open(file, "w") as f:
            f.write(requirements)
        if verbose:
            _verbose("Editing", file)
    return 0
def pre_download_packages(memory, specs, verbose=False, index_url=DEFAULT_INDEX_URL):
    """Fetch package metadata for all *specs* concurrently into *memory*.

    *memory* is a dict keyed by package name; used as a lookup cache by
    later ``get_package_hashes`` calls.
    """
    futures = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for spec in specs:
            package, _, _ = _explode_package_spec(spec)
            req = Requirement(package)
            futures[
                executor.submit(get_package_data, req.name, index_url, verbose=verbose)
            ] = req.name
        for future in concurrent.futures.as_completed(futures):
            content = future.result()
            memory[futures[future]] = content
def interactive_upgrade_request(
    package, old_version, new_version, print_header=False, force_yes=False
):
    """Prompt the user whether to upgrade *package*.

    Returns one of "YES", "NO", "ALL" (yes to everything from now on) or
    "QUIT".  Uses ANSI escape codes to redraw the prompt line in place.
    """
    def print_version(v):
        return str(v).replace("==", "").ljust(15)
    if print_header:
        print(
            "PACKAGE".ljust(30),
            print_version("YOUR VERSION"),
            print_version("NEW VERSION"),
        )
    def print_line(checkbox=None):
        # checkbox: None = undecided, True = accepted, False = declined.
        if checkbox is None:
            checkboxed = "?"
        elif checkbox:
            checkboxed = "✓"
        else:
            checkboxed = "✘"
        print(
            package.ljust(30),
            print_version(old_version),
            print_version(new_version),
            checkboxed,
        )
    if force_yes:
        print_line(True)
        return "YES"
    else:
        print_line()
    printed_help = []
    def print_help():
        print(
            "y - Include this update (default)\n"
            "n - Skip this update\n"
            "a - Include this and all following upgrades\n"
            "q - Skip this and all following upgrades\n"
            "? - Print this help\n"
        )
        printed_help.append(1)
    def clear_line():
        sys.stdout.write("\033[F")  # Cursor up one line
        sys.stdout.write("\033[K")  # Clear to the end of line
    def ask():
        answer = input("Update? [Y/n/a/q/?]: ").lower().strip()
        if printed_help:
            # Because the print_help() prints 5 lines to stdout.
            # Plus 2 because of the original question line and the extra blank line.
            for i in range(5 + 2):
                clear_line()
            # printed_help.clear()
            del printed_help[:]
        if answer == "n":
            clear_line()
            clear_line()
            print_line(False)
            return "NO"
        if answer == "a":
            clear_line()
            clear_line()
            print_line(True)
            return "ALL"
        if answer == "q":
            return "QUIT"
        if answer == "y" or answer == "" or answer == "yes":
            clear_line()
            clear_line()
            print_line(True)
            return "YES"
        if answer == "?":
            print_help()
        return ask()
    return ask()
def amend_requirements_content(requirements, all_new_lines):
    """Return *requirements* text with each package's pinned block replaced.

    *all_new_lines* is a list of ``(package, old_name, new_text)`` tuples;
    packages not yet mentioned are appended at the bottom.
    """
    # I wish we had types!
    assert isinstance(all_new_lines, list), type(all_new_lines)
    padding = " " * 4
    def is_different_lines(old_lines, new_lines, indent):
        # This regex is used to only temporarily normalize the names of packages
        # in the lines being compared. This results in "old" names matching
        # "new" names so that hashin correctly replaces them when it looks for
        # them.
        match_delims = re.compile(r"[-_]")
        # This assumes that the package is already mentioned in the old
        # requirements. Now we just need to double-check that its lines are
        # different.
        # The 'new_lines` is what we might intend to replace it with.
        old = set([match_delims.sub("-", line.strip(" \\")) for line in old_lines])
        new = set([indent + x.strip(" \\") for x in new_lines])
        return old != new
    for package, old_name, new_text in all_new_lines:
        # The call to `escape` will turn hyphens into escaped hyphens
        #
        # ex.
        #   - becomes \\-
        #
        escaped = re.escape(old_name)
        # This changes those escaped hypens into a pattern to match
        #
        # ex.
        #   \\- becomes [-_]
        #
        # This is necessary so that hashin will correctly find underscored (old)
        # and hyphenated (new) package names so that it will correctly replace an
        # old name with the new name when there is a version update.
        escape_replaced = escaped.replace("\\-", "[-_]")
        regex = re.compile(
            r"^(?P<indent>[ \t]*){0}(\[.*\])?==".format(escape_replaced),
            re.IGNORECASE | re.MULTILINE,
        )
        # if the package wasn't already there, add it to the bottom
        match = regex.search(requirements)
        if not match:
            # easy peasy
            if requirements:
                requirements = requirements.strip() + "\n"
            requirements += new_text.strip() + "\n"
        else:
            indent = match.group("indent")
            lines = []
            # Collect the package's pin line plus its hash continuation lines.
            for line in requirements.splitlines():
                if regex.search(line):
                    lines.append(line)
                elif lines and line.startswith(indent + padding + "#"):
                    break
                elif lines and line.startswith(indent + padding):
                    lines.append(line)
                elif lines:
                    break
            if is_different_lines(lines, new_text.splitlines(), indent):
                # need to replace the existing
                combined = "\n".join(lines + [""])
                # indent non-empty lines
                indented = re.sub(
                    r"^(.+)$", r"{0}\1".format(indent), new_text, flags=re.MULTILINE
                )
                requirements = requirements.replace(combined, indented)
    return requirements
def get_latest_version(data, include_prereleases):
    """
    Return the version string we consider the latest for this package.

    The info->version key in PyPI's JSON blob is simply the most recently
    uploaded release; instead we pick the highest version number, skipping
    pre-releases unless ``include_prereleases`` is true.

    :raises NoVersionsError: when no acceptable version exists.
    """
    releases = data.get("releases")
    if not releases:
        # No release listing at all; fall back to the legacy behavior of
        # trusting PyPI's own idea of the current version.
        # This feels kinda strange but it has worked for years.
        return data["info"]["version"]

    candidates = []
    skipped_prereleases = 0
    for version_string in releases:
        parsed = parse(version_string)
        if parsed.is_prerelease and not include_prereleases:
            skipped_prereleases += 1
        else:
            candidates.append((parsed, version_string))

    if not candidates:
        message = "No valid version found."
        if not include_prereleases and skipped_prereleases:
            message += (
                " But, found {0} pre-releases. Consider running again "
                "with the --include-prereleases flag.".format(skipped_prereleases)
            )
        raise NoVersionsError(message)

    # The tuples compare by parsed version first, so max() picks the
    # highest release; return its original (unparsed) string form.
    return str(max(candidates)[1])
def expand_python_version(version):
    """
    Expand a ``major.minor`` Python version to the set of identifiers
    that PyPI uses to tag release files.

    Any string that does not look like ``major.minor`` is returned
    unchanged, wrapped in a single-element list.

    >>> sorted(expand_python_version('3.5'))
    ['3.5', 'cp35', 'py2.py3', 'py3', 'py3.5', 'py35', 'source']
    """
    # Accept multi-digit minor versions too (e.g. "3.10"); the previous
    # pattern (^\d\.\d$) only matched a single-digit minor version.
    if not re.match(r"^\d\.\d+$", version):
        return [version]
    major, minor = version.split(".")
    patterns = [
        "{major}.{minor}",
        "cp{major}{minor}",
        "py{major}",
        "py{major}.{minor}",
        "py{major}{minor}",
        "source",
        "py2.py3",
    ]
    return set(pattern.format(major=major, minor=minor) for pattern in patterns)
# This should match the naming convention laid out in PEP 0427
# url = 'https://pypi.python.org/packages/3.4/P/Pygments/Pygments-2.1-py3-none-any.whl' # NOQA
#
# NOTE: the dot before each file-format group is escaped ("\.") so it only
# matches a literal dot; the previous unescaped "." matched ANY character
# there (e.g. "foo-1.0-py3-none-anyXwhl" would have classified as a wheel).
CLASSIFY_WHEEL_RE = re.compile(
    r"""
    ^(?P<package>.+)-
    (?P<version>\d[^-]*)-
    (?P<python_version>[^-]+)-
    (?P<abi>[^-]+)-
    (?P<platform>.+)
    \.(?P<format>whl)
    (\#md5=.*)?
    $
    """,
    re.VERBOSE,
)
CLASSIFY_EGG_RE = re.compile(
    r"""
    ^(?P<package>.+)-
    (?P<version>\d[^-]*)-
    (?P<python_version>[^-]+)
    (-(?P<platform>[^\.]+))?
    \.(?P<format>egg)
    (\#md5=.*)?
    $
    """,
    re.VERBOSE,
)
CLASSIFY_ARCHIVE_RE = re.compile(
    r"""
    ^(?P<package>.+)-
    (?P<version>\d[^-]*)
    (-(?P<platform>[^\.]+))?
    \.(?P<format>tar\.(gz|bz2)|zip)
    (\#md5=.*)?
    $
    """,
    re.VERBOSE,
)
CLASSIFY_EXE_RE = re.compile(
    r"""
    ^(?P<package>.+)-
    (?P<version>\d[^-]*)[-\.]
    ((?P<platform>[^-]*)-)?
    (?P<python_version>[^-]+)
    \.(?P<format>(exe|msi))
    (\#md5=.*)?
    $
    """,
    re.VERBOSE,
)
def release_url_metadata(url):
    """
    Parse a release file URL into its metadata parts.

    Returns a dict with the keys package, version, python_version, abi,
    platform and format; keys a particular filename does not carry are
    left as None.

    :raises PackageError: when the filename matches none of the known
        wheel/egg/exe/archive naming conventions.
    """
    filename = url.rsplit("/", 1)[-1]
    metadata = dict.fromkeys(
        ("package", "version", "python_version", "abi", "platform", "format")
    )
    # Wheels, eggs and Windows installers all encode a python_version
    # directly in the filename.
    for pattern in (CLASSIFY_WHEEL_RE, CLASSIFY_EGG_RE, CLASSIFY_EXE_RE):
        found = pattern.match(filename)
        if found:
            metadata.update(found.groupdict())
            return metadata
    # Source archives carry no python_version marker, so tag them "source".
    found = CLASSIFY_ARCHIVE_RE.match(filename)
    if found:
        metadata.update(found.groupdict())
        metadata["python_version"] = "source"
        return metadata
    raise PackageError("Unrecognizable url: " + url)
def filter_releases(releases, python_versions):
    """
    Keep only the releases whose file targets one of the given Python
    versions (after expanding each version to every identifier PyPI may
    use for it).
    """
    wanted = set()
    for python_version in python_versions:
        wanted.update(expand_python_version(python_version))
    return [
        release
        for release in releases
        if release_url_metadata(release["url"])["python_version"] in wanted
    ]
def get_package_data(package, index_url, verbose=False):
    """
    Download and return the parsed PyPI JSON metadata for ``package``.

    :raises PackageError: when the JSON payload lacks a "releases" key.
    """
    url = urljoin(index_url, "/pypi/%s/json" % package)
    if verbose:
        print(url)
    content = json.loads(_download(url))
    if "releases" not in content:
        raise PackageError("package JSON is not sane")
    return content
def get_releases_hashes(releases, algorithm, verbose=False):
    """
    Yield ``{"hash": ...}`` dicts, one per release file.

    Prefer the hash PyPI already advertises in the release's "digests"
    mapping; when the requested algorithm is missing there, download the
    file (cached in the temp directory) and let pip compute the hash
    locally. Each release dict is also annotated in place with a "hash"
    key as a side effect.
    """
    for release in releases:
        digests = release["digests"]
        try:
            release["hash"] = digests[algorithm]
            if verbose:
                _verbose("Found hash for", release["url"])
        except KeyError:
            # The algorithm is NOT among the served digests, so fall back
            # to downloading the file and hashing it ourselves.
            url = release["url"]
            if verbose:
                _verbose("Found URL", url)
            destination = os.path.join(
                tempfile.gettempdir(), os.path.basename(url.split("#")[0])
            )
            if os.path.isfile(destination):
                if verbose:
                    _verbose(" Re-using", destination)
            else:
                if verbose:
                    _verbose(" Downloaded to", destination)
                with open(destination, "wb") as destination_file:
                    destination_file.write(_download(url, binary=True))
            release["hash"] = pip_api.hash(destination, algorithm)
            if verbose:
                _verbose(" Hash", release["hash"])
        yield {"hash": release["hash"]}
def get_package_hashes(
    package,
    version=None,
    algorithm=DEFAULT_ALGORITHM,
    python_versions=(),
    verbose=False,
    include_prereleases=False,
    lookup_memory=None,
    index_url=DEFAULT_INDEX_URL,
):
    """
    Look up a package on PyPI and return its canonical name, the chosen
    version and the sorted list of release-file hashes, e.g.::

        {
            'package': 'hashin',
            'version': '0.10',
            'hashes': [
                {'hash': '45d1c5d2237a3b4f78b4198709fb2ecf...'},
                {'hash': '0d63bf4c115154781846ecf573049324...'},
            ]
        }

    :param package: package name, in any casing.
    :param version: explicit version to use; defaults to the latest.
    :param algorithm: hash algorithm name, e.g. "sha256".
    :param python_versions: optional Python versions to filter releases by.
    :param verbose: print progress information.
    :param include_prereleases: allow pre-releases when picking the latest.
    :param lookup_memory: optional mapping used as a read-only cache of
        already-fetched PyPI JSON blobs, keyed by package name.
    :param index_url: base URL of the package index.
    :raises PackageError: when the version has no release data or no
        matching releases.
    """
    if lookup_memory is not None and package in lookup_memory:
        data = lookup_memory[package]
    else:
        data = get_package_data(package, index_url, verbose)
    if not version:
        version = get_latest_version(data, include_prereleases)
        assert version
        if verbose:
            _verbose("Latest version for {0} is {1}".format(package, version))
    # Independent of how the caller case-typed it, use PyPI's canonical
    # spelling of the package name.
    package = data["info"]["name"]
    try:
        releases = data["releases"][version]
    except KeyError:
        raise PackageError("No data found for version {0}".format(version))
    if python_versions:
        releases = filter_releases(releases, python_versions)
    if not releases:
        message = "No releases could be found for {0}".format(version)
        if python_versions:
            message = (
                "No releases could be found for "
                "{0} matching Python versions {1}".format(version, python_versions)
            )
        raise PackageError(message)
    hashes = sorted(
        get_releases_hashes(releases=releases, algorithm=algorithm, verbose=verbose),
        key=lambda found: found["hash"],
    )
    return {"package": package, "version": version, "hashes": hashes}
def get_parser():
    """Build hashin's command line argument parser."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "packages",
        help="One or more package specifiers (e.g. some-package or some-package==1.2.3)",
        nargs="*",
    )
    # (flag names, add_argument keyword arguments), in the order the
    # options should appear in --help output.
    options = [
        (
            ("-r", "--requirements-file"),
            dict(
                help="requirements file to write to (default requirements.txt)",
                default="requirements.txt",
            ),
        ),
        (
            ("-a", "--algorithm"),
            dict(
                help="The hash algorithm to use: one of sha256, sha384, sha512",
                default=DEFAULT_ALGORITHM,
            ),
        ),
        (("-v", "--verbose"), dict(help="Verbose output", action="store_true")),
        (
            ("--include-prereleases",),
            dict(help="Include pre-releases (off by default)", action="store_true"),
        ),
        (
            ("-p", "--python-version"),
            dict(
                help="Python version to add wheels for. May be used multiple times.",
                action="append",
                default=[],
            ),
        ),
        (
            ("--version",),
            dict(help="Version of hashin", action="store_true", default=False),
        ),
        (
            ("-d", "--dry-run"),
            dict(
                help="Don't touch requirements.txt and just show the diff",
                action="store_true",
                default=False,
            ),
        ),
        (
            ("-u", "--update-all"),
            dict(
                help="Update all mentioned packages in the requirements file.",
                action="store_true",
                default=False,
            ),
        ),
        (
            ("-i", "--interactive"),
            dict(
                help=(
                    "Ask about each possible update. "
                    "Only applicable together with --update-all/-u."
                ),
                action="store_true",
                default=False,
            ),
        ),
        (
            ("--synchronous",),
            dict(
                help="Do not download from pypi in parallel.",
                action="store_true",
                default=False,
            ),
        ),
        (
            ("--index-url",),
            dict(
                help="alternate package index url (default {0})".format(
                    DEFAULT_INDEX_URL
                ),
                default=DEFAULT_INDEX_URL,
            ),
        ),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser
def main():
    """Command line entry point. Returns the process exit code."""
    if "--version" in sys.argv[1:]:
        # Handled before argparse because the 'packages' positional is
        # otherwise expected; just print out the version of self.
        import pkg_resources

        print(pkg_resources.get_distribution("hashin").version)
        return 0
    parser = get_parser()
    args = parser.parse_args()
    mistook_requirements_file = (
        args.update_all
        and len(args.packages) == 1
        and args.packages[0].endswith(".txt")
        and os.path.isfile(args.packages[0])
    )
    if mistook_requirements_file:
        # It's totally common to make the mistake of using the
        # `--update-all` flag and specifying the requirements file as the
        # first argument. E.g.
        #
        #   $ hashin --update-all --interactive myproject/reqs.txt
        #
        # The user intention is clear, so treat that lone positional as
        # the requirements file rather than as a package name.
        args.requirements_file = args.packages[0]
        args.packages = []
    # Validate the flag combination before doing any real work.
    if args.update_all:
        if args.packages:
            print(
                "Can not combine the --update-all option with a list of packages.",
                file=sys.stderr,
            )
            return 2
    elif args.interactive:
        print(
            "--interactive (or -i) is only applicable together "
            "with --update-all (or -u).",
            file=sys.stderr,
        )
        return 4
    elif not args.packages:
        print("If you don't use --update-all you must list packages.", file=sys.stderr)
        parser.print_usage()
        return 3
    try:
        return run(
            args.packages,
            args.requirements_file,
            args.algorithm,
            args.python_version,
            verbose=args.verbose,
            include_prereleases=args.include_prereleases,
            dry_run=args.dry_run,
            interactive=args.interactive,
            synchronous=args.synchronous,
            index_url=args.index_url,
        )
    except PackageError as exception:
        print(str(exception), file=sys.stderr)
        return 1
# Script entry point: exit the process with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| peterbe/hashin | hashin.py | Python | mit | 27,305 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et fdm=marker : */
from django.db import models
class TreePathModel(models.Model):
    """
    Abstract base model for one row of a tree "closure table".

    The concrete 'ancestor' and 'descendant' foreign keys are NOT declared
    here; register() below contributes them dynamically for each registered
    tree model, which is why Meta can already reference them.
    """
    # ancestor = models.ForeignKey('Node', related_name='tpa')
    # descendant = models.ForeignKey('Node', related_name='tpd')
    # Length of the ancestor -> descendant path (presumably the number of
    # edges — TODO confirm against the code that writes these rows).
    path_len = models.IntegerField(db_index=True)
    class Meta:
        # At most one path row per (ancestor, descendant) pair.
        unique_together = ('ancestor', 'descendant')
        abstract = True
        index_together = [
            ["ancestor", "descendant", "path_len"],
        ]
    def __unicode__(self):
        # Python 2 style display: "ancestor -> descendant (path_len)".
        return '%s -> %s (%d)' % (self.ancestor, self.descendant, self.path_len)
def register(cls):
    """
    Create and attach a tree-path (closure table) model for ``cls``.

    Builds a ``<ClsName>TreePath`` subclass of TreePathModel in cls's own
    module, contributes 'ancestor' and 'descendant' foreign keys pointing
    back at cls, and stores the generated model on cls as ``_tpm``.

    :param cls: the Django model class to register.
    :return: the generated tree-path model class.
    """
    path_model = type(
        cls.__name__ + 'TreePath',
        (TreePathModel,),
        {'__module__': cls.__module__},
    )
    # Contribute the two foreign keys the abstract base left undeclared,
    # ancestor first so field creation order stays stable.
    for field_name, related in (('ancestor', 'tpa'), ('descendant', 'tpd')):
        foreign_key = models.ForeignKey(cls, related_name=related)
        foreign_key.contribute_to_class(path_model, field_name)
    cls._tpm = path_model
    cls._cls = cls
    return path_model
| HiddenData/django-ctt | ctt/core.py | Python | mit | 1,176 |
#!/usr/bin/env python
"""file_utils.py: convenient file operations used by derpbox"""
__author__ = "Waris Boonyasiriwat"
__copyright__ = "Copyright 2017"
import os
import hashlib
def md5(filename):
    """Return the hex MD5 digest of the file at ``filename``, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(filename, "rb") as source:
        chunk = source.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = source.read(4096)
    return digest.hexdigest()
def create_file_obj(id, root_path, path):
    """
    Describe one filesystem entry as a dict.

    Keys: 'id', 'path' (relative), 'isDirectory', plus - for regular
    files only - 'hash', the MD5 of the file contents.

    Note: the entry is located at root_path + path (plain concatenation),
    so root_path is expected to end with a separator or path to start
    with one.
    """
    full_path = root_path + path
    is_directory = os.path.isdir(full_path)
    file_obj = {'id': id, 'path': path, 'isDirectory': is_directory}
    if not is_directory:
        file_obj['hash'] = md5(full_path)
    return file_obj
def get_paths_recursive(root_path):
    """
    Walk root_path and return every file and directory under it as a list
    of relative paths, using '/' separators on every platform.
    """
    found = []
    for current, dir_names, file_names in os.walk(root_path):
        # Record this directory's files first, then its subdirectories,
        # matching os.walk's per-directory listing order.
        for name in list(file_names) + list(dir_names):
            relative = os.path.relpath(os.path.join(current, name), root_path)
            found.append(relative.replace('\\', '/'))
    return found
| warisb/derpbox | DerpBox/file_utils.py | Python | mit | 1,062 |
#!/usr/bin/python
# Filename: stringtools.py
import string
def split_embiggen_sort(sentence):
    '''
    Normalize a sentence into a list of words: uppercase it, strip the
    punctuation, split it on whitespace and finally order the words by
    length (the ordering itself is delegated to big_to_little).
    '''
    shouted = embiggen(sentence)
    cleaned = stripper3(shouted)
    words = divide_at_spaces(cleaned)
    return big_to_little(words)
def stripper3(sentence):
    '''
    Return ``sentence`` with every ASCII punctuation character
    (string.punctuation) removed.
    '''
    kept = [character for character in sentence
            if character not in string.punctuation]
    return "".join(kept)
def embiggen(sentence):
    '''
    Return an uppercase copy of ``sentence``.

    Raises AssertionError when given anything but a str.
    '''
    assert(type(sentence) is str)
    return sentence.upper()
def divide_at_spaces(sentence):
    '''
    Split ``sentence`` on runs of whitespace and return the pieces as a
    list of strings (no empty strings are produced).

    Raises AssertionError when given anything but a str.
    '''
    assert(type(sentence) is str)
    return sentence.split()
def big_to_little(bits):
    '''
    Sort a list of words in place from longest to shortest and return the
    same list.
    '''
    assert(type(bits) is list)
    # reverse=True gives the documented longest-to-shortest order; the
    # previous plain key=len sort was ascending, contradicting both this
    # docstring and split_embiggen_sort's promise of descending order.
    bits.sort(key=len, reverse=True)
return bits | alistairwalsh/NeuralCode_2015_05_8 | stringtools.py | Python | mit | 1,324 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional 'cls' response hook every operation accepts:
# it is called as cls(pipeline_response, deserialized_body, response_headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations:
"""ConnectionMonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Wire up the shared pipeline client, the serializer/deserializer
        # pair and the client configuration. Per the class docstring,
        # instances are created by the service client, not directly.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        parameters: "_models.ConnectionMonitor",
        **kwargs: Any
    ) -> "_models.ConnectionMonitorResult":
        # Issue the initial PUT of the create-or-update long-running
        # operation and deserialize the immediate 200/201 payload;
        # begin_create_or_update() layers the LRO polling on top of this.
        # NOTE: AutoRest-generated code - hand edits will be lost on
        # regeneration (see the file header).
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the ConnectionMonitor model as the PUT body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ConnectionMonitor')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both accepted statuses (200 update, 201 create) carry a
        # ConnectionMonitorResult body.
        if response.status_code == 200:
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        parameters: "_models.ConnectionMonitor",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ConnectionMonitorResult"]:
        """Create or update a connection monitor.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :param parameters: Parameters that define the operation to create a connection monitor.
        :type parameters: ~azure.mgmt.network.v2018_07_01.models.ConnectionMonitor
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ConnectionMonitorResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.ConnectionMonitorResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # LRO driver: issues the initial PUT via
        # _create_or_update_initial and wraps the result in a poller.
        # AutoRest-generated code (see the file header).
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial request when not resuming a previous
        # operation from a continuation token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserializes the final polled response (and applies the
        # caller's 'cls' hook, if any).
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> standard ARM polling; False -> no polling;
        # anything else is treated as a caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> "_models.ConnectionMonitorResult":
        """Gets a connection monitor by name.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ConnectionMonitorResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_07_01.models.ConnectionMonitorResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Plain (non-LRO) GET. AutoRest-generated code (see file header).
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
        if cls:
            # The caller-supplied response hook takes precedence over the
            # plain deserialized model.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> None:
        # Issue the initial DELETE of the delete long-running operation;
        # begin_delete() layers the LRO polling on top of this.
        # AutoRest-generated code (see the file header).
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 = deletion accepted (in progress), 204 = nothing to delete;
        # no body is returned either way.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified connection monitor.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # LRO driver: issues the initial DELETE via _delete_initial and
        # wraps the result in a poller. AutoRest-generated code.
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial request when not resuming a previous
        # operation from a continuation token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # The delete operation produces no body; only the optional caller
        # hook gets a chance to transform the final response.
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> standard ARM polling; False -> no polling;
        # anything else is treated as a caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def _stop_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> None:
        # Issue the initial POST to the .../stop action of the stop
        # long-running operation; the (not shown here) begin_stop wrapper
        # layers the LRO polling on top. AutoRest-generated code.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self._stop_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = stopped synchronously, 202 = stop accepted (in progress);
        # no body is returned either way.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'}  # type: ignore
async def begin_stop(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Stops the specified connection monitor.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Consume LRO-control keywords so they are not forwarded to the request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    retry_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    saved_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if saved_token is None:
        # Fresh operation: issue the initial POST and keep the raw pipeline
        # response so the poller can drive the long-running operation.
        raw_result = await self._stop_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # This operation returns no body; only invoke the custom callback.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Resolve the polling strategy: default ARM polling, no polling,
    # or a caller-supplied polling object.
    if polling is True:
        polling_method = AsyncARMPolling(retry_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if saved_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=saved_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'}  # type: ignore
async def _start_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial POST for the start-connection-monitor LRO.

    Internal helper for ``begin_start``; returns ``None`` on 200/202 and
    raises :class:`~azure.core.exceptions.HttpResponseError` otherwise.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known status codes to typed exceptions; callers may extend
    # the map via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Construct URL
    url = self._start_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = completed synchronously, 202 = accepted for async processing.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'}  # type: ignore
async def begin_start(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Starts the specified connection monitor.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Strip the LRO-control keywords before forwarding kwargs downstream.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    poll_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    resume_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if resume_token is None:
        # No saved state: kick off the operation with the initial POST.
        initial_response = await self._start_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Operation has no result body; only invoke the custom callback.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Choose how to poll: ARM polling (default), no polling, or a
    # caller-provided polling object.
    if polling is True:
        polling_method = AsyncARMPolling(poll_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if resume_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=resume_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return AsyncLROPoller(self._client, initial_response, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'}  # type: ignore
async def _query_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> "_models.ConnectionMonitorQueryResult":
    """Issue the initial POST for the query-connection-monitor LRO.

    Internal helper for ``begin_query``; deserializes the 200/202 body
    into a ``ConnectionMonitorQueryResult`` and raises
    :class:`~azure.core.exceptions.HttpResponseError` on other codes.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorQueryResult"]
    # Map well-known status codes to typed exceptions; callers may extend
    # the map via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Construct URL
    url = self._query_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Both 200 (done) and 202 (accepted) carry a query-result payload.
    if response.status_code == 200:
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'}  # type: ignore
async def begin_query(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> AsyncLROPoller["_models.ConnectionMonitorQueryResult"]:
    """Query a snapshot of the most recent connection states.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name given to the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.ConnectionMonitorQueryResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Consume LRO-control keywords so they are not forwarded to the request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorQueryResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial POST; keep the raw pipeline
        # response for the poller.
        raw_result = await self._query_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final body once polling completes.
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Select polling strategy: ARM polling (default), none, or caller-supplied.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'}  # type: ignore
def list(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.ConnectionMonitorListResult"]:
    """Lists all connection monitors for the specified Network Watcher.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ConnectionMonitorListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorListResult"]
    # Map well-known status codes to typed exceptions; callers may extend.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page GET (formatted from the operation
        # URL template) or a follow-up GET against the server-supplied
        # next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already embeds all query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Turn one page response into (continuation_token, items).
        # This API exposes no nextLink, so the continuation token is None.
        deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a page and fail fast on any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_connection_monitors_operations.py | Python | mit | 41,918 |
"""
Tests for xsiftx.util functions
"""
import os
import stat
import unittest
from mock import patch
from .util import mkdtemp_clean
from xsiftx.util import (
get_sifters,
get_course_list,
get_settings,
run_sifter,
XsiftxException,
SifterException
)
class TestUtils(unittest.TestCase):
    """
    Test series for util functions in xsiftx.util
    """
    # pylint: disable=r0904

    # Sifters that ship with xsiftx and must always be discoverable.
    KNOWN_SIFTERS = [
        'copy_file',
        'xqanalyze',
        'content_statistics',
        'test_sifters',
        'dump_grades',
    ]
    # Default locations of an edx-platform checkout and its virtualenv.
    EDX_ROOT = '/edx/app/edxapp/edx-platform'
    EDX_VENV = '/edx/app/edxapp/venvs/edxapp'
    # Name of the throwaway always-failing sifter built by _make_bad_sifter().
    BAD_SIFTER = 'testenv_sifter'

    def _make_bad_sifter(self):
        """
        Create an executable sifter that always exits non-zero, and point
        the SIFTER_DIR environment variable at it so get_sifters() finds it.
        """
        temp_dir = mkdtemp_clean(self)
        sifter_path = os.path.join(temp_dir, self.BAD_SIFTER)
        with open(sifter_path, 'w+') as temp_sifter:
            temp_sifter.write('#!/bin/bash\nfalse')
        perms = os.stat(sifter_path)
        os.chmod(sifter_path, perms.st_mode | stat.S_IEXEC)
        os.environ['SIFTER_DIR'] = temp_dir
        # Don't leak the environment override into other tests.
        self.addCleanup(os.environ.pop, 'SIFTER_DIR', None)

    def test_sifter_list_locations(self):
        """
        Make sure sifter search paths work
        """
        # check default sifters
        self.assertTrue(set(self.KNOWN_SIFTERS).issubset(get_sifters()))
        # Add environment variable of extra sifter folder, ensure it is added
        # to list
        self._make_bad_sifter()
        self.assertTrue(self.BAD_SIFTER in get_sifters())

    @unittest.skipUnless(os.environ.get('XSIFTX_TEST_EDX', None),
                         'Requires an edx environment and XSIFTX_TEST_EDX '
                         'environment variable set.')
    def test_course_list(self):
        """
        Make sure we can get a course list.

        Requires that we have an edx-platform environment at default
        coords
        """
        with self.assertRaisesRegexp(XsiftxException,
                                     'No such file or dir.*'):
            get_course_list('nope', 'nope')
        self.assertTrue(len(get_course_list(self.EDX_VENV, self.EDX_ROOT)) > 0)

    @unittest.skipUnless(os.environ.get('XSIFTX_TEST_EDX', None),
                         'Requires an edx environment and XSIFTX_TEST_EDX '
                         'environment variable set.')
    def test_get_settings(self):
        """
        Test that we can get settings and fail nicely without them
        """
        settings_keys = [
            'use_s3',
            'aws_key',
            'root_path',
            'bucket',
            'aws_key_id',
        ]
        with self.assertRaisesRegexp(XsiftxException,
                                     'Cannot find lms.*'):
            get_settings('nope')
        settings = get_settings(self.EDX_ROOT)
        # Compare key sets order-insensitively: dict key order is arbitrary
        # (and dict.keys() is a view, not a list, on Python 3), so a direct
        # equality check against a hand-written list is flaky.
        self.assertEqual(sorted(settings_keys), sorted(settings.keys()))
        self.assertIsNotNone(settings.get('root_path'))
        self.assertIsNotNone(settings.get('bucket'))

    @unittest.skipUnless(os.environ.get('XSIFTX_TEST_EDX', None),
                         'Requires an edx environment and XSIFTX_TEST_EDX '
                         'environment variable set.')
    def test_run_sifter(self):
        """
        Run sifters to make sure the runner is working right
        """
        # Try with bad root
        with self.assertRaisesRegexp(XsiftxException,
                                     'Cannot find lms*'):
            run_sifter('nope', 'nope', 'nope', 'nope', 'nope')

        # Mock out settings so we know where output is going
        bucket = 'reports'
        course = 'stuff'
        temp_dir = mkdtemp_clean(self)
        with patch('xsiftx.util.get_settings') as mock_settings:
            mock_settings.return_value = {
                'use_s3': False,
                'aws_key': '',
                'root_path': temp_dir,
                'bucket': bucket,
                'aws_key_id': ''
            }
            # Test that a bad sifter raises
            with self.assertRaisesRegexp(SifterException,
                                         'Sifter .+ called with'):
                self._make_bad_sifter()
                run_sifter(get_sifters()[self.BAD_SIFTER],
                           course, self.EDX_VENV, self.EDX_ROOT, [])
            # Test that a good sifter creates the expected file
            run_sifter(get_sifters()['test_sifters'],
                       course, self.EDX_VENV, self.EDX_ROOT, [])
            self.assertTrue(mock_settings.called)
            self.assertTrue(os.path.exists(
                os.path.join(temp_dir, course, 'test_sifter.txt')
            ))
| mitocw/xsiftx | xsiftx/tests/test_util.py | Python | gpl-3.0 | 4,654 |
# Copyright 2015 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 7000 Series All-Flash Array Common Driver
"""
import math
import mock
from oslo_utils import units
from jacket import context
from jacket.storage import exception
from jacket.storage import test
from jacket.tests.storage.unit import fake_vmem_client as vmemclient
from jacket.storage.volume import configuration as conf
from jacket.storage.volume.drivers.violin import v7000_common
from jacket.storage.volume import volume_types
# Shared fixtures for the V7000 tests below: a fake volume, a snapshot of
# that volume, a second source volume, and an iSCSI connector dict.
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {"name": "volume-" + VOLUME_ID,
          "id": VOLUME_ID,
          "display_name": "fake_volume",
          "size": 2,  # GiB
          "host": "irrelevant",
          "volume_type": None,
          "volume_type_id": None,
          }
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {"name": "snapshot-" + SNAPSHOT_ID,
            "id": SNAPSHOT_ID,
            "volume_id": VOLUME_ID,
            "volume_name": "volume-" + VOLUME_ID,
            "volume_size": 2,
            "display_name": "fake_snapshot",
            }
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {"name": "volume-" + SRC_VOL_ID,
           "id": SRC_VOL_ID,
           "display_name": "fake_src_vol",
           "size": 2,  # GiB
           "host": "irrelevant",
           "volume_type": None,
           "volume_type_id": None,
           }
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {"initiator": INITIATOR_IQN}
class V7000CommonTestCase(test.TestCase):
"""Test case for Violin drivers."""
def setUp(self):
    """Build a driver instance wired to a mocked configuration."""
    super(V7000CommonTestCase, self).setUp()
    self.conf = self.setup_configuration()
    self.driver = v7000_common.V7000Common(self.conf)
    # Fixed container/device identifiers used by the driver under test.
    self.driver.container = 'myContainer'
    self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
    self.stats = {}
def tearDown(self):
    """No extra cleanup needed; defer to the base test case."""
    super(V7000CommonTestCase, self).tearDown()
def setup_configuration(self):
    """Return a mocked Configuration carrying the driver defaults used here."""
    config = mock.Mock(spec=conf.Configuration)
    config.configure_mock(
        volume_backend_name='v7000_common',
        san_ip='1.1.1.1',
        san_login='admin',
        san_password='',
        san_thin_provision=False,
        san_is_local=False,
        gateway_mga='2.2.2.2',
        gateway_mgb='3.3.3.3',
        use_igroups=False,
        violin_request_timeout=300,
        container='myContainer',
    )
    return config
@mock.patch('vmemclient.open')
def setup_mock_client(self, _m_client, m_conf=None):
    """Create a fake backend communication factory.

    The xg-tools creates a Concerto connection object (for V7000
    devices) and returns it for use on a call to vmemclient.open().
    """
    # configure the concerto object mock with defaults
    _m_concerto = mock.Mock(name='Concerto',
                            version='1.1.1',
                            spec=vmemclient.mock_client_conf)

    # if m_conf, clobber the defaults with it
    if m_conf:
        _m_concerto.configure_mock(**m_conf)

    # set calls to vmemclient.open() to return this mocked concerto object
    _m_client.return_value = _m_concerto

    return _m_client
def setup_mock_concerto(self, m_conf=None):
    """Create a fake Concerto communication object.

    Any entries in m_conf override the mock's default behavior.
    """
    mock_concerto = mock.Mock(
        name='Concerto', version='1.1.1', spec=vmemclient.mock_client_conf)
    if m_conf:
        mock_concerto.configure_mock(**m_conf)
    return mock_concerto
def test_check_for_setup_error(self):
    """No setup errors are found."""
    self.driver.vmem_mg = self.setup_mock_concerto()
    self.driver._is_supported_vmos_version = mock.Mock(return_value=True)

    result = self.driver.check_for_setup_error()

    # The driver must validate the backend's reported version.
    self.driver._is_supported_vmos_version.assert_called_with(
        self.driver.vmem_mg.version)
    self.assertIsNone(result)
def test_create_lun(self):
    """Lun is successfully created."""
    success_rsp = {'success': True, 'msg': 'Create resource successfully.'}
    size_in_mb = VOLUME['size'] * units.Ki
    m_conf = {'lun.create_lun.return_value': success_rsp}

    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=m_conf)
    self.driver._send_cmd = mock.Mock(return_value=success_rsp)

    result = self.driver._create_lun(VOLUME)

    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.lun.create_lun,
        'Create resource successfully.',
        VOLUME['id'], size_in_mb, False, False, size_in_mb,
        storage_pool=None)
    self.assertIsNone(result)
def test_create_dedup_lun(self):
    """Lun is successfully created."""
    vol = VOLUME.copy()
    vol['size'] = 100
    vol['volume_type_id'] = '1'

    response = {'success': True, 'msg': 'Create resource successfully.'}
    size_in_mb = vol['size'] * units.Ki
    full_size_mb = size_in_mb

    conf = {
        'lun.create_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._send_cmd = mock.Mock(return_value=response)

    # simulate extra specs of {'thin': 'true', 'dedupe': 'true'}
    self.driver._get_volume_type_extra_spec = mock.Mock(
        return_value="True")

    self.driver._get_violin_extra_spec = mock.Mock(
        return_value=None)

    result = self.driver._create_lun(vol)

    # Dedup luns are thin-provisioned at 1/10th the nominal size, with
    # the full size passed as a separate argument.
    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.lun.create_lun,
        'Create resource successfully.',
        VOLUME['id'], size_in_mb / 10, True, True, full_size_mb,
        storage_pool=None)
    self.assertIsNone(result)
def test_fail_extend_dedup_lun(self):
    """Volume extend fails when new size would shrink the volume."""
    failure = exception.VolumeDriverException
    vol = VOLUME.copy()
    vol['volume_type_id'] = '1'

    size_in_mb = vol['size'] * units.Ki

    self.driver.vmem_mg = self.setup_mock_concerto()

    # simulate extra specs of {'thin': 'true', 'dedupe': 'true'}
    self.driver._get_volume_type_extra_spec = mock.Mock(
        return_value="True")

    # Extending a dedup lun is expected to be rejected by the driver.
    self.assertRaises(failure, self.driver._extend_lun,
                      vol, size_in_mb)
def test_create_non_dedup_lun(self):
    """Lun is successfully created."""
    vol = VOLUME.copy()
    vol['size'] = 100
    vol['volume_type_id'] = '1'

    response = {'success': True, 'msg': 'Create resource successfully.'}
    size_in_mb = vol['size'] * units.Ki
    full_size_mb = size_in_mb

    conf = {
        'lun.create_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._send_cmd = mock.Mock(return_value=response)

    # simulate extra specs of {'thin': 'false', 'dedupe': 'false'}
    self.driver._get_volume_type_extra_spec = mock.Mock(
        return_value="False")

    self.driver._get_violin_extra_spec = mock.Mock(
        return_value=None)

    result = self.driver._create_lun(vol)

    # Thick lun: created at full size with thin/dedup flags off.
    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.lun.create_lun,
        'Create resource successfully.',
        VOLUME['id'], size_in_mb, False, False, full_size_mb,
        storage_pool=None)
    self.assertIsNone(result)
def test_create_lun_fails(self):
    """Array returns error that the lun already exists."""
    dup_rsp = {'success': False,
               'msg': 'Duplicate Virtual Device name. Error: 0x90010022'}
    m_conf = {'lun.create_lun.return_value': dup_rsp}

    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=m_conf)
    self.driver._send_cmd = mock.Mock(return_value=dup_rsp)

    # A duplicate-name response is treated as success (idempotent create).
    self.assertIsNone(self.driver._create_lun(VOLUME))
def test_create_lun_on_a_storage_pool(self):
    """Lun is successfully created."""
    vol = VOLUME.copy()
    vol['size'] = 100
    vol['volume_type_id'] = '1'

    response = {'success': True, 'msg': 'Create resource successfully.'}
    size_in_mb = vol['size'] * units.Ki
    full_size_mb = size_in_mb

    conf = {
        'lun.create_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._send_cmd = mock.Mock(return_value=response)
    self.driver._get_volume_type_extra_spec = mock.Mock(
        return_value="False")

    # simulates extra specs: {'storage_pool', 'StoragePool'}
    self.driver._get_violin_extra_spec = mock.Mock(
        return_value="StoragePool")

    result = self.driver._create_lun(vol)

    # The pool name from the extra spec must be forwarded to the backend.
    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.lun.create_lun,
        'Create resource successfully.',
        VOLUME['id'], size_in_mb, False, False, full_size_mb,
        storage_pool="StoragePool")
    self.assertIsNone(result)
def test_delete_lun(self):
    """Lun is deleted successfully."""
    response = {'success': True, 'msg': 'Delete resource successfully'}
    success_msgs = ['Delete resource successfully', '']

    conf = {
        'lun.delete_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._send_cmd = mock.Mock(return_value=response)
    self.driver._delete_lun_snapshot_bookkeeping = mock.Mock()

    result = self.driver._delete_lun(VOLUME)

    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.lun.delete_lun,
        success_msgs, VOLUME['id'], True)
    # Deleting the lun must also purge its snapshot bookkeeping.
    self.driver._delete_lun_snapshot_bookkeeping.assert_called_with(
        VOLUME['id'])

    self.assertIsNone(result)

# TODO(rlucio) More delete lun failure cases to be added after
# collecting the possible responses from Concerto
def test_extend_lun(self):
    """Volume extend completes successfully."""
    new_size_gb = 10
    delta_mb = (new_size_gb - VOLUME['size']) * units.Ki
    ok_rsp = {'success': True, 'message': 'Expand resource successfully'}
    m_conf = {'lun.extend_lun.return_value': ok_rsp}

    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=m_conf)
    self.driver._send_cmd = mock.Mock(return_value=ok_rsp)

    result = self.driver._extend_lun(VOLUME, new_size_gb)

    # The backend is asked to grow the lun by the size delta in MB.
    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.lun.extend_lun,
        ok_rsp['message'], VOLUME['id'], delta_mb)
    self.assertIsNone(result)
def test_extend_lun_new_size_is_too_small(self):
    """Volume extend fails when new size would shrink the volume."""
    new_volume_size = 0
    change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki

    response = {'success': False, 'msg': 'Invalid size. Error: 0x0902000c'}
    failure = exception.ViolinBackendErr

    # NOTE(review): conf stubs 'lun.resize_lun' while the driver path
    # under test uses extend_lun; harmless here because _send_cmd itself
    # is mocked to raise, but confirm against the driver API.
    conf = {
        'lun.resize_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))

    self.assertRaises(failure, self.driver._extend_lun,
                      VOLUME, change_in_size_mb)
def test_create_volume_from_snapshot(self):
    """Create a new storage volume from a given snapshot of a lun."""
    object_id = '12345'
    vdev_id = 11111
    response = {'success': True,
                'object_id': object_id,
                'msg': 'Copy TimeMark successfully.'}
    lun_info = {'virtualDeviceID': vdev_id}
    compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'

    conf = {
        'lun.copy_snapshot_to_new_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._compress_snapshot_id = mock.Mock(
        return_value=compressed_snap_id)
    self.driver.vmem_mg.lun.get_lun_info = mock.Mock(return_value=lun_info)
    self.driver._wait_for_lun_or_snap_copy = mock.Mock()

    result = self.driver._create_volume_from_snapshot(SNAPSHOT, VOLUME)

    # The copy must be keyed on the compressed snapshot comment...
    self.driver.vmem_mg.lun.copy_snapshot_to_new_lun.assert_called_with(
        source_lun=SNAPSHOT['volume_id'],
        source_snapshot_comment=compressed_snap_id,
        destination=VOLUME['id'], storage_pool=None)
    # ...and the driver must wait on the copy using the vdev id returned
    # by the backend for the copy object.
    self.driver.vmem_mg.lun.get_lun_info.assert_called_with(
        object_id=object_id)
    self.driver._wait_for_lun_or_snap_copy.assert_called_with(
        SNAPSHOT['volume_id'], dest_vdev_id=vdev_id)

    self.assertIsNone(result)
def test_create_volume_from_snapshot_on_a_storage_pool(self):
    """Create a volume from a snapshot when a storage-pool spec is set."""
    dest_vol = VOLUME.copy()
    dest_vol['size'] = 100
    # A volume_type_id makes the driver consult the extra specs.
    dest_vol['volume_type_id'] = '1'
    object_id = '12345'
    vdev_id = 11111
    response = {'success': True,
                'object_id': object_id,
                'msg': 'Copy TimeMark successfully.'}
    lun_info = {'virtualDeviceID': vdev_id}
    compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
    conf = {
        'lun.copy_snapshot_to_new_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._compress_snapshot_id = mock.Mock(
        return_value=compressed_snap_id)
    self.driver.vmem_mg.lun.get_lun_info = mock.Mock(return_value=lun_info)
    self.driver._wait_for_lun_or_snap_copy = mock.Mock()
    # simulates extra specs: {'storage_pool', 'StoragePool'}
    self.driver._get_violin_extra_spec = mock.Mock(
        return_value="StoragePool")
    result = self.driver._create_volume_from_snapshot(SNAPSHOT, dest_vol)
    self.assertIsNone(result)
def test_create_volume_from_snapshot_fails(self):
    """Array returns error that the lun already exists."""
    response = {'success': False,
                'msg': 'Duplicate Virtual Device name. Error: 0x90010022'}
    compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
    failure = exception.ViolinBackendErrExists
    conf = {
        'lun.copy_snapshot_to_new_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._compress_snapshot_id = mock.Mock(
        return_value=compressed_snap_id)
    # _send_cmd is mocked exactly once, to raise.  The original test
    # first assigned a return_value mock that was immediately
    # overwritten by this one — dead code, removed.
    self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))
    self.assertRaises(failure, self.driver._create_volume_from_snapshot,
                      SNAPSHOT, VOLUME)
def test_create_lun_from_lun(self):
    """lun full clone to new volume completes successfully."""
    object_id = '12345'
    response = {'success': True,
                'object_id': object_id,
                'msg': 'Copy Snapshot resource successfully'}
    conf = {
        'lun.copy_lun_to_new_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._ensure_snapshot_resource_area = mock.Mock()
    self.driver._wait_for_lun_or_snap_copy = mock.Mock()
    result = self.driver._create_lun_from_lun(SRC_VOL, VOLUME)
    # A snapshot resource area must exist on the clone source ...
    self.driver._ensure_snapshot_resource_area.assert_called_with(
        SRC_VOL['id'])
    # ... then the copy is issued and awaited by object id.
    self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
        source=SRC_VOL['id'], destination=VOLUME['id'], storage_pool=None)
    self.driver._wait_for_lun_or_snap_copy.assert_called_with(
        SRC_VOL['id'], dest_obj_id=object_id)
    self.assertIsNone(result)
def test_create_lun_from_lun_on_a_storage_pool(self):
    """lun full clone succeeds when a storage-pool extra spec is set."""
    dest_vol = VOLUME.copy()
    dest_vol['size'] = 100
    # A volume_type_id makes the driver consult the extra specs.
    dest_vol['volume_type_id'] = '1'
    object_id = '12345'
    response = {'success': True,
                'object_id': object_id,
                'msg': 'Copy Snapshot resource successfully'}
    conf = {
        'lun.copy_lun_to_new_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._ensure_snapshot_resource_area = mock.Mock()
    self.driver._wait_for_lun_or_snap_copy = mock.Mock()
    # simulates extra specs: {'storage_pool', 'StoragePool'}
    self.driver._get_violin_extra_spec = mock.Mock(
        return_value="StoragePool")
    result = self.driver._create_lun_from_lun(SRC_VOL, dest_vol)
    self.driver._ensure_snapshot_resource_area.assert_called_with(
        SRC_VOL['id'])
    # The pool from the extra spec must be forwarded to the array call.
    self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
        source=SRC_VOL['id'], destination=dest_vol['id'],
        storage_pool="StoragePool")
    self.driver._wait_for_lun_or_snap_copy.assert_called_with(
        SRC_VOL['id'], dest_obj_id=object_id)
    self.assertIsNone(result)
def test_create_lun_from_lun_fails(self):
    """lun full clone fails when the array reports a backend error."""
    failure = exception.ViolinBackendErr
    response = {'success': False,
                'msg': 'Snapshot Resource is not created '
                'for this virtual device. Error: 0x0901008c'}
    conf = {
        'lun.copy_lun_to_new_lun.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    self.driver._ensure_snapshot_resource_area = mock.Mock()
    # _send_cmd raises, simulating the array-side failure above.
    self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))
    self.assertRaises(failure, self.driver._create_lun_from_lun,
                      SRC_VOL, VOLUME)
def test_send_cmd(self):
    """Command callback completes successfully."""
    reply = {'success': True, 'msg': 'Operation successful'}
    callback = mock.Mock(return_value=reply)
    # A successful response is handed back to the caller unchanged.
    self.assertEqual(
        reply,
        self.driver._send_cmd(callback, 'success',
                              ['arg1', 'arg2', 'arg3']))
def test_send_cmd_request_timed_out(self):
    """The callback retry timeout hits immediately."""
    failure = exception.ViolinRequestRetryTimeout
    success_msg = 'success'
    request_args = ['arg1', 'arg2', 'arg3']
    # A zero timeout forces the retry loop to expire at once.
    self.conf.violin_request_timeout = 0
    request_func = mock.Mock()
    self.assertRaises(failure, self.driver._send_cmd,
                      request_func, success_msg, request_args)
def test_send_cmd_response_has_no_message(self):
    """The callback returns no message on the first call."""
    first = {'success': True, 'msg': None}
    second = {'success': True, 'msg': 'success'}
    callback = mock.Mock(side_effect=[first, second])
    # _send_cmd keeps retrying until the response carries a message,
    # so the second reply is the one returned.
    result = self.driver._send_cmd(callback, 'success',
                                   ['arg1', 'arg2', 'arg3'])
    self.assertEqual(second, result)
def test_check_error_code(self):
    """Return an exception for a valid error code."""
    bad_response = {'success': False, 'msg': 'Error: 0x90000000'}
    # A fatal error code must be translated into a backend exception.
    self.assertRaises(exception.ViolinBackendErr,
                      self.driver._check_error_code, bad_response)
def test_check_error_code_non_fatal_error(self):
    """Returns no exception for a non-fatal error code."""
    response = {'success': False, 'msg': 'Error: 0x9001003c'}
    self.assertIsNone(self.driver._check_error_code(response))
def test_compress_snapshot_id(self):
    """A UUID-style snapshot id is compressed to 32 hex characters."""
    test_snap_id = "12345678-abcd-1234-cdef-0123456789ab"
    expected = "12345678abcd1234cdef0123456789ab"
    # Sanity-check the fixture itself.
    # assertEqual (rather than assertTrue(a == b)) so a failure
    # reports both the expected and the actual value.
    self.assertEqual(32, len(expected))
    self.assertEqual(expected,
                     self.driver._compress_snapshot_id(test_snap_id))
def test_ensure_snapshot_resource_area(self):
    """A snapshot resource area is created when the lun lacks one."""
    result_dict = {'success': True, 'res': 'Successful'}
    self.driver.vmem_mg = self.setup_mock_concerto()
    snap = self.driver.vmem_mg.snapshot
    # Report "no resource yet" so the create path is exercised.
    snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False)
    snap.create_snapshot_resource = mock.Mock(return_value=result_dict)
    with mock.patch('storage.db.sqlalchemy.api.volume_get',
                    return_value=VOLUME):
        result = self.driver._ensure_snapshot_resource_area(VOLUME_ID)
    self.assertIsNone(result)
    snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID)
    # Resource area is sized at 20% of the volume size (in MB), with
    # the Concerto default expansion/shrink policy and no pool.
    snap.create_snapshot_resource.assert_called_with(
        lun=VOLUME_ID,
        size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))),
        enable_notification=False,
        policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY,
        enable_expansion=
        v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
        expansion_threshold=
        v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
        expansion_increment=
        v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
        expansion_max_size=
        v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
        enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
        storage_pool=None)
def test_ensure_snapshot_resource_area_with_storage_pool(self):
    """The resource area is created on the pool named in the specs."""
    dest_vol = VOLUME.copy()
    dest_vol['size'] = 2
    dest_vol['volume_type_id'] = '1'
    result_dict = {'success': True, 'res': 'Successful'}
    self.driver.vmem_mg = self.setup_mock_concerto()
    snap = self.driver.vmem_mg.snapshot
    snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False)
    snap.create_snapshot_resource = mock.Mock(return_value=result_dict)
    # simulates extra specs: {'storage_pool', 'StoragePool'}
    self.driver._get_violin_extra_spec = mock.Mock(
        return_value="StoragePool")
    with mock.patch('storage.db.sqlalchemy.api.volume_get',
                    return_value=dest_vol):
        result = self.driver._ensure_snapshot_resource_area(VOLUME_ID)
    self.assertIsNone(result)
    snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID)
    # NOTE(review): the expected size uses VOLUME['size'] although
    # volume_get returns dest_vol (size 2) — presumably the two sizes
    # coincide in the fixtures; confirm VOLUME['size'] == 2.
    snap.create_snapshot_resource.assert_called_with(
        lun=VOLUME_ID,
        size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))),
        enable_notification=False,
        policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY,
        enable_expansion=
        v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
        expansion_threshold=
        v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
        expansion_increment=
        v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
        expansion_max_size=
        v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
        enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
        storage_pool="StoragePool")
def test_ensure_snapshot_resource_policy(self):
    """A default snapshot policy is created when the lun lacks one."""
    result_dict = {'success': True, 'res': 'Successful'}
    self.driver.vmem_mg = self.setup_mock_concerto()
    snap = self.driver.vmem_mg.snapshot
    # Report "no policy yet" so the create path is exercised.
    snap.lun_has_a_snapshot_policy = mock.Mock(return_value=False)
    snap.create_snapshot_policy = mock.Mock(return_value=result_dict)
    result = self.driver._ensure_snapshot_policy(VOLUME_ID)
    self.assertIsNone(result)
    snap.lun_has_a_snapshot_policy.assert_called_with(lun=VOLUME_ID)
    snap.create_snapshot_policy.assert_called_with(
        lun=VOLUME_ID,
        max_snapshots=v7000_common.CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS,
        enable_replication=False,
        enable_snapshot_schedule=False,
        enable_cdp=False,
        retention_mode=v7000_common.CONCERTO_DEFAULT_POLICY_RETENTION_MODE)
def test_delete_lun_snapshot_bookkeeping(self):
    """Policy and resource are removed once no snapshots remain."""
    result_dict = {'success': True, 'res': 'Successful'}
    self.driver.vmem_mg = self.setup_mock_concerto()
    snap = self.driver.vmem_mg.snapshot
    # NOTE: side_effect takes precedence over return_value, so
    # get_snapshots raises NoMatchingObjectIdError ("no snapshots
    # left"); the return_value=[] is effectively unused.
    snap.get_snapshots = mock.Mock(
        return_value=[],
        side_effect=vmemclient.core.error.NoMatchingObjectIdError)
    snap.delete_snapshot_policy = mock.Mock(return_value=result_dict)
    snap.delete_snapshot_resource = mock.Mock()
    result = self.driver._delete_lun_snapshot_bookkeeping(
        volume_id=VOLUME_ID)
    self.assertIsNone(result)
    snap.get_snapshots.assert_called_with(VOLUME_ID)
    snap.delete_snapshot_policy.assert_called_with(lun=VOLUME_ID)
    snap.delete_snapshot_resource.assert_called_with(lun=VOLUME_ID)
def test_create_lun_snapshot(self):
    """A TimeMark snapshot is created with default priority."""
    response = {'success': True, 'msg': 'Create TimeMark successfully'}
    self.driver.vmem_mg = self.setup_mock_concerto()
    # Both prerequisites (resource area and policy) are satisfied.
    self.driver._ensure_snapshot_resource_area = (
        mock.Mock(return_value=True))
    self.driver._ensure_snapshot_policy = mock.Mock(return_value=True)
    self.driver._send_cmd = mock.Mock(return_value=response)
    with mock.patch('storage.db.sqlalchemy.api.volume_get',
                    return_value=VOLUME):
        result = self.driver._create_lun_snapshot(SNAPSHOT)
    self.assertIsNone(result)
    self.driver._ensure_snapshot_resource_area.assert_called_with(
        VOLUME_ID)
    self.driver._ensure_snapshot_policy.assert_called_with(VOLUME_ID)
    # The snapshot is tagged with the compressed snapshot id as comment.
    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.snapshot.create_lun_snapshot,
        'Create TimeMark successfully',
        lun=VOLUME_ID,
        comment=self.driver._compress_snapshot_id(SNAPSHOT_ID),
        priority=v7000_common.CONCERTO_DEFAULT_PRIORITY,
        enable_notification=False)
def test_delete_lun_snapshot(self):
    """A TimeMark snapshot is deleted by its compressed-id comment."""
    response = {'success': True, 'msg': 'Delete TimeMark successfully'}
    compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
    self.driver.vmem_mg = self.setup_mock_concerto()
    self.driver._send_cmd = mock.Mock(return_value=response)
    self.driver._compress_snapshot_id = mock.Mock(
        return_value=compressed_snap_id)
    self.assertIsNone(self.driver._delete_lun_snapshot(SNAPSHOT))
    self.driver._send_cmd.assert_called_with(
        self.driver.vmem_mg.snapshot.delete_lun_snapshot,
        'Delete TimeMark successfully',
        lun=VOLUME_ID,
        comment=compressed_snap_id)
def test_wait_for_lun_or_snap_copy_completes_for_snap(self):
    """waiting for a snapshot to copy succeeds."""
    vdev_id = 11111
    # (id, <unused>, percent-complete): 100% means the copy finished.
    response = (vdev_id, None, 100)
    conf = {
        'snapshot.get_snapshot_copy_status.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    result = self.driver._wait_for_lun_or_snap_copy(
        SRC_VOL['id'], dest_vdev_id=vdev_id)
    # Passing dest_vdev_id selects the snapshot-copy status path.
    (self.driver.vmem_mg.snapshot.get_snapshot_copy_status.
     assert_called_with(SRC_VOL['id']))
    self.assertTrue(result)
def test_wait_for_lun_or_snap_copy_completes_for_lun(self):
    """waiting for a lun to copy succeeds."""
    object_id = '12345'
    # (id, <unused>, percent-complete): 100% means the copy finished.
    response = (object_id, None, 100)
    conf = {
        'lun.get_lun_copy_status.return_value': response,
    }
    self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
    result = self.driver._wait_for_lun_or_snap_copy(
        SRC_VOL['id'], dest_obj_id=object_id)
    # Passing dest_obj_id selects the lun-copy status path.
    self.driver.vmem_mg.lun.get_lun_copy_status.assert_called_with(
        SRC_VOL['id'])
    self.assertTrue(result)
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_volume_type_extra_spec(self,
                                    m_get_volume_type,
                                    m_get_admin_context):
    """Volume_type extra specs are found successfully."""
    vol = VOLUME.copy()
    vol['volume_type_id'] = 1
    # 'override:'-prefixed specs are the ones this helper looks up.
    volume_type = {'extra_specs': {'override:test_key': 'test_value'}}
    m_get_admin_context.return_value = None
    m_get_volume_type.return_value = volume_type
    result = self.driver._get_volume_type_extra_spec(vol, 'test_key')
    m_get_admin_context.assert_called_with()
    m_get_volume_type.assert_called_with(None, vol['volume_type_id'])
    self.assertEqual('test_value', result)
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_violin_extra_spec(self,
                               m_get_volume_type,
                               m_get_admin_context):
    """Violin-prefixed volume_type extra specs are found successfully."""
    vol = VOLUME.copy()
    vol['volume_type_id'] = 1
    volume_type = {'extra_specs': {'violin:test_key': 'test_value'}}
    m_get_admin_context.return_value = None
    m_get_volume_type.return_value = volume_type
    # Bug fix: this test configured a 'violin:'-prefixed spec but then
    # called _get_volume_type_extra_spec (which presumably handles the
    # 'override:' prefix, as the sibling test above shows).  The method
    # under test here must be _get_violin_extra_spec.
    result = self.driver._get_violin_extra_spec(vol, 'test_key')
    m_get_admin_context.assert_called_with()
    m_get_volume_type.assert_called_with(None, vol['volume_type_id'])
    self.assertEqual('test_value', result)
| HybridF5/jacket | jacket/tests/storage/unit/test_v7000_common.py | Python | apache-2.0 | 30,620 |
from sfa.generic import Generic
class dummy(Generic):
    """Generic flavour describing the 'dummy' testbed driver stack.

    Each accessor lazily imports its module and returns the class that
    the SFA framework should instantiate for that role.
    """

    def importer_class(self):
        """Importer used to populate the registry from the testbed."""
        import sfa.importer.dummyimporter
        return sfa.importer.dummyimporter.DummyImporter

    def api_class(self):
        """The standard server-side API implementation."""
        import sfa.server.sfaapi
        return sfa.server.sfaapi.SfaApi

    def registry_manager_class(self):
        """Manager class for the registry service."""
        import sfa.managers.registry_manager
        return sfa.managers.registry_manager.RegistryManager

    def slicemgr_manager_class(self):
        """Manager class for the slice-manager service."""
        import sfa.managers.slice_manager
        return sfa.managers.slice_manager.SliceManager

    def aggregate_manager_class(self):
        """Manager class for the aggregate-manager service."""
        import sfa.managers.aggregate_manager
        return sfa.managers.aggregate_manager.AggregateManager

    def driver_class(self):
        """Server-side driver that talks to the whole (dummy) testbed."""
        import sfa.dummy.dummydriver
        return sfa.dummy.dummydriver.DummyDriver
| yippeecw/sfa | sfa/generic/dummy.py | Python | mit | 1,036 |
# -*- coding: utf-8 -*-
import pytest
import json
import os.path
import importlib
import jsonpickle
from fixture.application import Application
from fixture.db import DbFixture
from fixture.orm import ORMFixture
# Module-level singletons shared across fixtures.
fixture = None  # cached Application instance (reused while the browser lives)
target = None   # cache of parsed JSON configurations, keyed by file name


def load_config(file):
    """Load and cache the JSON target-configuration file.

    Configurations are cached per file name, so repeated calls (one per
    fixture) do not re-read the file from disk.  Bug fix: the original
    cached only the first file ever loaded, so a later call with a
    different file silently returned the stale config.

    :param file: config file name, resolved relative to this module's
        directory (an absolute path is honoured as-is by os.path.join).
    :return: the parsed configuration dictionary.
    """
    global target
    if target is None:
        target = {}
    if file not in target:
        config_file = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), file)
        with open(config_file) as f:
            target[file] = json.load(f)
    return target[file]
@pytest.fixture
def app(request):
    """Application fixture, reused across tests while the browser lives.

    Re-creates the cached Application when it is missing or no longer
    valid, and ensures the configured user is logged in before each test.
    """
    global fixture
    browser = request.config.getoption("--browser")
    web_config = load_config(request.config.getoption("--target"))['web']
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=web_config['baseUrl'])
    # Login is (re)asserted on every use, even for a reused fixture.
    fixture.session.ensure_login(username=web_config['username'],
                                 password=web_config['password'])
    return fixture
@pytest.fixture(scope="session")
def db(request):
    """Session-scoped raw-DB fixture; the connection is closed at exit.

    Bug fix: the finalizer previously read ``dbfixture.destroy`` without
    calling it, so the DB connection was never closed.
    """
    db_config = load_config(request.config.getoption("--target"))['db']
    dbfixture = DbFixture(host=db_config['host'], name=db_config['name'],
                          user=db_config['user'],
                          password=db_config['password'])

    def fin():
        dbfixture.destroy()
    request.addfinalizer(fin)
    return dbfixture
@pytest.fixture(scope="session")
def orm(request):
    """Session-scoped ORM fixture for DB checks via the object model."""
    orm_config = load_config(request.config.getoption("--target"))['db']
    ormfixture = ORMFixture(host=orm_config['host'], name=orm_config['name'],
                            user=orm_config['user'],
                            password=orm_config['password'])

    def fin():
        pass
        # ormfixture.destroy
        # NOTE(review): teardown is deliberately disabled above — confirm
        # whether the ORM connection should be closed at session end.
    request.addfinalizer(fin)
    return ormfixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
    """Autouse teardown: log out and destroy the cached Application.

    Relies on the module-global ``fixture`` populated by the app()
    fixture; runs once when the whole test session finishes.
    """
    def fin():
        fixture.session.ensure_logout()
        fixture.destroy()
    request.addfinalizer(fin)
    return fixture
@pytest.fixture
def check_ui(request):
    """Whether tests should additionally verify data through the UI."""
    return request.config.getoption("--check_ui")
def pytest_addoption(parser):
    """Register the command-line options consumed by the fixtures."""
    options = (
        ("--browser", {"action": "store", "default": "firefox"}),
        ("--target", {"action": "store", "default": "target.json"}),
        ("--check_ui", {"action": "store_true"}),
    )
    for name, kwargs in options:
        parser.addoption(name, **kwargs)
def pytest_generate_tests(metafunc):
    """Parametrize tests from Python modules (data_*) or JSON (json_*).

    A fixture name prefixed with "data_" loads testdata from
    data/<name>.py; a "json_" prefix loads data/<name>.json.  The string
    form of each record is used as the parametrized test id.
    """
    # NOTE: the loop variable shadows the module-global 'fixture' name,
    # but only locally — the global is not rebound here.
    for fixture in metafunc.fixturenames:
        if fixture.startswith("data_"):
            testdate = load_from_module(fixture[5:])
            metafunc.parametrize(fixture, testdate,
                                 ids=[str(x) for x in testdate])
        elif fixture.startswith("json_"):
            testdate = load_from_json(fixture[5:])
            metafunc.parametrize(fixture, testdate,
                                 ids=[str(x) for x in testdate])
def load_from_module(module):
    """Return the ``testdata`` list defined in data/<module>.py."""
    return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.json" % file)) as f:
return jsonpickle.decode(f.read()) | DmitriyNeurov/python_training | conftest.py | Python | apache-2.0 | 2,844 |
import os
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
# Node type name and unique binary type id under which the settings
# node is registered with Maya.
kPluginNodeName = "MitsubaRenderSettings"
kPluginNodeId = OpenMaya.MTypeId(0x87021)

# The render-settings dependency node.
class MitsubaRenderSetting(OpenMayaMPx.MPxNode):
    """Maya dependency node storing every Mitsuba render setting.

    The node is pure attribute storage: the attributes declared below
    are created in nodeInitializer() and the node performs no
    computation of its own.  Written against Maya's legacy Python 2
    plug-in API (note the Python 2 ``print`` statement in compute()).
    """

    # Class variables
    mMitsubaPath = OpenMaya.MObject()
    mOIIOToolPath = OpenMaya.MObject()

    # Integrator variables
    mIntegrator = OpenMaya.MObject()

    # Sampler variables
    mSampler = OpenMaya.MObject()
    mSampleCount = OpenMaya.MObject()
    mSamplerDimension = OpenMaya.MObject()
    mSamplerScramble = OpenMaya.MObject()

    # Reconstruction Filter variables
    mReconstructionFilter = OpenMaya.MObject()

    # Overall controls
    mKeepTempFiles = OpenMaya.MObject()
    mVerbose = OpenMaya.MObject()
    mWritePartialResults = OpenMaya.MObject()
    mWritePartialResultsInterval = OpenMaya.MObject()
    mBlockSize = OpenMaya.MObject()
    mThreads = OpenMaya.MObject()

    # Integrator - Path Tracer variables
    mPathTracerUseInfiniteDepth = OpenMaya.MObject()
    mPathTracerMaxDepth = OpenMaya.MObject()
    mPathTracerRRDepth = OpenMaya.MObject()
    mPathTracerStrictNormals = OpenMaya.MObject()
    mPathTracerHideEmitters = OpenMaya.MObject()

    # Integrator - Bidirectional Path Tracer variables
    mBidrectionalPathTracerUseInfiniteDepth = OpenMaya.MObject()
    mBidrectionalPathTracerMaxDepth = OpenMaya.MObject()
    mBidrectionalPathTracerRRDepth = OpenMaya.MObject()
    mBidrectionalPathTracerLightImage = OpenMaya.MObject()
    mBidrectionalPathTracerSampleDirect = OpenMaya.MObject()

    # Integrator - Ambient Occlusion variables
    mAmbientOcclusionShadingSamples = OpenMaya.MObject()
    mAmbientOcclusionUseAutomaticRayLength = OpenMaya.MObject()
    mAmbientOcclusionRayLength = OpenMaya.MObject()

    # Integrator - Direct Illumination variables
    mDirectIlluminationShadingSamples = OpenMaya.MObject()
    mDirectIlluminationUseEmitterAndBSDFSamples = OpenMaya.MObject()
    mDirectIlluminationEmitterSamples = OpenMaya.MObject()
    mDirectIlluminationBSDFSamples = OpenMaya.MObject()
    mDirectIlluminationStrictNormals = OpenMaya.MObject()
    mDirectIlluminationHideEmitters = OpenMaya.MObject()

    # Integrator - Simple Volumetric Path Tracer variables
    mSimpleVolumetricPathTracerUseInfiniteDepth = OpenMaya.MObject()
    mSimpleVolumetricPathTracerMaxDepth = OpenMaya.MObject()
    mSimpleVolumetricPathTracerRRDepth = OpenMaya.MObject()
    mSimpleVolumetricPathTracerStrictNormals = OpenMaya.MObject()
    mSimpleVolumetricPathTracerHideEmitters = OpenMaya.MObject()

    # Integrator - Volumetric Path Tracer variables
    mVolumetricPathTracerUseInfiniteDepth = OpenMaya.MObject()
    mVolumetricPathTracerMaxDepth = OpenMaya.MObject()
    mVolumetricPathTracerRRDepth = OpenMaya.MObject()
    mVolumetricPathTracerStrictNormals = OpenMaya.MObject()
    mVolumetricPathTracerHideEmitters = OpenMaya.MObject()

    # Integrator - Photon Map variables
    mPhotonMapDirectSamples = OpenMaya.MObject()
    mPhotonMapGlossySamples = OpenMaya.MObject()
    mPhotonMapUseInfiniteDepth = OpenMaya.MObject()
    mPhotonMapMaxDepth = OpenMaya.MObject()
    mPhotonMapGlobalPhotons = OpenMaya.MObject()
    mPhotonMapCausticPhotons = OpenMaya.MObject()
    mPhotonMapVolumePhotons = OpenMaya.MObject()
    mPhotonMapGlobalLookupRadius = OpenMaya.MObject()
    mPhotonMapCausticLookupRadius = OpenMaya.MObject()
    mPhotonMapLookupSize = OpenMaya.MObject()
    mPhotonMapGranularity = OpenMaya.MObject()
    mPhotonMapHideEmitters = OpenMaya.MObject()
    mPhotonMapRRDepth = OpenMaya.MObject()

    # Integrator - Progressive Photon Map variables
    mProgressivePhotonMapUseInfiniteDepth = OpenMaya.MObject()
    mProgressivePhotonMapMaxDepth = OpenMaya.MObject()
    mProgressivePhotonMapPhotonCount = OpenMaya.MObject()
    mProgressivePhotonMapInitialRadius = OpenMaya.MObject()
    mProgressivePhotonMapAlpha = OpenMaya.MObject()
    mProgressivePhotonMapGranularity = OpenMaya.MObject()
    mProgressivePhotonMapRRDepth = OpenMaya.MObject()
    mProgressivePhotonMapMaxPasses = OpenMaya.MObject()

    # Integrator - Stochastic Progressive Photon Map variables
    mStochasticProgressivePhotonMapUseInfiniteDepth = OpenMaya.MObject()
    mStochasticProgressivePhotonMapMaxDepth = OpenMaya.MObject()
    mStochasticProgressivePhotonMapPhotonCount = OpenMaya.MObject()
    mStochasticProgressivePhotonMapInitialRadius = OpenMaya.MObject()
    mStochasticProgressivePhotonMapAlpha = OpenMaya.MObject()
    mStochasticProgressivePhotonMapGranularity = OpenMaya.MObject()
    mStochasticProgressivePhotonMapRRDepth = OpenMaya.MObject()
    mStochasticProgressivePhotonMapMaxPasses = OpenMaya.MObject()

    # Integrator - Primary Sample Space Metropolis Light Transport variables
    mPrimarySampleSpaceMetropolisLightTransportBidirectional = OpenMaya.MObject()
    mPrimarySampleSpaceMetropolisLightTransportUseInfiniteDepth = OpenMaya.MObject()
    mPrimarySampleSpaceMetropolisLightTransportMaxDepth = OpenMaya.MObject()
    mPrimarySampleSpaceMetropolisLightTransportDirectSamples = OpenMaya.MObject()
    mPrimarySampleSpaceMetropolisLightTransportRRDepth = OpenMaya.MObject()
    mPrimarySampleSpaceMetropolisLightTransportLuminanceSamples = OpenMaya.MObject()
    mPrimarySampleSpaceMetropolisLightTransportTwoStage = OpenMaya.MObject()
    mPrimarySampleSpaceMetropolisLightTransportPLarge = OpenMaya.MObject()

    # Integrator - Path Space Metropolis Light Transport variables
    mPathSpaceMetropolisLightTransportUseInfiniteDepth = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportMaxDepth = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportDirectSamples = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportLuminanceSamples = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportTwoStage = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportBidirectionalMutation = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportLensPurturbation = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportMultiChainPurturbation = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportCausticPurturbation = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportManifoldPurturbation = OpenMaya.MObject()
    mPathSpaceMetropolisLightTransportLambda = OpenMaya.MObject()

    # Integrator - Energy Redistribution Path Tracing variables
    mEnergyRedistributionPathTracingUseInfiniteDepth = OpenMaya.MObject()
    mEnergyRedistributionPathTracingMaxDepth = OpenMaya.MObject()
    mEnergyRedistributionPathTracingNumChains = OpenMaya.MObject()
    mEnergyRedistributionPathTracingMaxChains = OpenMaya.MObject()
    mEnergyRedistributionPathTracingChainLength = OpenMaya.MObject()
    mEnergyRedistributionPathTracingDirectSamples = OpenMaya.MObject()
    mEnergyRedistributionPathTracingLensPerturbation = OpenMaya.MObject()
    mEnergyRedistributionPathTracingMultiChainPerturbation = OpenMaya.MObject()
    mEnergyRedistributionPathTracingCausticPerturbation = OpenMaya.MObject()
    mEnergyRedistributionPathTracingManifoldPerturbation = OpenMaya.MObject()
    mEnergyRedistributionPathTracingLambda = OpenMaya.MObject()

    # Integrator - Adjoint Particle Tracer variables
    mAdjointParticleTracerUseInfiniteDepth = OpenMaya.MObject()
    mAdjointParticleTracerMaxDepth = OpenMaya.MObject()
    mAdjointParticleTracerRRDepth = OpenMaya.MObject()
    mAdjointParticleTracerGranularity = OpenMaya.MObject()
    mAdjointParticleTracerBruteForce = OpenMaya.MObject()

    # Integrator - Virtual Point Light variables
    mVirtualPointLightUseInfiniteDepth = OpenMaya.MObject()
    mVirtualPointLightMaxDepth = OpenMaya.MObject()
    mVirtualPointLightShadowMapResolution = OpenMaya.MObject()
    mVirtualPointLightClamping = OpenMaya.MObject()

    # Sensor variables
    mSensorOverride = OpenMaya.MObject()

    # Sensor - Perspective Rdist variables
    mPerspectiveRdistKc2 = OpenMaya.MObject()
    mPerspectiveRdistKc4 = OpenMaya.MObject()

    # Film variables
    mFilm = OpenMaya.MObject()

    # Film - HDR variables
    mHDRFilmFileFormat = OpenMaya.MObject()
    mHDRFilmPixelFormat = OpenMaya.MObject()
    mHDRFilmComponentFormat = OpenMaya.MObject()
    mHDRFilmAttachLog = OpenMaya.MObject()
    mHDRFilmBanner = OpenMaya.MObject()
    mHDRFilmHighQualityEdges = OpenMaya.MObject()

    # Film - Tiled HDR variables
    mTiledHDRFilmPixelFormat = OpenMaya.MObject()
    mTiledHDRFilmComponentFormat = OpenMaya.MObject()

    # Film - LDR variables
    mLDRFilmFileFormat = OpenMaya.MObject()
    mLDRFilmPixelFormat = OpenMaya.MObject()
    mLDRFilmTonemapMethod = OpenMaya.MObject()
    mLDRFilmGamma = OpenMaya.MObject()
    mLDRFilmExposure = OpenMaya.MObject()
    mLDRFilmKey = OpenMaya.MObject()
    mLDRFilmBurn = OpenMaya.MObject()
    mLDRFilmBanner = OpenMaya.MObject()
    mLDRFilmHighQualityEdges = OpenMaya.MObject()

    # Film - Math variables
    mMathFilmFileFormat = OpenMaya.MObject()
    mMathFilmPixelFormat = OpenMaya.MObject()
    mMathFilmDigits = OpenMaya.MObject()
    mMathFilmVariable = OpenMaya.MObject()
    mMathFilmHighQualityEdges = OpenMaya.MObject()

    # Metaintegrator variables
    mMetaIntegrator = OpenMaya.MObject()

    # Metaintegrator - Adaptive variables
    mAdaptiveMaxError = OpenMaya.MObject()
    mAdaptivePValue = OpenMaya.MObject()
    mAdaptiveMaxSampleFactor = OpenMaya.MObject()

    # Metaintegrator - Irradiance Cache variables
    mIrradianceCacheResolution = OpenMaya.MObject()
    mIrradianceCacheQuality = OpenMaya.MObject()
    mIrradianceCacheGradients = OpenMaya.MObject()
    mIrradianceCacheClampNeighbor = OpenMaya.MObject()
    mIrradianceCacheClampScreen = OpenMaya.MObject()
    mIrradianceCacheOverture = OpenMaya.MObject()
    mIrradianceCacheQualityAdjustment = OpenMaya.MObject()
    mIrradianceCacheIndirectOnly = OpenMaya.MObject()
    mIrradianceCacheDebug = OpenMaya.MObject()

    # Multichannel variables
    mMultichannel = OpenMaya.MObject()
    mMultichannelPosition = OpenMaya.MObject()
    mMultichannelRelPosition = OpenMaya.MObject()
    mMultichannelDistance = OpenMaya.MObject()
    mMultichannelGeoNormal = OpenMaya.MObject()
    mMultichannelShadingNormal = OpenMaya.MObject()
    mMultichannelUV = OpenMaya.MObject()
    mMultichannelAlbedo = OpenMaya.MObject()
    mMultichannelShapeIndex = OpenMaya.MObject()
    mMultichannelPrimIndex = OpenMaya.MObject()

    def __init__(self):
        OpenMayaMPx.MPxNode.__init__(self)

    # Invoked when the command is evaluated.
    def compute(self, plug, block):
        """No-op compute: the node is pure attribute storage."""
        print "Render Settings evaluate!"
        return OpenMaya.kUnknownParameter

    @staticmethod
    def addBooleanAttribute(nAttr, attribute, longName, shortName, defaultBoolean=True):
        """Create a storable, writable boolean attribute on the class."""
        setattr(MitsubaRenderSetting, attribute, nAttr.create(longName, shortName, OpenMaya.MFnNumericData.kBoolean, defaultBoolean))
        nAttr.setStorable(1)
        nAttr.setWritable(1)

    @staticmethod
    def addIntegerAttribute(nAttr, attribute, longName, shortName, defaultInt=0):
        """Create a storable, writable integer attribute on the class."""
        setattr(MitsubaRenderSetting, attribute, nAttr.create(longName, shortName, OpenMaya.MFnNumericData.kInt, defaultInt))
        nAttr.setStorable(1)
        nAttr.setWritable(1)

    @staticmethod
    def addFloatAttribute(nAttr, attribute, longName, shortName, defaultFloat=0.0):
        """Create a storable, writable float attribute on the class."""
        setattr(MitsubaRenderSetting, attribute, nAttr.create(longName, shortName, OpenMaya.MFnNumericData.kFloat, defaultFloat))
        nAttr.setStorable(1)
        nAttr.setWritable(1)

    @staticmethod
    def addColorAttribute(nAttr, attribute, longName, shortName, defaultRGB):
        """Create a storable, writable RGB color attribute on the class."""
        setattr(MitsubaRenderSetting, attribute, nAttr.createColor(longName, shortName))
        nAttr.setDefault(defaultRGB[0], defaultRGB[1], defaultRGB[2])
        nAttr.setStorable(1)
        nAttr.setWritable(1)

    @staticmethod
    def addStringAttribute(sAttr, attribute, longName, shortName, defaultString=""):
        """Create a storable, writable string attribute on the class."""
        stringFn = OpenMaya.MFnStringData()
        defaultText = stringFn.create(defaultString)
        setattr(MitsubaRenderSetting, attribute, sAttr.create(longName, shortName, OpenMaya.MFnData.kString, defaultText))
        sAttr.setStorable(1)
        sAttr.setWritable(1)
def nodeCreator():
    """Factory registered with Maya to instantiate the settings node."""
    return MitsubaRenderSetting()
def nodeInitializer():
print "Render Settings initialize!"
sAttr = OpenMaya.MFnTypedAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
try:
# Path to mitsuba executable
defaultMitsubaPath = os.getenv( "MITSUBA_PATH" )
if not defaultMitsubaPath:
defaultMitsubaPath = ""
MitsubaRenderSetting.addStringAttribute(sAttr, "mMitsubaPath", "mitsubaPath", "mp", defaultMitsubaPath)
# Path to oiiotool executable
defaultOIIOToolPath = os.getenv( "OIIOTOOL_PATH" )
if not defaultOIIOToolPath:
defaultOIIOToolPath = ""
MitsubaRenderSetting.addStringAttribute(sAttr, "mOIIOToolPath", "oiiotoolPath", "oiiotp", defaultOIIOToolPath)
# Integrator variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mIntegrator", "integrator", "ig", "Path Tracer")
# Sampler variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mSampler", "sampler", "sm", "Independent Sampler")
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mSampleCount", "sampleCount", "sc", 8)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mSamplerDimension", "samplerDimension", "sd", 4)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mSamplerScramble", "samplerScramble", "ss", -1)
# Reconstruction Filter variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mReconstructionFilter", "reconstructionFilter", "rf", "Box filter")
# Overall controls
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mKeepTempFiles", "keepTempFiles", "kt", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mVerbose", "verbose", "vb", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mWritePartialResults", "writePartialResults", "wpr", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mWritePartialResultsInterval", "writePartialResultsInterval", "wpri", 15)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mBlockSize", "blockSize", "bs", 32)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mThreads", "threads", "th", 0)
# Integrator - Path Tracer variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathTracerUseInfiniteDepth", "iPathTracerUseInfiniteDepth", "iptuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPathTracerMaxDepth", "iPathTracerMaxDepth", "iptmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPathTracerRRDepth", "iPathTracerRRDepth", "iptrrd", 5)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathTracerStrictNormals", "iPathTracerStrictNormals", "iptsn", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathTracerHideEmitters", "iPathTracerHideEmitters", "ipthe", False)
# Integrator - Bidirectional Path Tracer variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mBidrectionalPathTracerUseInfiniteDepth", "iBidrectionalPathTracerUseInfiniteDepth", "ibdptuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mBidrectionalPathTracerMaxDepth", "iBidrectionalPathTracerMaxDepth", "ibdptmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mBidrectionalPathTracerRRDepth", "iBidrectionalPathTracerRRDepth", "ibdptrrd", 5)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mBidrectionalPathTracerLightImage", "iBidrectionalPathTracerLightImage", "ibdptli", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mBidrectionalPathTracerSampleDirect", "iBidrectionalPathTracerSampleDirect", "ibdptsd", True)
# Integrator - Ambient Occlusion variables
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mAmbientOcclusionShadingSamples", "iAmbientOcclusionShadingSamples", "iaoss", 1)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mAmbientOcclusionUseAutomaticRayLength", "iAmbientOcclusionUseAutomaticRayLength", "iaouarl", True)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mAmbientOcclusionRayLength", "iAmbientOcclusionRayLength", "iaorl", -1)
# Integrator - Direct Illumination variables
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mDirectIlluminationShadingSamples", "iDirectIlluminationShadingSamples", "idiss", 1)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mDirectIlluminationUseEmitterAndBSDFSamples", "iDirectIlluminationUseEmitterAndBSDFSamples", "idiuebs", False)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mDirectIlluminationEmitterSamples", "iDirectIlluminationEmitterSamples", "idies", 1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mDirectIlluminationBSDFSamples", "iDirectIlluminationBSDFSamples", "idibs", 1)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mDirectIlluminationStrictNormals", "iDirectIlluminationStrictNormals", "idisn", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mDirectIlluminationHideEmitters", "iDirectIlluminationHideEmitters", "idihe", False)
# Integrator - Simple Volumetric Path Tracer variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mSimpleVolumetricPathTracerUseInfiniteDepth", "iSimpleVolumetricPathTracerUseInfiniteDepth", "isvptuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mSimpleVolumetricPathTracerMaxDepth", "iSimpleVolumetricPathTracerMaxDepth", "isvptmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mSimpleVolumetricPathTracerRRDepth", "iSimpleVolumetricPathTracerRRDepth", "isvptrrd", 5)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mSimpleVolumetricPathTracerStrictNormals", "iSimpleVolumetricPathTracerStrictNormals", "isvptsn", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mSimpleVolumetricPathTracerHideEmitters", "iSimpleVolumetricPathTracerHideEmitters", "isvpthe", False)
# Integrator - Volumetric Path Tracer variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mVolumetricPathTracerUseInfiniteDepth", "iVolumetricPathTracerUseInfiniteDepth", "ivptuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mVolumetricPathTracerMaxDepth", "iVolumetricPathTracerMaxDepth", "ivptmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mVolumetricPathTracerRRDepth", "iVolumetricPathTracerRRDepth", "ivptrrd", 5)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mVolumetricPathTracerStrictNormals", "iVolumetricPathTracerStrictNormals", "ivptsn", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mVolumetricPathTracerHideEmitters", "iVolumetricPathTracerHideEmitters", "ivpthe", False)
# Integrator - Photon Map variables
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapDirectSamples", "iPhotonMapDirectSamples", "ipmds", 16)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapGlossySamples", "iPhotonMapGlossySamples", "ipmgs", 32)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPhotonMapUseInfiniteDepth", "iPhotonMapUseInfiniteDepth", "ipmuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapMaxDepth", "iPhotonMapMaxDepth", "ipmmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapGlobalPhotons", "iPhotonMapGlobalPhotons", "ipmgp", 250000)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapCausticPhotons", "iPhotonMapCausticPhotons", "ipmcp", 250000)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapVolumePhotons", "iPhotonMapVolumePhotons", "ipmvp", 250000)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mPhotonMapGlobalLookupRadius", "iPhotonMapGlobalLookupRadius", "ipmglr", 0.05)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mPhotonMapCausticLookupRadius", "iPhotonMapCausticLookupRadius", "ipmclr", 0.0125)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapLookupSize", "iPhotonMapLookupSize", "ipmls", 120)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapGranularity", "iPhotonMapGranularity", "ipmg", 0)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPhotonMapHideEmitters", "iPhotonMapHideEmitters", "ipmhe", False)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPhotonMapRRDepth", "iPhotonMapRRDepth", "ipmrrd", 5)
# Integrator - Progressive Photon Map variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mProgressivePhotonMapUseInfiniteDepth", "iProgressivePhotonMapUseInfiniteDepth", "ippmuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mProgressivePhotonMapMaxDepth", "iProgressivePhotonMapMaxDepth", "ippmmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mProgressivePhotonMapPhotonCount", "iProgressivePhotonMapPhotonCount", "ippmpc", 250000)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mProgressivePhotonMapInitialRadius", "iProgressivePhotonMapInitialRadius", "ippmir", 0)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mProgressivePhotonMapAlpha", "iProgressivePhotonMapAlpha", "ippma", 0.7)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mProgressivePhotonMapGranularity", "iProgressivePhotonMapGranularity", "ippmg", 0)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mProgressivePhotonMapRRDepth", "iProgressivePhotonMapRRDepth", "ippmrrd", 5)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mProgressivePhotonMapMaxPasses", "iProgressivePhotonMapMaxPasses", "ippmmp", 10)
# Integrator - Stochastic Progressive Photon Map variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mStochasticProgressivePhotonMapUseInfiniteDepth", "iStochasticProgressivePhotonMapUseInfiniteDepth", "isppmuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mStochasticProgressivePhotonMapMaxDepth", "iStochasticProgressivePhotonMapMaxDepth", "isppmmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mStochasticProgressivePhotonMapPhotonCount", "iStochasticProgressivePhotonMapPhotonCount", "isppmpc", 250000)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mStochasticProgressivePhotonMapInitialRadius", "iStochasticProgressivePhotonMapInitialRadius", "isppmir", 0)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mStochasticProgressivePhotonMapAlpha", "iStochasticProgressivePhotonMapAlpha", "isppma", 0.7)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mStochasticProgressivePhotonMapGranularity", "iStochasticProgressivePhotonMapGranularity", "isppmg", 0)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mStochasticProgressivePhotonMapRRDepth", "iStochasticProgressivePhotonMapRRDepth", "isppmrrd", 5)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mStochasticProgressivePhotonMapMaxPasses", "iStochasticProgressivePhotonMapMaxPasses", "isppmmp", 10)
# Integrator - Primary Sample Space Metropolis Light Transport variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportBidirectional", "iPrimarySampleSpaceMetropolisLightTransportBidirectional", "ipssmltb", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportUseInfiniteDepth", "iPrimarySampleSpaceMetropolisLightTransportUseInfiniteDepth", "ipssmltuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportMaxDepth", "iPrimarySampleSpaceMetropolisLightTransportMaxDepth", "ipssmltmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportDirectSamples", "iPrimarySampleSpaceMetropolisLightTransportDirectSamples", "ipssmltds", 16)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportRRDepth", "iPrimarySampleSpaceMetropolisLightTransportRRDepth", "ipssmltrrd", 5)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportLuminanceSamples", "iPrimarySampleSpaceMetropolisLightTransportLuminanceSamples", "ipssmltls", 100000)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportTwoStage", "iPrimarySampleSpaceMetropolisLightTransportTwoStage", "ipssmltts", False)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mPrimarySampleSpaceMetropolisLightTransportPLarge", "iPrimarySampleSpaceMetropolisLightTransportPLarge", "ipssmltpl", 0.3)
# Integrator - Path Space Metropolis Light Transport variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathSpaceMetropolisLightTransportUseInfiniteDepth", "iPathSpaceMetropolisLightTransportUseInfiniteDepth", "ipsmlttuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPathSpaceMetropolisLightTransportMaxDepth", "iPathSpaceMetropolisLightTransportMaxDepth", "ipsmltmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPathSpaceMetropolisLightTransportDirectSamples", "iPathSpaceMetropolisLightTransportDirectSamples", "ipsmltds", 16)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mPathSpaceMetropolisLightTransportLuminanceSamples", "iPathSpaceMetropolisLightTransportLuminanceSamples", "ipsmltls", 100000)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathSpaceMetropolisLightTransportTwoStage", "iPathSpaceMetropolisLightTransportTwoStage", "ipsmltts", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathSpaceMetropolisLightTransportBidirectionalMutation", "iPathSpaceMetropolisLightTransportBidirectionalMutation", "ipsmltbm", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathSpaceMetropolisLightTransportLensPurturbation", "iPathSpaceMetropolisLightTransportLensPurturbation", "ipsmltlp", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathSpaceMetropolisLightTransportMultiChainPurturbation", "iPathSpaceMetropolisLightTransportMultiChainPurturbation", "ipsmltmcp", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathSpaceMetropolisLightTransportCausticPurturbation", "iPathSpaceMetropolisLightTransportCausticPurturbation", "ipsmltcp", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mPathSpaceMetropolisLightTransportManifoldPurturbation", "iPathSpaceMetropolisLightTransportManifoldPurturbation", "ipsmltmp", False)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mPathSpaceMetropolisLightTransportLambda", "iPathSpaceMetropolisLightTransportLambda", "ipsmltl", 50)
# Integrator - Energy Redistribution Path Tracing variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mEnergyRedistributionPathTracingUseInfiniteDepth", "iEnergyRedistributionPathTracingUseInfiniteDepth", "ierptuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mEnergyRedistributionPathTracingMaxDepth", "iEnergyRedistributionPathTracingMaxDepth", "ierptmd", -1)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mEnergyRedistributionPathTracingNumChains", "iEnergyRedistributionPathTracingNumChains", "ierptnc", 1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mEnergyRedistributionPathTracingMaxChains", "iEnergyRedistributionPathTracingMaxChains", "ierptmc", 0)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mEnergyRedistributionPathTracingChainLength", "iEnergyRedistributionPathTracingChainLength", "ierptcl", 1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mEnergyRedistributionPathTracingDirectSamples", "iEnergyRedistributionPathTracingDirectSamples", "ierptds", 16)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mEnergyRedistributionPathTracingLensPerturbation", "iEnergyRedistributionPathTracingLensPerturbation", "ierptlp", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mEnergyRedistributionPathTracingMultiChainPerturbation", "iEnergyRedistributionPathTracingMultiChainPerturbation", "ierptmcp", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mEnergyRedistributionPathTracingCausticPerturbation", "iEnergyRedistributionPathTracingCausticPerturbation", "ierptcp", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mEnergyRedistributionPathTracingManifoldPerturbation", "iEnergyRedistributionPathTracingManifoldPerturbation", "ierptmp", False)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mEnergyRedistributionPathTracingLambda", "iEnergyRedistributionPathTracingLambda", "ierptl", 50)
# Integrator - Adjoint Particle Tracer variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mAdjointParticleTracerUseInfiniteDepth", "iAdjointParticleTracerUseInfiniteDepth", "iaptuid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mAdjointParticleTracerMaxDepth", "iAdjointParticleTracerMaxDepth", "iaptmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mAdjointParticleTracerRRDepth", "iAdjointParticleTracerRRDepth", "iaptrrd", 5)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mAdjointParticleTracerGranularity", "iAdjointParticleTracerGranularity", "iaptg", 200000)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mAdjointParticleTracerBruteForce", "iAdjointParticleTracerBruteForce", "iaptbf", False)
# Integrator - Virtual Point Light variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mVirtualPointLightUseInfiniteDepth", "iVirtualPointLightUseInfiniteDepth", "ivpluid", True)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mVirtualPointLightMaxDepth", "iVirtualPointLightMaxDepth", "ivplmd", -1)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mVirtualPointLightShadowMapResolution", "iVirtualPointLightShadowMapResolution", "ivplsmr", 512)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mVirtualPointLightClamping", "iVirtualPointLightClamping", "ivplc", 0.1)
# Sensor variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mSensorOverride", "sensorOverride", "so", "None")
# Sensor - Perspective Rdist variables
MitsubaRenderSetting.addFloatAttribute(nAttr, "mPerspectiveRdistKc2", "sPerspectiveRdistKc2", "sprkc2", 0.0)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mPerspectiveRdistKc4", "sPerspectiveRdistKc4", "sprkc4", 0.0)
# Film variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mFilm", "film", "fm", "HDR Film")
# Film - HDR variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mHDRFilmFileFormat", "fHDRFilmFileFormat", "fhff", "OpenEXR (.exr)")
MitsubaRenderSetting.addStringAttribute(sAttr, "mHDRFilmPixelFormat", "fHDRFilmPixelFormat", "fhpf", "RGBA")
MitsubaRenderSetting.addStringAttribute(sAttr, "mHDRFilmComponentFormat", "fHDRFilmComponentFormat", "fhcf", "Float 16")
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mHDRFilmAttachLog", "fHDRFilmAttachLog", "fhal", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mHDRFilmBanner", "fHDRFilmBanner", "fhb", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mHDRFilmHighQualityEdges", "fHDRFilmHighQualityEdges", "fhhqe", False)
# Film - Tiled HDR variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mTiledHDRFilmPixelFormat", "fTiledHDRFilmPixelFormat", "fthpf", "RGBA")
MitsubaRenderSetting.addStringAttribute(sAttr, "mTiledHDRFilmComponentFormat", "fTiledHDRFilmComponentFormat", "fthcf", "Float 16")
# Film - LDR variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mLDRFilmFileFormat", "fLDRFilmFileFormat", "flff", "PNG (.png)")
MitsubaRenderSetting.addStringAttribute(sAttr, "mLDRFilmPixelFormat", "fLDRFilmPixelFormat", "flpf", "RGB")
MitsubaRenderSetting.addStringAttribute(sAttr, "mLDRFilmTonemapMethod", "fLDRFilmTonemapMethod", "fltm", "Gamma")
MitsubaRenderSetting.addFloatAttribute(nAttr, "mLDRFilmGamma", "fLDRFilmGamma", "flg", -1)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mLDRFilmExposure", "fLDRFilmExposure", "fle", 0.0)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mLDRFilmKey", "fLDRFilmKey", "flk", 0.18)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mLDRFilmBurn", "fLDRFilmBurn", "flb", 0.0)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mLDRFilmBanner", "fLDRFilmBanner", "flbn", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mLDRFilmHighQualityEdges", "fLDRFilmHighQualityEdges", "flhqe", False)
# Film - Math variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mMathFilmFileFormat", "fMathFilmFileFormat", "fmfm", "Matlab (.m)")
MitsubaRenderSetting.addStringAttribute(sAttr, "mMathFilmPixelFormat", "fMathFilmPixelFormat", "fmpf", "RGB")
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mMathFilmDigits", "fMathFilmDigits", "fmd", 4)
MitsubaRenderSetting.addStringAttribute(sAttr, "mMathFilmVariable", "fMathFilmVariable", "fmv", "data")
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMathFilmHighQualityEdges", "fMathFilmHighQualityEdges", "fmhqe", False)
# Meta-Integrator variables
MitsubaRenderSetting.addStringAttribute(sAttr, "mMetaIntegrator", "metaIntegrator", "mi", "None")
# Metaintegrator - Adaptive variables
MitsubaRenderSetting.addFloatAttribute(nAttr, "mAdaptiveMaxError", "miAdaptiveMaxError", "miame", 5.0)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mAdaptivePValue", "miAdaptivePValue", "miapv", 5.0)
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mAdaptiveMaxSampleFactor", "miAdaptiveMaxSampleFactor", "miamsf", 32)
# Metaintegrator - Irradiance Cache variables
MitsubaRenderSetting.addIntegerAttribute(nAttr, "mIrradianceCacheResolution", "miIrradianceCacheResolution", "miicr", 14)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mIrradianceCacheQuality", "miIrradianceCacheQuality", "miicq", 1.0)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mIrradianceCacheGradients", "miIrradianceCacheGradients", "miicg", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mIrradianceCacheClampNeighbor", "miIrradianceCacheClampNeighbor" , "miiccn", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mIrradianceCacheClampScreen", "miIrradianceCacheClampScreen", "miiccs", True)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mIrradianceCacheOverture", "miIrradianceCacheOverture", "miico", True)
MitsubaRenderSetting.addFloatAttribute(nAttr, "mIrradianceCacheQualityAdjustment", "miIrradianceCacheQualityAdjustment", "miicqa", 0.5)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mIrradianceCacheIndirectOnly", "miIrradianceCacheIndirectOnly", "miicio", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mIrradianceCacheDebug", "miIrradianceCacheDebug", "miicd", False)
# Multichannel variables
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannel", "multichannel", "mc", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelPosition", "multichannelPosition", "mcp", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelRelPosition", "multichannelRelPosition", "mcrp", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelDistance", "multichannelDistance", "mcd", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelGeoNormal", "multichannelGeoNormal", "mcgn", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelShadingNormal", "multichannelShadingNormal", "mcsn", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelUV", "multichannelUV", "mcuv", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelAlbedo", "multichannelAlbedo", "mca", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelShapeIndex", "multichannelShapeIndex", "mcsi", False)
MitsubaRenderSetting.addBooleanAttribute(nAttr, "mMultichannelPrimIndex", "multichannelPrimIndex", "mcpi", False)
except:
sys.stderr.write("Failed to create and add attributes\n")
raise
try:
# Path to executables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMitsubaPath)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mOIIOToolPath)
# Integrator variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIntegrator)
# Sampler variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSampler)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSampleCount)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSamplerDimension)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSamplerScramble)
# Reconstruction Filter variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mReconstructionFilter)
# Overall controls
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mKeepTempFiles)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVerbose)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mWritePartialResults)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mWritePartialResultsInterval)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mBlockSize)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mThreads)
# Integrator - Path Tracer variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathTracerUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathTracerMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathTracerRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathTracerStrictNormals)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathTracerHideEmitters)
# Integrator - Bidirectional Path Tracer variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mBidrectionalPathTracerUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mBidrectionalPathTracerMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mBidrectionalPathTracerRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mBidrectionalPathTracerLightImage)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mBidrectionalPathTracerSampleDirect)
# Integrator - Ambient Occlusion variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAmbientOcclusionShadingSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAmbientOcclusionUseAutomaticRayLength)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAmbientOcclusionRayLength)
# Integrator - Direct Illumination variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mDirectIlluminationShadingSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mDirectIlluminationUseEmitterAndBSDFSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mDirectIlluminationEmitterSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mDirectIlluminationBSDFSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mDirectIlluminationStrictNormals)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mDirectIlluminationHideEmitters)
# Integrator - Simple Volumetric Path Tracer variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSimpleVolumetricPathTracerUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSimpleVolumetricPathTracerMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSimpleVolumetricPathTracerRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSimpleVolumetricPathTracerStrictNormals)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSimpleVolumetricPathTracerHideEmitters)
# Integrator - Volumetric Path Tracer variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVolumetricPathTracerUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVolumetricPathTracerMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVolumetricPathTracerRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVolumetricPathTracerStrictNormals)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVolumetricPathTracerHideEmitters)
# Integrator - Photon Map variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapDirectSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapGlossySamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapGlobalPhotons)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapCausticPhotons)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapVolumePhotons)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapGlobalLookupRadius)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapCausticLookupRadius)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapLookupSize)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapGranularity)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapHideEmitters)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPhotonMapRRDepth)
# Integrator - Progressive Photon Map variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapPhotonCount)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapInitialRadius)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapAlpha)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapGranularity)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mProgressivePhotonMapMaxPasses)
# Integrator - Stochastic Progressive Photon Map variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapPhotonCount)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapInitialRadius)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapAlpha)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapGranularity)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mStochasticProgressivePhotonMapMaxPasses)
# Integrator - Primary Sample Space Metropolis Light Transport variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportBidirectional)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportDirectSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportLuminanceSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportTwoStage)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPrimarySampleSpaceMetropolisLightTransportPLarge)
# Integrator - Path Space Metropolis Light Transport variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportDirectSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportLuminanceSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportTwoStage)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportBidirectionalMutation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportLensPurturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportMultiChainPurturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportCausticPurturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportManifoldPurturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPathSpaceMetropolisLightTransportLambda)
# Integrator - Energy Redistribution Path Tracing variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingNumChains)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingMaxChains)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingChainLength)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingDirectSamples)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingLensPerturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingMultiChainPerturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingCausticPerturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingManifoldPerturbation)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mEnergyRedistributionPathTracingLambda)
# Integrator - Adjoint Particle Tracer variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdjointParticleTracerUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdjointParticleTracerMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdjointParticleTracerRRDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdjointParticleTracerGranularity)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdjointParticleTracerBruteForce)
# Integrator - Virtual Point Light variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVirtualPointLightUseInfiniteDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVirtualPointLightMaxDepth)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVirtualPointLightShadowMapResolution)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mVirtualPointLightClamping)
# Sensor variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mSensorOverride)
# Sensor - Perspective Rdist variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPerspectiveRdistKc2)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mPerspectiveRdistKc4)
# Film variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mFilm)
# Film - HDR variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mHDRFilmFileFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mHDRFilmPixelFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mHDRFilmComponentFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mHDRFilmAttachLog)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mHDRFilmBanner)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mHDRFilmHighQualityEdges)
# Film - Tiled HDR variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mTiledHDRFilmPixelFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mTiledHDRFilmComponentFormat)
# Film - LDR variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmFileFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmPixelFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmTonemapMethod)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmGamma)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmExposure)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmKey)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmBurn)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmBanner)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mLDRFilmHighQualityEdges)
# Film - Math variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMathFilmFileFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMathFilmPixelFormat)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMathFilmDigits)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMathFilmVariable)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMathFilmHighQualityEdges)
# Meta-Integrator variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMetaIntegrator)
# Metaintegrator - Adaptive variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdaptiveMaxError)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdaptivePValue)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mAdaptiveMaxSampleFactor)
# Metaintegrator - Irradiance Cache variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheResolution)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheQuality)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheGradients)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheClampNeighbor)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheClampScreen)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheOverture)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheQualityAdjustment)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheIndirectOnly)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mIrradianceCacheDebug)
# Multichannel variables
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannel)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelPosition)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelRelPosition)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelDistance)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelGeoNormal)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelShadingNormal)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelUV)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelAlbedo)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelShapeIndex)
MitsubaRenderSetting.addAttribute(MitsubaRenderSetting.mMultichannelPrimIndex)
except:
sys.stderr.write("Failed to add attributes\n")
raise
# initialize the script plug-in
def initializePlugin(mobject):
    """Maya plug-in entry point: register the render-settings dependency node.

    Maya calls this when the plug-in is loaded. Registration failures are
    reported on stderr and re-raised so Maya aborts the load.
    """
    plugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        plugin.registerNode(kPluginNodeName,
                            kPluginNodeId,
                            nodeCreator,
                            nodeInitializer,
                            OpenMayaMPx.MPxNode.kDependNode)
    except:
        sys.stderr.write("Failed to register node: %s" % kPluginNodeName)
        raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
    """Maya plug-in exit point: deregister the render-settings node.

    Maya calls this when the plug-in is unloaded. Deregistration failures are
    reported on stderr and re-raised so Maya reports the unload error.
    """
    plugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        plugin.deregisterNode(kPluginNodeId)
    except:
        sys.stderr.write("Failed to deregister node: %s" % kPluginNodeName)
        raise
| hpd/MitsubaForMaya | plug-ins/mitsuba/renderer/MitsubaRenderSettings.py | Python | mit | 53,926 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2011 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time

from shinken.brok import Brok
from shinken.comment import Comment
from shinken.property import BoolProp, IntegerProp, StringProp
""" Schedules downtime for a specified service. If the "fixed" argument is set
to one (1), downtime will start and end at the times specified by the
"start" and "end" arguments.
Otherwise, downtime will begin between the "start" and "end" times and last
for "duration" seconds. The "start" and "end" arguments are specified
in time_t format (seconds since the UNIX epoch). The specified service
downtime can be triggered by another downtime entry if the "trigger_id"
is set to the ID of another scheduled downtime entry.
Set the "trigger_id" argument to zero (0) if the downtime for the
specified service should not be triggered by another downtime entry.
"""
class Downtime:
    """A scheduled downtime applied to a host or a service (``ref``).

    A *fixed* downtime starts and ends exactly at start_time/end_time.
    A *flexible* downtime may be triggered anywhere inside
    [start_time, end_time] by a non-OK check result and then lasts for
    ``duration`` seconds.  Entering a downtime also enters every downtime
    chained to it through trigger_me().
    """

    # Class-level counter used to hand out instance ids.
    id = 1

    # Properties serialized toward the other daemons (pickle/brok):
    # everything except 'ref', which points at a live host/service object
    # and therefore must never be pickled.
    properties = {
        'activate_me':         StringProp(default=[]),
        'entry_time':          IntegerProp(default=0, fill_brok=['full_status']),
        'fixed':               BoolProp(default=True, fill_brok=['full_status']),
        'start_time':          IntegerProp(default=0, fill_brok=['full_status']),
        'duration':            IntegerProp(default=0, fill_brok=['full_status']),
        'trigger_id':          IntegerProp(default=0),
        'end_time':            IntegerProp(default=0, fill_brok=['full_status']),
        'real_end_time':       IntegerProp(default=0),
        'author':              StringProp(default='', fill_brok=['full_status']),
        'comment':             StringProp(default=''),
        'is_in_effect':        BoolProp(default=False),
        'has_been_triggered':  BoolProp(default=False),
        'can_be_deleted':      BoolProp(default=False),
    }

    def __init__(self, ref, start_time, end_time, fixed, trigger_id, duration, author, comment):
        """Create a downtime on *ref* and attach the automatic comment.

        :param ref: host or service object the downtime applies to
        :param start_time: earliest start (time_t)
        :param end_time: latest end (time_t)
        :param fixed: True for a fixed window, False for flexible
        :param trigger_id: id of another downtime that triggers this one (0 = none)
        :param duration: length in seconds (flexible downtimes only)
        :param author: author shown in the UI/log
        :param comment: free-text comment
        """
        self.id = self.__class__.id
        self.__class__.id += 1
        self.ref = ref  # pointer to the srv or host we apply to
        self.activate_me = []  # the other downtimes I need to activate
        self.entry_time = int(time.time())
        self.fixed = fixed
        self.start_time = start_time
        self.duration = duration
        self.trigger_id = trigger_id
        if self.trigger_id != 0:  # triggered plus fixed makes no sense
            self.fixed = False
        self.end_time = end_time
        if fixed:
            self.duration = end_time - start_time
        # This is important for flexible downtimes. Here start_time and
        # end_time mean: in this time interval it is possible to trigger
        # the beginning of the downtime which lasts for duration.
        # Later, when a non-ok event happens, real_end_time will be
        # recalculated from now+duration.
        # end_time will be displayed in the web interface, but real_end_time
        # is used internally.
        self.real_end_time = end_time
        self.author = author
        self.comment = comment
        self.is_in_effect = False  # fixed: start_time reached; flexible: non-ok checkresult
        self.has_been_triggered = False  # another downtime has triggered me
        self.can_be_deleted = False
        self.add_automatic_comment()

    def __str__(self):
        """Human-readable one-line description of the downtime."""
        active = "active" if self.is_in_effect else "inactive"
        kind = "fixed" if self.fixed else "flexible"  # renamed: don't shadow builtin 'type'
        return "%s %s Downtime id=%d %s - %s" % (
            active, kind, self.id, time.ctime(self.start_time), time.ctime(self.end_time))

    def trigger_me(self, other_downtime):
        """Chain *other_downtime* so it is entered when this one is entered."""
        self.activate_me.append(other_downtime)

    def in_scheduled_downtime(self):
        """Return True while the downtime is currently in effect."""
        return self.is_in_effect

    # The referenced host/service object enters now a (or another) scheduled
    # downtime. Write a log message only if it was not already in a downtime.
    def enter(self):
        """Activate the downtime on self.ref and on every chained downtime."""
        res = []
        self.is_in_effect = True
        if self.fixed == False:
            now = time.time()
            self.real_end_time = now + self.duration
        if self.ref.scheduled_downtime_depth == 0:
            self.ref.raise_enter_downtime_log_entry()
            self.ref.create_notifications('DOWNTIMESTART')
        self.ref.scheduled_downtime_depth += 1
        self.ref.in_scheduled_downtime = True
        for dt in self.activate_me:
            res.extend(dt.enter())
        return res

    # The end of the downtime was reached.
    def exit(self):
        """Deactivate the downtime; log/notify when the last downtime ends."""
        res = []
        if self.is_in_effect == True:
            # This was a fixed or a flexible+triggered downtime
            self.is_in_effect = False
            self.ref.scheduled_downtime_depth -= 1
            if self.ref.scheduled_downtime_depth == 0:
                self.ref.raise_exit_downtime_log_entry()
                self.ref.create_notifications('DOWNTIMEEND')
                self.ref.in_scheduled_downtime = False
        else:
            # This was probably a flexible downtime which was not triggered.
            # In this case it silently disappears.
            pass
        self.del_automatic_comment()
        self.can_be_deleted = True
        # When a downtime ends and the service was critical
        # a notification is sent with the next critical check.
        # So we should set a flag here which signals consume_result
        # to send a notification.
        self.ref.in_scheduled_downtime_during_last_check = True
        return res

    # A scheduled downtime was prematurely cancelled.
    def cancel(self):
        """Cancel the downtime before its natural end, plus chained ones."""
        res = []
        self.is_in_effect = False
        self.ref.scheduled_downtime_depth -= 1
        if self.ref.scheduled_downtime_depth == 0:
            self.ref.raise_cancel_downtime_log_entry()
            self.ref.in_scheduled_downtime = False
        self.del_automatic_comment()
        self.can_be_deleted = True
        self.ref.in_scheduled_downtime_during_last_check = True
        # Nagios does not notify on cancelled downtimes
        #res.extend(self.ref.create_notifications('DOWNTIMECANCELLED'))
        # Also cancel other downtimes triggered by me
        for dt in self.activate_me:
            res.extend(dt.cancel())
        return res

    # Scheduling a downtime creates a comment automatically.
    def add_automatic_comment(self):
        """Attach the standard 'scheduled downtime' comment to self.ref."""
        if self.fixed == True:
            text = "This %s has been scheduled for fixed downtime from %s to %s. Notifications for the %s will not be sent out during that time period." % (self.ref.my_type, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)), self.ref.my_type)
        else:
            hours, remainder = divmod(self.duration, 3600)
            minutes, seconds = divmod(remainder, 60)
            text = "This %s has been scheduled for flexible downtime starting between %s and %s and lasting for a period of %d hours and %d minutes. Notifications for the %s will not be sent out during that time period." % (self.ref.my_type, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)), hours, minutes, self.ref.my_type)
        if self.ref.my_type == 'host':
            comment_type = 1
        else:
            comment_type = 2
        c = Comment(self.ref, False, "(Nagios Process)", text, comment_type, 2, 0, False, 0)
        self.comment_id = c.id
        self.extra_comment = c
        self.ref.add_comment(c)

    def del_automatic_comment(self):
        """Mark the automatic comment as deletable (if we have one)."""
        # Instances restored from an old version of Shinken may lack the
        # extra_comment attribute entirely, so use getattr with a default
        # instead of a bare attribute access (which raised AttributeError).
        # TODO: remove this guard in a future version when everyone upgraded.
        extra_comment = getattr(self, 'extra_comment', None)
        if extra_comment is not None:
            extra_comment.can_be_deleted = True
        #self.ref.del_comment(self.comment_id)

    # Fill data with info of item by looking at brok_type
    # in props of properties or running_properties.
    def fill_data_brok_from(self, data, brok_type):
        """Copy into *data* every property flagged for *brok_type*."""
        cls = self.__class__
        # Now config properties
        for prop, entry in cls.properties.items():
            # BUG FIX: the fill_brok flag lives on the property descriptor
            # ('entry'), not on the key string ('prop'); the old check
            # hasattr(prop, 'fill_brok') was always False so data was never
            # filled, and entry['fill_brok'] indexed a non-dict.
            if hasattr(entry, 'fill_brok') and brok_type in entry.fill_brok:
                data[prop] = getattr(self, prop)

    # Get a brok with initial status
    def get_initial_status_brok(self):
        """Return a 'downtime_raise' Brok carrying the full status."""
        data = {'id': self.id}
        self.fill_data_brok_from(data, 'full_status')
        b = Brok('downtime_raise', data)
        return b

    # Called by pickle to serialize the downtime,
    # because we DO NOT WANT REF in this pickleisation!
    def __getstate__(self):
        cls = self.__class__
        # id is not in *_properties
        res = {'id': self.id}
        for prop in cls.properties:
            if hasattr(self, prop):
                res[prop] = getattr(self, prop)
        return res

    # Inverted function of __getstate__
    def __setstate__(self, state):
        cls = self.__class__
        # Maybe it's not a dict but a list like in the old 0.4 format,
        # so we should call the 0.4 function for it
        if isinstance(state, list):
            self.__setstate_deprecated__(state)
            return
        self.id = state['id']
        for prop in cls.properties:
            if prop in state:
                setattr(self, prop, state[prop])
        # Keep the class counter ahead of every restored id.
        if self.id >= cls.id:
            cls.id = self.id + 1

    # This function is DEPRECATED and will be removed in a future version of
    # Shinken. It should not be useful any more after a first load/save pass.
    # Inverted function of __getstate__ for the old list-based format.
    def __setstate_deprecated__(self, state):
        cls = self.__class__
        # Check if the len of this state is like the previous;
        # if not, we will do errors!
        # -1 because of the 'id' prop
        if len(cls.properties) != (len(state) - 1):
            print("Passing downtime")
            return
        self.id = state.pop()
        for prop in cls.properties:
            val = state.pop()
            setattr(self, prop, val)
        if self.id >= cls.id:
            cls.id = self.id + 1
| baloo/shinken | shinken/downtime.py | Python | agpl-3.0 | 11,225 |
from __future__ import absolute_import
from __future__ import print_function
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import warnings
warnings.simplefilter( "always", DeprecationWarning)
import argparse
import getpass
import os
import sys
from nss.error import NSPRError
import nss.io as io
import nss.nss as nss
import nss.ssl as ssl
# -----------------------------------------------------------------------------
# Client-certificate policy levels used for options.client_cert_action;
# the server side checks these with >= / == comparisons, so the ordering
# of the numeric values is significant.
NO_CLIENT_CERT = 0
REQUEST_CLIENT_CERT_ONCE = 1
REQUIRE_CLIENT_CERT_ONCE = 2
REQUEST_CLIENT_CERT_ALWAYS = 3
REQUIRE_CLIENT_CERT_ALWAYS = 4
# Socket connect timeout, in seconds (converted with io.seconds_to_interval).
timeout_secs = 3
# -----------------------------------------------------------------------------
# Utility Functions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Callback Functions
# -----------------------------------------------------------------------------
def password_callback(slot, retry, password):
    """NSS PKCS#11 password callback.

    Return the password given on the command line when available;
    otherwise prompt the user interactively.
    """
    if password:
        return password
    return getpass.getpass("Enter password: ")
def handshake_callback(sock):
    """Report a completed SSL handshake on *sock* to stdout.

    Installed via SSLSocket.set_handshake_callback on both the client and
    the server side.
    """
    report = [
        "-- handshake complete --",
        "peer: %s" % (sock.get_peer_name()),
        "negotiated host: %s" % (sock.get_negotiated_host()),
        "",
        sock.connection_info_str(),
        "-- handshake complete --",
        "",
    ]
    for line in report:
        print(line)
def auth_certificate_callback(sock, check_sig, is_server, certdb):
    """Verify the peer's certificate during the SSL handshake.

    Validates the certificate for its intended usage and, on the client
    side, additionally checks that the certificate subject matches the
    hostname we connected to (man-in-the-middle defense).

    Returns True when the peer certificate is acceptable, False otherwise.
    """
    print("auth_certificate_callback: check_sig=%s is_server=%s" % (check_sig, is_server))
    peer_cert = sock.get_peer_certificate()
    pin_args = sock.get_pkcs11_pin_arg()
    if pin_args is None:
        pin_args = ()

    print("peer cert:\n%s" % peer_cert)

    # This may look inverted, but isn't: a server validates a *client*
    # certificate and a client validates a *server* certificate.
    if is_server:
        intended_usage = nss.certificateUsageSSLClient
    else:
        intended_usage = nss.certificateUsageSSLServer

    try:
        # verify_now raises on failure; errno/strerror on the exception
        # describe why the validation failed.
        approved_usage = peer_cert.verify_now(certdb, check_sig, intended_usage, *pin_args)
    except Exception as e:
        print(e)
        print("Returning cert_is_valid = %s" % False)
        return False

    print("approved_usage = %s" % ', '.join(nss.cert_usage_flags(approved_usage)))

    # Accept when the approved usage overlaps the intended usage.
    cert_is_valid = bool(approved_usage & intended_usage)

    # A server is done at this point, and so is any failed validation.
    if is_server or not cert_is_valid:
        print("Returning cert_is_valid = %s" % cert_is_valid)
        return cert_is_valid

    # Client side: the cert itself is OK, now make sure its subject matches
    # the hostname we intended to reach.
    hostname = sock.get_hostname()
    print("verifying socket hostname (%s) matches cert subject (%s)" % (hostname, peer_cert.subject))
    try:
        # verify_hostname raises on mismatch.
        cert_is_valid = peer_cert.verify_hostname(hostname)
    except Exception as e:
        print(e)
        cert_is_valid = False
    print("Returning cert_is_valid = %s" % cert_is_valid)
    return cert_is_valid
def client_auth_data_callback(ca_names, chosen_nickname, password, certdb):
    """SSL client-authentication callback: supply our certificate and key.

    If *chosen_nickname* is given, use exactly that certificate. Otherwise
    scan all user certificates in *certdb* for one that is currently valid
    and whose issuer appears in *ca_names* (the CA names the server sent).

    Returns a (cert, priv_key) tuple on success, or False when no usable
    certificate is found.
    """
    if chosen_nickname:
        try:
            cert = nss.find_cert_from_nickname(chosen_nickname, password)
            priv_key = nss.find_key_by_any_cert(cert, password)
            print("client cert:\n%s" % cert)
            return cert, priv_key
        except NSPRError as e:
            print(e)
            return False
    else:
        # BUG FIX: this previously read cert.SEC_CERT_NICKNAMES_USER while
        # 'cert' was still None, raising AttributeError; the constant is
        # exposed on the nss module itself.
        nicknames = nss.get_cert_nicknames(certdb, nss.SEC_CERT_NICKNAMES_USER)
        for nickname in nicknames:
            try:
                cert = nss.find_cert_from_nickname(nickname, password)
                print("client cert:\n%s" % cert)
                if cert.check_valid_times():
                    if cert.has_signer_in_ca_names(ca_names):
                        priv_key = nss.find_key_by_any_cert(cert, password)
                        return cert, priv_key
            except NSPRError as e:
                print(e)
        return False
# -----------------------------------------------------------------------------
# Client Implementation
# -----------------------------------------------------------------------------
def Client():
    """Resolve options.hostname, connect (plain or SSL per options.use_ssl)
    and run one Hello/Goodbye record exchange with the server.

    Reads the module-level ``options`` namespace; returns None.
    """
    valid_addr = False
    # Get the IP Address of our server
    try:
        addr_info = io.AddrInfo(options.hostname)
    except Exception as e:
        print("could not resolve host address \"%s\"" % options.hostname)
        return
    # Try each resolved address (filtered by the requested family) until one
    # connects; a fresh socket is created per attempt.
    for net_addr in addr_info:
        if options.family != io.PR_AF_UNSPEC:
            if net_addr.family != options.family:
                continue
        net_addr.port = options.port
        if options.use_ssl:
            sock = ssl.SSLSocket(net_addr.family)
            # Set client SSL socket options
            sock.set_ssl_option(ssl.SSL_SECURITY, True)
            sock.set_ssl_option(ssl.SSL_HANDSHAKE_AS_CLIENT, True)
            sock.set_hostname(options.hostname)
            # Provide a callback which notifies us when the SSL handshake is complete
            sock.set_handshake_callback(handshake_callback)
            # Provide a callback to supply our client certificate info
            sock.set_client_auth_data_callback(client_auth_data_callback, options.client_nickname,
                                               options.password, nss.get_default_certdb())
            # Provide a callback to verify the servers certificate
            sock.set_auth_certificate_callback(auth_certificate_callback,
                                               nss.get_default_certdb())
        else:
            sock = io.Socket(net_addr.family)
        try:
            print("client trying connection to: %s" % (net_addr))
            sock.connect(net_addr, timeout=io.seconds_to_interval(timeout_secs))
            print("client connected to: %s" % (net_addr))
            valid_addr = True
            break
        except Exception as e:
            sock.close()
            print("client connection to: %s failed (%s)" % (net_addr, e))
    if not valid_addr:
        print("Could not establish valid address for \"%s\" in family %s" % \
        (options.hostname, io.addr_family_name(options.family)))
        return
    # Talk to the server
    try:
        data = 'Hello' + '\n' # newline is protocol record separator
        sock.send(data.encode('utf-8'))
        buf = sock.readline()
        if not buf:
            print("client lost connection")
            sock.close()
            return
        buf = buf.decode('utf-8')
        buf = buf.rstrip() # remove newline record separator
        print("client received: %s" % (buf))
    except Exception as e:
        print(e.strerror)
        try:
            sock.close()
        except:
            pass
        return
    # End of (simple) protocol session?
    if buf == 'Goodbye':
        try:
            sock.shutdown()
        except:
            pass
    # Best-effort teardown; the SSL session cache is cleared so a rerun
    # performs a full handshake.
    try:
        sock.close()
        if options.use_ssl:
            ssl.clear_session_cache()
    except Exception as e:
        print(e)
# -----------------------------------------------------------------------------
# Server Implementation
# -----------------------------------------------------------------------------
def Server():
    """Bind to the wildcard address, accept one client, run one
    Hello/Goodbye exchange, then shut down.

    Reads the module-level ``options`` namespace; returns None.
    """
    # Setup an IP Address to listen on any of our interfaces;
    # an unspecified family defaults to IPv4.
    if options.family == io.PR_AF_UNSPEC:
        options.family = io.PR_AF_INET
    net_addr = io.NetworkAddress(io.PR_IpAddrAny, options.port, options.family)
    if options.use_ssl:
        # Perform basic SSL server configuration
        ssl.set_default_cipher_pref(ssl.SSL_RSA_WITH_NULL_MD5, True)
        ssl.config_server_session_id_cache()
        # Get our certificate and private key
        server_cert = nss.find_cert_from_nickname(options.server_nickname, options.password)
        priv_key = nss.find_key_by_any_cert(server_cert, options.password)
        server_cert_kea = server_cert.find_kea_type()
        print("server cert:\n%s" % server_cert)
        sock = ssl.SSLSocket(net_addr.family)
        # Set server SSL socket options
        sock.set_pkcs11_pin_arg(options.password)
        sock.set_ssl_option(ssl.SSL_SECURITY, True)
        sock.set_ssl_option(ssl.SSL_HANDSHAKE_AS_SERVER, True)
        # If we're doing client authentication then set it up
        if options.client_cert_action >= REQUEST_CLIENT_CERT_ONCE:
            sock.set_ssl_option(ssl.SSL_REQUEST_CERTIFICATE, True)
            # NOTE(review): only REQUIRE_CLIENT_CERT_ONCE enables
            # SSL_REQUIRE_CERTIFICATE here; REQUIRE_CLIENT_CERT_ALWAYS falls
            # through. Preserved as-is -- confirm against python-nss docs
            # before changing.
            if options.client_cert_action == REQUIRE_CLIENT_CERT_ONCE:
                sock.set_ssl_option(ssl.SSL_REQUIRE_CERTIFICATE, True)
            sock.set_auth_certificate_callback(auth_certificate_callback, nss.get_default_certdb())
        # Configure the server SSL socket
        sock.config_secure_server(server_cert, priv_key, server_cert_kea)
    else:
        sock = io.Socket(net_addr.family)
    # Bind to our network address and listen for clients
    sock.bind(net_addr)
    print("listening on: %s" % (net_addr))
    sock.listen()
    while True:
        # Accept a connection from a client
        client_sock, client_addr = sock.accept()
        if options.use_ssl:
            client_sock.set_handshake_callback(handshake_callback)
        print("client connect from: %s" % (client_addr))
        while True:
            try:
                # Handle the client connection
                buf = client_sock.readline()
                if not buf:
                    # BUG FIX: message previously read "server lost lost connection"
                    print("server lost connection to %s" % (client_addr))
                    break
                buf = buf.decode('utf-8')
                buf = buf.rstrip()  # remove newline record separator
                print("server received: %s" % (buf))
                data = 'Goodbye' + '\n'  # newline is protocol record separator
                client_sock.send(data.encode('utf-8'))
                # Best-effort shutdown of the client connection.
                try:
                    client_sock.shutdown(io.PR_SHUTDOWN_RCV)
                    client_sock.close()
                except:
                    pass
                break
            except Exception as e:
                print(e.strerror)
                break
        # This example serves exactly one client, then exits.
        break
    # Teardown; failures here are reported but not fatal.
    try:
        sock.shutdown()
        sock.close()
        if options.use_ssl:
            ssl.shutdown_server_session_id_cache()
    except Exception as e:
        print(e)
# -----------------------------------------------------------------------------
class FamilyArgAction(argparse.Action):
    """argparse action mapping a --family option string to the matching
    NSPR address-family constant (io.PR_AF_*)."""

    def __call__(self, parser, namespace, values, option_string=None):
        # nargs=1 hands us a one-element list.
        value = values[0]
        try:
            family = {'inet':   io.PR_AF_INET,
                      'inet6':  io.PR_AF_INET6,
                      'unspec': io.PR_AF_UNSPEC}[value]
        except KeyError:
            raise argparse.ArgumentError(self, "unknown address family (%s)" % (value))
        setattr(namespace, self.dest, family)
# Command-line interface: role selection, address family, NSS database and
# certificate nicknames, and the client-certificate policy flags.
parser = argparse.ArgumentParser(description='SSL example')

parser.add_argument('-C', '--client', action='store_true',
                    help='run as the client')

parser.add_argument('-S', '--server', action='store_true',
                    help='run as the server')

parser.add_argument('-d', '--db-name',
                    help='NSS database name (e.g. "sql:pki")')

parser.add_argument('-H', '--hostname',
                    help='host to connect to')

parser.add_argument('-f', '--family',
                    choices=['unspec', 'inet', 'inet6'],
                    dest='family', action=FamilyArgAction, nargs=1,
                    help='''
If unspec client tries all addresses returned by AddrInfo,
server binds to IPv4 "any" wildcard address.
If inet client tries IPv4 addresses returned by AddrInfo,
server binds to IPv4 "any" wildcard address.
If inet6 client tries IPv6 addresses returned by AddrInfo,
server binds to IPv6 "any" wildcard address''')

parser.add_argument('-4', '--inet',
                    dest='family', action='store_const', const=io.PR_AF_INET,
                    help='set family to inet (see family)')

parser.add_argument('-6', '--inet6',
                    dest='family', action='store_const', const=io.PR_AF_INET6,
                    help='set family to inet6 (see family)')

parser.add_argument('-n', '--server-nickname',
                    help='server certificate nickname')

parser.add_argument('-N', '--client-nickname',
                    help='client certificate nickname')

parser.add_argument('-w', '--password',
                    help='certificate database password')

parser.add_argument('-p', '--port', type=int,
                    help='host port')

parser.add_argument('-e', '--encrypt', dest='use_ssl', action='store_true',
                    help='use SSL connection')

parser.add_argument('-E', '--no-encrypt', dest='use_ssl', action='store_false',
                    help='do not use SSL connection')

parser.add_argument('--require-cert-once', dest='client_cert_action',
                    action='store_const', const=REQUIRE_CLIENT_CERT_ONCE)

parser.add_argument('--require-cert-always', dest='client_cert_action',
                    action='store_const', const=REQUIRE_CLIENT_CERT_ALWAYS)

parser.add_argument('--request-cert-once', dest='client_cert_action',
                    action='store_const', const=REQUEST_CLIENT_CERT_ONCE)

parser.add_argument('--request-cert-always', dest='client_cert_action',
                    action='store_const', const=REQUEST_CLIENT_CERT_ALWAYS)

parser.add_argument('--min-ssl-version',
                    help='minimum SSL version')

# BUG FIX: help text previously said 'minimum SSL version'.
parser.add_argument('--max-ssl-version',
                    help='maximum SSL version')
# Option defaults; hostname defaults to this machine's nodename.
parser.set_defaults(client = False,
                    server = False,
                    db_name = 'sql:pki',
                    hostname = os.uname()[1],
                    family = io.PR_AF_UNSPEC,
                    server_nickname = 'test_server',
                    client_nickname = 'test_user',
                    password = 'DB_passwd',
                    port = 1234,
                    use_ssl = True,
                    client_cert_action = NO_CLIENT_CERT,
                    )

options = parser.parse_args()

# Exactly one of --client / --server must be selected.
if options.client and options.server:
    print("can't be both client and server")
    sys.exit(1)
if not (options.client or options.server):
    print("must be one of client or server")
    sys.exit(1)
# Perform basic configuration and setup: NSS needs the certificate database
# only when SSL is in use.
if options.use_ssl:
    nss.nss_init(options.db_name)
else:
    nss.nss_init_nodb()
ssl.set_domestic_policy()
nss.set_password_callback(password_callback)
# Report the SSL version ranges supported by this NSS build and the
# currently configured defaults.
min_ssl_version, max_ssl_version = \
    ssl.get_supported_ssl_version_range(repr_kind=nss.AsString)
print("Supported SSL version range: min=%s, max=%s" % \
      (min_ssl_version, max_ssl_version))
min_ssl_version, max_ssl_version = \
    ssl.get_default_ssl_version_range(repr_kind=nss.AsString)
print("Default SSL version range: min=%s, max=%s" % \
      (min_ssl_version, max_ssl_version))
# Override the default SSL version range only when the user asked for it.
if options.min_ssl_version is not None or \
   options.max_ssl_version is not None:
    if options.min_ssl_version is not None:
        min_ssl_version = options.min_ssl_version
    if options.max_ssl_version is not None:
        max_ssl_version = options.max_ssl_version
    print("Setting default SSL version range: min=%s, max=%s" % \
          (min_ssl_version, max_ssl_version))
    ssl.set_default_ssl_version_range(min_ssl_version, max_ssl_version)
    min_ssl_version, max_ssl_version = \
        ssl.get_default_ssl_version_range(repr_kind=nss.AsString)
    print("Default SSL version range now: min=%s, max=%s" % \
          (min_ssl_version, max_ssl_version))
# Run as a client or as a server
if options.client:
    print("starting as client")
    Client()
if options.server:
    print("starting as server")
    Server()
# Clean NSS shutdown; failures here are reported but not fatal.
try:
    nss.nss_shutdown()
except Exception as e:
    print(e)
| vadzimt/python-nss | doc/examples/ssl_example.py | Python | gpl-2.0 | 16,895 |
# ~*~ coding:utf-8 ~*~
from PyQt5.Qt import QColor
import math
import numpy as np
from geometry import Geometry
class Sphere:
"""
Класс для общего описания сферы
"""
def __init__(self, render_area):
self.render_area = render_area
self.approximation_step = 0
self.radius = 0
self.projection_name = "default"
# Координаты источника света
self.light_x = 0
self.light_y = 0
self.light_z = -1000
self.geom = Geometry()
    def recalculate(self):
        """Rebuild the sphere mesh from the current radius and
        approximation settings, then apply the active projection.

        The sphere is built band by band: for each band the first while
        loop emits the quad strip of the upper hemisphere (positive z) and
        the second emits the mirrored strip of the lower hemisphere
        (negative z). Every group of 4 consecutive points then becomes one
        quad face.
        """
        # Configure the approximation steps
        circle_count = self.approximation_step
        circle_points_count = self.approximation_step + 2
        # Compute the circles
        self.geom.clear()
        angle_step = 2*math.pi/circle_points_count
        for circle_number in range(0, circle_count):
            # Radii/heights of the two latitude circles bounding this band.
            radius_for_point_1 = self.radius * math.sqrt(1 - math.pow((circle_count - (circle_number+1))/circle_count, 2))
            z_axis_for_point_1 = self.radius * (circle_count-(circle_number+1))/circle_count
            radius_for_point_2 = self.radius * math.sqrt(1 - math.pow((circle_count - circle_number)/circle_count, 2))
            z_axis_for_point_2 = self.radius * (circle_count - circle_number) / circle_count
            angle = 0
            # Upper hemisphere: two points per circle per step -> one edge each.
            while angle < 2*math.pi:
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle, z_axis_for_point_1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle+angle_step, z_axis_for_point_1))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle, z_axis_for_point_2))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle+angle_step, z_axis_for_point_2))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                angle += angle_step
            angle = 2*math.pi
            # Lower hemisphere: same band mirrored to negative z, walked backwards.
            while angle > 0:
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle, -z_axis_for_point_1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle-angle_step, -z_axis_for_point_1))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle, -z_axis_for_point_2))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle-angle_step, -z_axis_for_point_2))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                angle -= angle_step
        # Each consecutive group of 4 points forms one quad face.
        for index in range(0, len(self.geom.points), 4):
            self.geom.faces.append((index, index+1, index+3, index+2))
        self.geom.apply_projection(self.projection_name)
def is_face_visible(self, face):
"""
Определение видимости грани на основе алгоритма Робертса
:param face: грань
:return: True, если видимо, иначе False
"""
p1_index = face[0]
x0 = self.geom.points[p1_index][0]
y0 = self.geom.points[p1_index][1]
z0 = self.geom.points[p1_index][2]
p2_index = face[1]
x1 = self.geom.points[p2_index][0]
y1 = self.geom.points[p2_index][1]
z1 = self.geom.points[p2_index][2]
p3_index = face[2]
x2 = self.geom.points[p3_index][0]
y2 = self.geom.points[p3_index][1]
z2 = self.geom.points[p3_index][2]
a = y0*(z1 - z2) + y1*(z2 - z0) + y2*(z0 - z1)
b = z0*(x1 - x2) + z1*(x2 - x0) + z2*(x0 - x1)
c = x0*(y1 - y2) + x1*(y2 - y0) + x2*(y0 - y1)
d = -(x0*(y1*z2 - y2*z1) + x1*(y2*z0 - y0*z2) + x2*(y0*z1 - y1*z0))
"""
Знак result = Ax + By + Cz + D определяет, с какой стороны по отношению к плоскости находится точка s(x,y,z,w).
Если result > 0, то точка внутри тела
Если result < 0 - на противаположной стороне, а в случае result = 0 точка принадлежит плоскости.
"""
s = np.array([[1, 1, -1000, 1]])
p = np.array([[a],
[b],
[c],
[d]])
result = Geometry.multiplication_matrix(s, p)
return True if result[0][0] < 0 else False
def get_face_light(self, face, color):
"""
Закраска грани с учётом освещения на основе вычисления угла между нормалью грани и вектором освещения
:param face: грань
:param color: цвет грани
:return: цвет
"""
p1_index = face[0]
x0 = self.geom.clear_points[p1_index][0]
y0 = self.geom.clear_points[p1_index][1]
z0 = self.geom.clear_points[p1_index][2]
p2_index = face[1]
x1 = self.geom.clear_points[p2_index][0]
y1 = self.geom.clear_points[p2_index][1]
z1 = self.geom.clear_points[p2_index][2]
p3_index = face[2]
x2 = self.geom.clear_points[p3_index][0]
y2 = self.geom.clear_points[p3_index][1]
z2 = self.geom.clear_points[p3_index][2]
# Вычисляем два вектора, принадлежащих грани
a_x = x1 - x0
a_y = y1 - y0
a_z = z1 - z0
b_x = x2 - x1
b_y = y2 - y1
b_z = z2 - z1
# Считаем нормаль к грани по найденным векторам
normal_x = a_y * b_z - a_z * b_y
normal_y = a_x * b_z - a_z * b_x
normal_z = a_x * b_y - a_y * b_x
# Длина нормали
normal_length = math.sqrt(math.pow(normal_x, 2) + math.pow(normal_y, 2) + math.pow(normal_z, 2))
# Зная координаты источника света, можно вычислить длину вектора от источника света до точки рассмотрения:
light_length = math.sqrt(math.pow(self.light_x, 2) + math.pow(self.light_y, 2) + math.pow(self.light_z, 2))
normal_length = normal_length if normal_length != 0 else 0.0001
light_length = light_length if light_length != 0 else 0.0001
# Косинус угла между данными векторами находим следующим образом:
result = (normal_x * self.light_x + normal_y * self.light_y + normal_z * self.light_z)/(normal_length * light_length)
# Находим интенсивность
return QColor(int(color.red() * (0.5 + 0.5 * result)),
int(color.green() * (0.5 + 0.5 * result)),
int(color.blue() * (0.5 + 0.5 * result)))
    def set_approximation_step(self, step):
        """Set the sphere approximation step (subdivision count) and repaint."""
        self.approximation_step = step
        self.render_area.update()
def set_radius(self, radius):
self.radius = radius * 6
self.render_area.update()
    def set_x_rotate_angle(self, angle):
        """Set the geometry's rotation angle about the X axis and repaint."""
        self.geom.x_rotate_angle = angle
        self.render_area.update()
    def set_y_rotate_angle(self, angle):
        """Set the geometry's rotation angle about the Y axis and repaint."""
        self.geom.y_rotate_angle = angle
        self.render_area.update()
    def set_z_rotate_angle(self, angle):
        """Set the geometry's rotation angle about the Z axis and repaint."""
        self.geom.z_rotate_angle = angle
        self.render_area.update()
    def set_x_move(self, value):
        """Set the geometry's translation along the X axis and repaint."""
        self.geom.x_move = value
        self.render_area.update()
    def set_y_move(self, value):
        """Set the geometry's translation along the Y axis and repaint."""
        self.geom.y_move = value
        self.render_area.update()
    def set_z_move(self, value):
        """Set the geometry's translation along the Z axis and repaint."""
        self.geom.z_move = value
        self.render_area.update()
    def set_x_scale(self, value):
        """Set the geometry's scale factor along the X axis and repaint."""
        self.geom.x_scale = value
        self.render_area.update()
    def set_y_scale(self, value):
        """Set the geometry's scale factor along the Y axis and repaint."""
        self.geom.y_scale = value
        self.render_area.update()
    def set_z_scale(self, value):
        """Set the geometry's scale factor along the Z axis and repaint."""
        self.geom.z_scale = value
        self.render_area.update()
    def set_axonometric_angle_fi(self, value):
        """Set the axonometric projection angle phi and repaint."""
        self.geom.axonometric_angle_fi = value
        self.render_area.update()
    def set_axonometric_angle_psi(self, value):
        """Set the axonometric projection angle psi and repaint."""
        self.geom.axonometric_angle_psi = value
        self.render_area.update()
    def set_oblique_angle_alpha(self, value):
        """Set the oblique projection angle alpha and repaint."""
        self.geom.oblique_angle_alpha = value
        self.render_area.update()
    def set_oblique_L(self, value):
        """Set the oblique projection length factor L and repaint."""
        self.geom.oblique_L = value
        self.render_area.update()
    def set_perspective_angle_fi(self, value):
        """Set the perspective projection angle phi and repaint."""
        self.geom.perspective_angle_fi = value
        self.render_area.update()
    def set_perspective_angle_teta(self, value):
        """Set the perspective projection angle theta and repaint."""
        self.geom.perspective_angle_teta = value
        self.render_area.update()
    def set_perspective_ro(self, value):
        """Set the perspective projection viewpoint distance rho and repaint."""
        self.geom.perspective_ro = value
        self.render_area.update()
    def set_perspective_d(self, value):
        """Set the perspective projection plane distance d and repaint."""
        self.geom.perspective_d = value
        self.render_area.update()
def set_light_x(self, x):
self.light_x = x*10
self.render_area.update()
def set_light_y(self, y):
self.light_y = -y*10
self.render_area.update()
def set_light_z(self, z):
self.light_z = -z*10
self.render_area.update()
| katya-malyk/sphere_approximation | sphere.py | Python | apache-2.0 | 9,596 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.