| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# Program Name: Lychee
# Program Description: MEI document manager for formalized document control
#
# Filename: lychee/__init__.py
# Purpose: Initialize Lychee.
#
# Copyright (C) 2016, 2017 Christopher Antila
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
"""
Initialize Lychee.
"""
__all__ = [
'converters',
'document',
'exceptions',
'logs',
'namespaces',
'signals',
'tui',
'workflow',
'vcs',
'views',
]
from lychee import *
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
nCoda/lychee
|
lychee/__init__.py
|
Python
|
gpl-3.0
| 1,444
|
"""Provide history manager for SCCTool."""
import json
import logging
import scctool.settings.translation
from scctool.settings import getJsonFile, idx2race, race2idx
module_logger = logging.getLogger(__name__)
_ = scctool.settings.translation.gettext
class HistoryManager:
"""History manager for SCCTool."""
__max_length = 100
def __init__(self):
"""Init the history manager."""
self.loadJson()
self.updateDataStructure()
def loadJson(self):
"""Read json data from file."""
try:
with open(getJsonFile('history'), 'r',
encoding='utf-8-sig') as json_file:
data = json.load(json_file)
except Exception:
data = dict()
self.__player_history = data.get('player', [])
self.__team_history = data.get('team', [])
def dumpJson(self):
"""Write json data to file."""
data = dict()
data['player'] = self.__player_history
data['team'] = self.__team_history
try:
with open(getJsonFile('history'), 'w',
encoding='utf-8-sig') as outfile:
json.dump(data, outfile)
except Exception:
module_logger.exception("message")
def updateDataStructure(self):
"""Update the data structure (from a previous version)."""
for idx, item in enumerate(self.__team_history):
if isinstance(item, str):
self.__team_history[idx] = {'team': item, 'logo': '0'}
def insertPlayer(self, player, race):
"""Insert a player into the history."""
player = player.strip()
if not player or player.lower() == "tbd":
return
if isinstance(race, str):
race = race2idx(race)
race = idx2race(race)
for item in self.__player_history:
if item.get('player', '').lower() == player.lower():
self.__player_history.remove(item)
if race == "Random":
race = item.get('race', 'Random')
break
self.__player_history.insert(0, {"player": player, "race": race})
# self.enforeMaxLength("player")
def insertTeam(self, team, logo='0'):
"""Insert a team into the history."""
team = team.strip()
if not team or team.lower() == "tbd":
return
for item in self.__team_history:
if item.get('team', '').lower() == team.lower():
self.__team_history.remove(item)
if logo == '0':
logo = item.get('logo', '0')
break
self.__team_history.insert(0, {"team": team, "logo": logo})
# self.enforeMaxLength("team")
def enforeMaxLength(self, scope=None):
"""Delete old history elements."""
if not scope or scope == "player":
while len(self.__player_history) > self.__max_length:
self.__player_history.pop()
if not scope or scope == "team":
while len(self.__team_history) > self.__max_length:
self.__team_history.pop()
def getPlayerList(self):
"""Return a list of all players in history."""
playerList = list()
for item in self.__player_history:
player = item['player']
if player not in playerList:
playerList.append(player)
return playerList
def getTeamList(self):
"""Return a list of all teams in history."""
teamList = list()
for item in self.__team_history:
team = item.get('team')
if team not in teamList:
teamList.append(team)
return teamList
def getRace(self, player):
"""Look up the race of a player in the history."""
player = player.lower().strip()
race = "Random"
for item in self.__player_history:
if item.get('player', '').lower() == player:
race = item.get('race', 'Random')
break
return race
def getLogo(self, team):
"""Look up the logo of a team in history."""
team = team.lower().strip()
logo = '0'
for item in self.__team_history:
if item.get('team', '').lower() == team:
logo = item.get('logo', '0')
break
return logo
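# --- Editor's note: illustrative usage sketch, not part of the original file ---
# Assumes scctool's settings are configured so that getJsonFile('history')
# resolves to a writable path; the names and values below are placeholders.
#
#     manager = HistoryManager()
#     manager.insertPlayer('PlayerOne', 'Zerg')   # hypothetical player/race
#     manager.insertTeam('TeamOne', logo='1')     # hypothetical team/logo id
#     manager.getRace('playerone')                # -> 'Zerg' (lookup is case-insensitive)
#     manager.dumpJson()                          # persist the history back to disk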
|
teampheenix/StarCraft-Casting-Tool
|
scctool/settings/history.py
|
Python
|
gpl-3.0
| 4,381
|
# coding=utf-8
import unittest
"""804. Unique Morse Code Words
https://leetcode.com/problems/unique-morse-code-words/description/
International Morse Code defines a standard encoding where each letter is
mapped to a series of dots and dashes, as follows: `"a"` maps to `".-"`, `"b"`
maps to `"-..."`, `"c"` maps to `"-.-."`, and so on.
For convenience, the full table for the 26 letters of the English alphabet is
given below:
[".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
Now, given a list of words, each word can be written as a concatenation of the
Morse code of each letter. For example, "cab" can be written as "-.-..--..."
(which is the concatenation "-.-." + ".-" + "-..."). We'll call such a
concatenation the transformation of a word.
Return the number of different transformations among all words we have.
**Example:**
**Input:** words = ["gin", "zen", "gig", "msg"]
**Output:** 2
**Explanation:**
The transformation of each word is:
"gin" -> "--...-."
"zen" -> "--...-."
"gig" -> "--...--."
"msg" -> "--...--."
There are 2 different transformations, "--...-." and "--...--.".
**Note:**
* The length of `words` will be at most `100`.
* Each `words[i]` will have length in range `[1, 12]`.
* `words[i]` will only consist of lowercase letters.
Similar Questions:
"""
class Solution(object):
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
self.CODE = [
".-", "-...", "-.-.", "-..", ".",
"..-.", "--.", "....", "..", ".---",
"-.-", ".-..", "--", "-.", "---",
".--.", "--.-", ".-.", "...", "-",
"..-", "...-", ".--", "-..-", "-.--", "--.."]
cache = {self._trans(i) for i in words}
return len(cache)
def _trans(self, w):
return ''.join(self.CODE[ord(i) - ord('a')] for i in w)
class T(unittest.TestCase):
def test(self):
s = Solution()
self.assertEqual(s.uniqueMorseRepresentations(["gin", "zen", "gig", "msg"]), 2)
if __name__ == "__main__":
unittest.main()
|
openqt/algorithms
|
leetcode/python/ac/lc804-unique-morse-code-words.py
|
Python
|
gpl-3.0
| 2,281
|
import warnings
from pathlib import Path
from typing import Union
try:
import xlwings as xw
except ImportError:
xw = None
from larray.util.misc import _positive_integer
from larray.core.group import _translate_sheet_name
from larray.core.array import asarray, zip_array_items
from larray.example import load_example_data, EXAMPLE_EXCEL_TEMPLATES_DIR
_default_items_size = {}
def _validate_template_filename(filename: Union[str, Path]) -> Path:
if isinstance(filename, str):
filename = Path(filename)
suffix = filename.suffix
if not suffix:
suffix = '.crtx'
if suffix != '.crtx':
raise ValueError(f"Extension for the excel template file must be '.crtx' instead of {suffix}")
return filename.with_suffix(suffix)
class AbstractReportItem:
def __init__(self, template_dir=None, template=None, graphs_per_row=1):
self.template_dir = template_dir
self.template = template
self.default_items_size = _default_items_size.copy()
self.graphs_per_row = graphs_per_row
@property
def template_dir(self):
r"""
Set the path to the directory containing the Excel template files (with '.crtx' extension).
This method is mainly useful if your template files are located in several directories,
otherwise pass the template directory directly to the ExcelReport constructor.
Parameters
----------
template_dir : str or Path
Path to the directory containing the Excel template files.
See Also
--------
set_graph_template
Examples
--------
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> # ... add some graphs using template files from 'C:\excel_templates_dir'
>>> report.template_dir = r'C:\other_templates_dir' # doctest: +SKIP
>>> # ... add some graphs using template files from 'C:\other_templates_dir'
"""
return self._template_dir
@template_dir.setter
def template_dir(self, template_dir):
if template_dir is not None:
if isinstance(template_dir, str):
template_dir = Path(template_dir)
if not isinstance(template_dir, Path):
raise TypeError(f"Expected a string or a pathlib.Path object. "
f"Got an object of type {type(template_dir).__name__} instead.")
if not template_dir.is_dir():
raise ValueError(f"The directory {template_dir} could not be found.")
self._template_dir = template_dir
@property
def template(self):
r"""
Set a default Excel template file.
Parameters
----------
template : str or Path
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
Passing the name of the template (only if a template directory has been set)
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> report.template = 'Line'
>>> sheet_population = report.new_sheet('Population')
>>> sheet_population.add_graph(demo.population['Belgium'],'Belgium')
Passing the full path of the template file
>>> # if no default template directory has been set
>>> # or if the new template is located in another directory,
>>> # you must provide the full path
>>> sheet_population.template = r'C:\other_templates_dir\Line_Marker.crtx' # doctest: +SKIP
>>> sheet_population.add_graph(demo.population['Germany'],'Germany') # doctest: +SKIP
"""
return self._template
@template.setter
def template(self, template):
if template is not None:
if self.template_dir is None:
raise RuntimeError("Please set 'template_dir' first")
filename = _validate_template_filename(template)
template = self.template_dir / filename
self._template = template
def set_item_default_size(self, kind, width=None, height=None):
r"""
Override the default 'width' and 'height' values for the given kind of item.
A new value must be provided for at least one of 'width' or 'height'.
Parameters
----------
kind : str
kind of item for which default values of 'width' and/or 'height' are modified.
Currently available kinds are 'title' and 'graph'.
width : int, optional
new default width value.
height : int, optional
new default height value.
Examples
--------
>>> report = ExcelReport()
>>> report.set_item_default_size('graph', width=450, height=250)
"""
if width is None and height is None:
raise ValueError("No value provided for both 'width' and 'heigth'. "
"Please provide one for at least 'width' or 'heigth'")
if kind not in self.default_items_size:
item_types = sorted(self.default_items_size.keys())
raise ValueError(f"Item type {kind} is not registered. Please choose in list {item_types}")
if width is None:
width = self.default_items_size[kind].width
if height is None:
height = self.default_items_size[kind].height
self.default_items_size[kind] = ItemSize(width, height)
@property
def graphs_per_row(self):
r"""
Default number of graphs per row.
Parameters
----------
graphs_per_row: int
See Also
--------
ReportSheet.newline
"""
return self._graphs_per_row
@graphs_per_row.setter
def graphs_per_row(self, graphs_per_row):
_positive_integer(graphs_per_row)
self._graphs_per_row = graphs_per_row
class AbstractReportSheet(AbstractReportItem):
r"""
Represents a sheet dedicated to containing only graphical items (title banners, graphs).
See :py:obj:`ExcelReport` for use cases.
Parameters
----------
template_dir : str or Path, optional
Path to the directory containing the Excel template files (with a '.crtx' extension).
Defaults to None.
template : str or Path, optional
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Defaults to None.
graphs_per_row : int, optional
Default number of graphs per row. Defaults to 1.
See Also
--------
ExcelReport
"""
def add_title(self, title, width=None, height=None, fontsize=11):
r"""
Add a title item to the current sheet.
Note that the current method only adds a new item to the list of items to be generated.
The report Excel file is generated only when the :py:obj:`~ExcelReport.to_excel` method is called.
Parameters
----------
title : str
Text to write in the title item.
width : int, optional
width of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
height : int, optional
height of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
fontsize : int, optional
fontsize of the displayed text. Defaults to 11.
Examples
--------
>>> report = ExcelReport()
>>> first_sheet = report.new_sheet('First_sheet')
>>> first_sheet.add_title('Title banner with default width, height and fontsize')
>>> first_sheet.add_title('Larger title banner', width=1200, height=100)
>>> first_sheet.add_title('Bigger fontsize', fontsize=13)
>>> # do not forget to call 'to_excel' to create the report file
>>> report.to_excel('Report.xlsx')
"""
pass
def add_graph(self, data, title=None, template=None, width=None, height=None, min_y=None, max_y=None,
xticks_spacing=None, customize_func=None, customize_kwargs=None):
r"""
Add a graph item to the current sheet.
Note that the current method only adds a new item to the list of items to be generated.
The report Excel file is generated only when the :py:obj:`~ExcelReport.to_excel` method is called.
Parameters
----------
data : 1D or 2D array-like
1D or 2D array representing the data associated with the graph.
The first row represents the abscissa labels.
Each additional row represents a new series and must start with the name of the current series.
title : str, optional
title of the graph. Defaults to None.
template : str or Path, optional
name of the template to be used to generate the graph.
The full path to the template file must be provided if no template directory has been set
or if the template file belongs to another directory.
Defaults to the defined template (see :py:obj:`~ExcelReport.set_graph_template`).
width : int, optional
width of the graph item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
height : int, optional
height of the graph item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
min_y: int, optional
minimum value for the Y axis.
max_y: int, optional
maximum value for the Y axis.
xticks_spacing: int, optional
space interval between two ticks along the X axis.
customize_func: function, optional
user defined function to personalize the graph.
The function must take the Chart object as first argument.
All keyword arguments defined in customize_kwargs are passed to the function at call.
customize_kwargs: dict, optional
keyword arguments passed to the function `customize_func` at call.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> sheet_be = report.new_sheet('Belgium')
Specifying the 'template'
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population', template='Line')
Specifying the 'template', 'width' and 'height' values
>>> sheet_be.add_graph(demo.births['Belgium'], 'Births', template='Line', width=450, height=250)
Setting a default template
>>> sheet_be.template = 'Line_Marker'
>>> sheet_be.add_graph(demo.deaths['Belgium'], 'Deaths')
Specify the minimum and maximum values for the Y axis
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population (min/max Y axis = 5/6 millions)', min_y=5e6, max_y=6e6)
Specify the interval between two ticks (X axis)
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population (every 2 years)', xticks_spacing=2)
Dumping the report Excel file
>>> # do not forget to call 'to_excel' to create the report file
>>> report.to_excel('Demography_Report.xlsx')
"""
pass
def add_graphs(self, array_per_title, axis_per_loop_variable, template=None, width=None, height=None,
graphs_per_row=1, min_y=None, max_y=None, xticks_spacing=None, customize_func=None,
customize_kwargs=None):
r"""
Add multiple graph items to the current sheet. This method is mainly useful when multiple
graphs are generated by iterating over one or several axes of an array (see examples below).
The report Excel file is generated only when the :py:obj:`~ExcelReport.to_excel` is called.
Parameters
----------
array_per_title: dict
dictionary containing pairs (title template, array).
axis_per_loop_variable: dict
dictionary containing pairs (variable used in the title template, axis).
template : str or Path, optional
name of the template to be used to generate the graph.
The full path to the template file must be provided if no template directory has been set
or if the template file belongs to another directory.
Defaults to the defined template (see :py:obj:`~ExcelReport.set_graph_template`).
width : int, optional
width of the graph items. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
height : int, optional
height of the graph items. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
graphs_per_row: int, optional
Number of graphs per row. Defaults to 1.
min_y: int, optional
minimum value for the Y axis.
max_y: int, optional
maximum value for the Y axis.
xticks_spacing: int, optional
space interval between two ticks along the X axis.
customize_func: function, optional
user defined function to personalize the graph.
The function must take the Chart object as first argument.
All keyword arguments defined in customize_kwargs are passed to the function at call.
customize_kwargs: dict, optional
keyword arguments passed to the function `customize_func` at call.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> sheet_population = report.new_sheet('Population')
>>> population = demo.population
Generate a new graph for each combination of gender and year
>>> sheet_population.add_graphs(
... {'Population of {gender} by country in {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2)
Specify the minimum and maximum values for the Y axis
>>> sheet_population.add_graphs({'Population of {gender} by country for the year {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2, min_y=0, max_y=50e6)
Specify the interval between two ticks (X axis)
>>> sheet_population.add_graphs({'Population of {gender} by country for the year {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2, xticks_spacing=2)
>>> # do not forget to call 'to_excel' to create the report file
>>> report.to_excel('Demography_Report.xlsx')
"""
pass
def newline(self):
r"""
Force a new row of graphs.
"""
pass
class AbstractExcelReport(AbstractReportItem):
r"""
Automate the generation of multiple graphs in an Excel file.
The ExcelReport instance is initially populated with information
(data, title, destination sheet, template, size) required to create the graphs.
Once all information has been provided, the :py:obj:`~ExcelReport.to_excel` method
is called to generate an Excel file with all graphs in one step.
Parameters
----------
template_dir : str or Path, optional
Path to the directory containing the Excel template files (with a '.crtx' extension).
Defaults to None.
template : str or Path, optional
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Defaults to None.
graphs_per_row: int, optional
Default number of graphs per row.
Defaults to 1.
Notes
-----
The data associated with all graphical items is dumped in the same sheet named '__data__'.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
Set a new destination sheet
>>> sheet_be = report.new_sheet('Belgium')
Add a new title item
>>> sheet_be.add_title('Population, births and deaths')
Add a new graph item (each new graph is placed to the right of the previous one unless you use newline() or add_title())
>>> # using default 'width' and 'height' values
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population', template='Line')
>>> # specifying the 'width' and 'height' values
>>> sheet_be.add_graph(demo.births['Belgium'], 'Births', template='Line', width=450, height=250)
Override the default 'width' and 'height' values for graphs
>>> sheet_be.set_item_default_size('graph', width=450, height=250)
>>> # add a new graph with the new default 'width' and 'height' values
>>> sheet_be.add_graph(demo.deaths['Belgium'], 'Deaths')
Set a default template for all next graphs
>>> # if a default template directory has been set, just pass the name
>>> sheet_be.template = 'Line'
>>> # otherwise, give the full path to the template file
>>> sheet_be.template = r'C:\other_template_dir\Line_Marker.crtx' # doctest: +SKIP
>>> # add a new graph with the default template
>>> sheet_be.add_graph(demo.population['Belgium', 'Female'], 'Population - Female')
>>> sheet_be.add_graph(demo.population['Belgium', 'Male'], 'Population - Male')
Specify the number of graphs per row
>>> sheet_countries = report.new_sheet('All countries')
>>> sheet_countries.graphs_per_row = 2
>>> for combined_labels, subset in demo.population.items(('time', 'gender')):
... title = ' - '.join([str(label) for label in combined_labels])
... sheet_countries.add_graph(subset, title)
Force a new row of graphs
>>> sheet_countries.newline()
Add multiple graphs at once (add a new graph for each combination of gender and year)
>>> population = demo.population
>>> sheet_countries.add_graphs({'Population of {gender} by country in {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2)
Generate the report Excel file
>>> report.to_excel('Demography_Report.xlsx')
"""
def new_sheet(self, sheet_name):
r"""
Add a new empty output sheet.
This sheet will contain only graphical elements; all data are exported
to a dedicated separate sheet.
Parameters
----------
sheet_name : str
name of the current sheet.
Returns
-------
sheet: ReportSheet
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> # prepare new output sheet named 'Belgium'
>>> sheet_be = report.new_sheet('Belgium')
>>> # add graph to the output sheet 'Belgium'
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population', template='Line')
"""
pass
def sheet_names(self):
r"""
Returns the names of the output sheets.
Examples
--------
>>> report = ExcelReport()
>>> sheet_population = report.new_sheet('Pop')
>>> sheet_births = report.new_sheet('Births')
>>> sheet_deaths = report.new_sheet('Deaths')
>>> report.sheet_names()
['Pop', 'Births', 'Deaths']
"""
pass
def to_excel(self, filepath, data_sheet_name='__data__', overwrite=True):
r"""
Generate the report Excel file.
Parameters
----------
filepath : str or Path
Path of the report file for the dump.
data_sheet_name : str, optional
name of the Excel sheet where all data associated with items is dumped.
Defaults to '__data__'.
overwrite : bool, optional
whether to overwrite an existing report file.
Defaults to True.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> report.template = 'Line_Marker'
>>> for c in demo.country:
... sheet_country = report.new_sheet(c)
... sheet_country.add_graph(demo.population[c], 'Population')
... sheet_country.add_graph(demo.births[c], 'Births')
... sheet_country.add_graph(demo.deaths[c], 'Deaths')
Basic usage
>>> report.to_excel('Demography_Report.xlsx')
Alternative data sheet name
>>> report.to_excel('Demography_Report.xlsx', data_sheet_name='Data Tables') # doctest: +SKIP
Check whether the output file already exists
>>> report.to_excel('Demography_Report.xlsx', overwrite=False) # doctest: +SKIP
Traceback (most recent call last):
...
ValueError: Sheet named 'Belgium' already present in workbook
"""
pass
if xw is not None:
from xlwings.constants import LegendPosition, HAlign, VAlign, ChartType, RowCol, AxisType, Constants
from larray.inout.xw_excel import open_excel
class ItemSize:
def __init__(self, width, height):
self.width = width
self.height = height
@property
def width(self):
return self._width
@width.setter
def width(self, width):
_positive_integer(width)
self._width = width
@property
def height(self):
return self._height
@height.setter
def height(self, height):
_positive_integer(height)
self._height = height
class ExcelTitleItem(ItemSize):
_default_size = ItemSize(1000, 50)
def __init__(self, text, fontsize, top, left, width, height):
ItemSize.__init__(self, width, height)
self.top = top
self.left = left
self.text = str(text)
_positive_integer(fontsize)
self.fontsize = fontsize
def dump(self, sheet, data_sheet, row):
data_cells = data_sheet.Cells
# add title in data sheet
data_cells(row, 1).Value = self.text
# generate title banner in destination sheet
msoShapeRectangle = 1
msoThemeColorBackground1 = 14
sheet_shapes = sheet.Shapes
shp = sheet_shapes.AddShape(Type=msoShapeRectangle, Left=self.left, Top=self.top,
Width=self.width, Height=self.height)
fill = shp.Fill
fill.ForeColor.ObjectThemeColor = msoThemeColorBackground1
fill.Solid()
shp.Line.Visible = False
frame = shp.TextFrame
chars = frame.Characters()
chars.Text = self.text
font = chars.Font
font.Color = 1
font.Bold = True
font.Size = self.fontsize
frame.HorizontalAlignment = HAlign.xlHAlignLeft
frame.VerticalAlignment = VAlign.xlVAlignCenter
shp.SetShapesDefaultProperties()
# update and return current row position in data sheet (+1 for title +1 for blank line)
return row + 2
_default_items_size['title'] = ExcelTitleItem._default_size
class ExcelGraphItem(ItemSize):
_default_size = ItemSize(427, 230)
def __init__(self, data, title, template, top, left, width, height, min_y, max_y,
xticks_spacing, customize_func, customize_kwargs):
ItemSize.__init__(self, width, height)
self.top = top
self.left = left
self.title = str(title) if title is not None else None
data = asarray(data)
if not (1 <= data.ndim <= 2):
raise ValueError(f"Expected 1D or 2D array for data argument. Got array of dimensions {data.ndim}")
self.data = data
if template is not None:
template = Path(template)
if not template.is_file():
raise ValueError(f"Could not find template file {template}")
self.template = template
self.min_y = min_y
self.max_y = max_y
self.xticks_spacing = xticks_spacing
if customize_func is not None and not callable(customize_func):
raise TypeError(f"Expected a function for the argument 'customize_func'. "
f"Got object of type {type(customize_func).__name__} instead.")
self.customize_func = customize_func
self.customize_kwargs = customize_kwargs
def dump(self, sheet, data_sheet, row):
data_range = data_sheet.Range
data_cells = data_sheet.Cells
# write graph title in data sheet
data_cells(row, 1).Value = self.title
row += 1
# dump data to make the graph in data sheet
data = self.data
nb_series = 1 if data.ndim == 1 else data.shape[0]
nb_xticks = data.size if data.ndim == 1 else data.shape[1]
last_row, last_col = row + nb_series, nb_xticks + 1
data_range(data_cells(row, 1), data_cells(last_row, last_col)).Value = data.dump(na_repr=None)
data_cells(row, 1).Value = ''
# generate graph in destination sheet
sheet_charts = sheet.ChartObjects()
obj = sheet_charts.Add(self.left, self.top, self.width, self.height)
obj_chart = obj.Chart
source = data_range(data_cells(row, 1), data_cells(last_row, last_col))
obj_chart.SetSourceData(source)
obj_chart.ChartType = ChartType.xlLine
# title
if self.title is not None:
obj_chart.HasTitle = True
obj_chart.ChartTitle.Caption = self.title
# legend
obj_chart.Legend.Position = LegendPosition.xlLegendPositionBottom
# template
if self.template is not None:
obj_chart.ApplyChartTemplate(self.template)
# min - max on Y axis
if self.min_y is not None:
obj_chart.Axes(AxisType.xlValue).MinimumScale = self.min_y
if self.max_y is not None:
obj_chart.Axes(AxisType.xlValue).MaximumScale = self.max_y
# xticks_spacing
if self.xticks_spacing is not None:
obj_chart.Axes(AxisType.xlCategory).TickLabelSpacing = self.xticks_spacing
obj_chart.Axes(AxisType.xlCategory).TickMarkSpacing = self.xticks_spacing
obj_chart.Axes(AxisType.xlCategory).TickLabelPosition = Constants.xlLow
# user's function (to apply on remaining kwargs)
if self.customize_func is not None:
self.customize_func(obj_chart, **self.customize_kwargs)
# flagflip
if nb_series > 1 and nb_xticks == 1:
obj_chart.PlotBy = RowCol.xlRows
# update and return current row position
return row + nb_series + 2
_default_items_size['graph'] = ExcelGraphItem._default_size
class ReportSheet(AbstractReportSheet):
def __init__(self, excel_report, name, template_dir=None, template=None, graphs_per_row=1):
name = _translate_sheet_name(name)
self.excel_report = excel_report
self.name = name
self.items = []
self.top = 0
self.left = 0
self.position_in_row = 1
self.curline_height = 0
if template_dir is None:
template_dir = excel_report.template_dir
if template is None:
template = excel_report.template
AbstractReportSheet.__init__(self, template_dir, template, graphs_per_row)
def add_title(self, title, width=None, height=None, fontsize=11):
if width is None:
width = self.default_items_size['title'].width
if height is None:
height = self.default_items_size['title'].height
self.newline()
self.items.append(ExcelTitleItem(title, fontsize, self.top, 0, width, height))
self.top += height
def add_graph(self, data, title=None, template=None, width=None, height=None, min_y=None, max_y=None,
xticks_spacing=None, customize_func=None, customize_kwargs=None):
if width is None:
width = self.default_items_size['graph'].width
if height is None:
height = self.default_items_size['graph'].height
if template is not None:
self.template = template
template = self.template
if self.graphs_per_row is not None and self.position_in_row > self.graphs_per_row:
self.newline()
self.items.append(ExcelGraphItem(data, title, template, self.top, self.left, width, height,
min_y, max_y, xticks_spacing, customize_func, customize_kwargs))
self.left += width
self.curline_height = max(self.curline_height, height)
self.position_in_row += 1
def add_graphs(self, array_per_title, axis_per_loop_variable, template=None, width=None, height=None,
graphs_per_row=1, min_y=None, max_y=None, xticks_spacing=None, customize_func=None,
customize_kwargs=None):
loop_variable_names = axis_per_loop_variable.keys()
axes = tuple(axis_per_loop_variable.values())
titles = array_per_title.keys()
arrays = array_per_title.values()
if graphs_per_row is not None:
previous_graphs_per_row = self.graphs_per_row
self.graphs_per_row = graphs_per_row
if self.position_in_row > 1:
self.newline()
for loop_variable_values, arrays_chunk in zip_array_items(arrays, axes=axes):
loop_variables_dict = dict(zip(loop_variable_names, loop_variable_values))
for title_template, array_chunk in zip(titles, arrays_chunk):
title = title_template.format(**loop_variables_dict)
self.add_graph(array_chunk, title, template, width, height, min_y, max_y, xticks_spacing,
customize_func, customize_kwargs)
if graphs_per_row is not None:
self.graphs_per_row = previous_graphs_per_row
def newline(self):
self.top += self.curline_height
self.curline_height = 0
self.left = 0
self.position_in_row = 1
def _to_excel(self, workbook, data_row):
# use first sheet as data sheet
data_sheet = workbook.Worksheets(1)
data_cells = data_sheet.Cells
# write destination sheet name in data sheet
data_cells(data_row, 1).Value = self.name
data_row += 2
# create new empty sheet in workbook (will contain output graphical items)
# Hack, since just specifying "After" is broken in certain environments
# see: https://stackoverflow.com/questions/40179804/adding-excel-sheets-to-end-of-workbook
dest_sheet = workbook.Worksheets.Add(Before=None, After=workbook.Sheets(workbook.Sheets.Count))
dest_sheet.Name = self.name
# for each item, dump data + generate associated graphical items
for item in self.items:
data_row = item.dump(dest_sheet, data_sheet, data_row)
# reset
self.top = 0
self.left = 0
self.curline_height = 0
# return current row in data sheet
return data_row
# TODO : add a new section about this class in the tutorial
class ExcelReport(AbstractExcelReport):
def __init__(self, template_dir=None, template=None, graphs_per_row=1):
AbstractExcelReport.__init__(self, template_dir, template, graphs_per_row)
self.sheets = {}
def sheet_names(self):
return [sheet_name for sheet_name in self.sheets.keys()]
def __getitem__(self, key):
return self.sheets[key]
# TODO : Do not implement __setitem__ and move code below to new_sheet()?
def __setitem__(self, key, value, warn_stacklevel=2):
if not isinstance(value, ReportSheet):
raise ValueError(f"Expected ReportSheet object. Got {type(value).__name__} object instead.")
if key in self.sheet_names():
warnings.warn(f"Sheet '{key}' already exists in the report and will be reset",
stacklevel=warn_stacklevel)
self.sheets[key] = value
def __delitem__(self, key):
del self.sheets[key]
def __repr__(self):
return f'sheets: {self.sheet_names()}'
def new_sheet(self, sheet_name):
sheet = ReportSheet(self, sheet_name, self.template_dir, self.template, self.graphs_per_row)
self.__setitem__(sheet_name, sheet, warn_stacklevel=3)
return sheet
def to_excel(self, filepath, data_sheet_name='__data__', overwrite=True):
with open_excel(filepath, overwrite_file=overwrite) as wb:
# from here on, we use pure win32com objects instead of
# larray.excel or xlwings objects as this is faster
xl_wb = wb.api
# rename first sheet
xl_wb.Worksheets(1).Name = data_sheet_name
# dump items for each output sheet
data_sheet_row = 1
for sheet in self.sheets.values():
data_sheet_row = sheet._to_excel(xl_wb, data_sheet_row)
wb.save()
# reset
self.sheets.clear()
else:
class ReportSheet(AbstractReportSheet):
def __init__(self):
raise Exception("ReportSheet class cannot be instantiated because xlwings is not installed")
class ExcelReport(AbstractExcelReport):
def __init__(self):
raise Exception("ExcelReport class cannot be instantiated because xlwings is not installed")
ExcelReport.__doc__ = AbstractExcelReport.__doc__
ReportSheet.__doc__ = AbstractReportSheet.__doc__
|
liam2/larray
|
larray/inout/xw_reporting.py
|
Python
|
gpl-3.0
| 35,378
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Early initialization and main entry point.
qutebrowser's initialization process roughly looks like this:
- This file gets imported, either via the setuptools entry point or
__main__.py.
- At import time, we check for the correct Python version and show an error if
it's too old.
- The main() function in this file gets invoked
- Argument parsing takes place
- earlyinit.early_init() gets invoked to do various low-level initialization
and checks whether all dependencies are met.
- app.run() gets called, which takes over.
See the docstring of app.py for details.
"""
import sys
import json
import qutebrowser
try:
from qutebrowser.misc.checkpyver import check_python_version
except ImportError:
try:
# python2
from .misc.checkpyver import check_python_version
except (SystemError, ValueError):
# Import without module - SystemError on Python3, ValueError (?!?) on
# Python2
sys.stderr.write("Please don't run this script directly, do something "
"like python3 -m qutebrowser instead.\n")
sys.stderr.flush()
sys.exit(100)
check_python_version()
import argparse # pylint: disable=wrong-import-order
from qutebrowser.misc import earlyinit
def get_argparser():
"""Get the argparse parser."""
parser = argparse.ArgumentParser(prog='qutebrowser',
description=qutebrowser.__description__)
parser.add_argument('-B', '--basedir', help="Base directory for all "
"storage.")
parser.add_argument('-C', '--config-py', help="Path to config.py.",
metavar='CONFIG')
parser.add_argument('-V', '--version', help="Show version and quit.",
action='store_true')
parser.add_argument('-s', '--set', help="Set a temporary setting for "
"this session.", nargs=2, action='append',
dest='temp_settings', default=[],
metavar=('OPTION', 'VALUE'))
parser.add_argument('-r', '--restore', help="Restore a named session.",
dest='session')
parser.add_argument('-R', '--override-restore', help="Don't restore a "
"session even if one would be restored.",
action='store_true')
parser.add_argument('--target', choices=['auto', 'tab', 'tab-bg',
'tab-silent', 'tab-bg-silent',
'window'],
help="How URLs should be opened if there is already a "
"qutebrowser instance running.")
parser.add_argument('--backend', choices=['webkit', 'webengine'],
help="Which backend to use.")
parser.add_argument('--enable-webengine-inspector', action='store_true',
help="Enable the web inspector for QtWebEngine. Note "
"that this is a SECURITY RISK and you should not "
"visit untrusted websites with the inspector turned "
"on. See https://bugreports.qt.io/browse/QTBUG-50725 "
"for more details. This is not needed anymore since "
"Qt 5.11 where the inspector is always enabled and "
"secure.")
parser.add_argument('--json-args', help=argparse.SUPPRESS)
parser.add_argument('--temp-basedir-restarted', help=argparse.SUPPRESS)
debug = parser.add_argument_group('debug arguments')
debug.add_argument('-l', '--loglevel', dest='loglevel',
help="Set loglevel", default='info',
choices=['critical', 'error', 'warning', 'info',
'debug', 'vdebug'])
debug.add_argument('--logfilter', type=logfilter_error,
help="Comma-separated list of things to be logged "
"to the debug log on stdout.")
debug.add_argument('--loglines',
help="How many lines of the debug log to keep in RAM "
"(-1: unlimited).",
default=2000, type=int)
debug.add_argument('-d', '--debug', help="Turn on debugging options.",
action='store_true')
debug.add_argument('--json-logging', action='store_true', help="Output log"
" lines in JSON format (one object per line).")
debug.add_argument('--nocolor', help="Turn off colored logging.",
action='store_false', dest='color')
debug.add_argument('--force-color', help="Force colored logging",
action='store_true')
debug.add_argument('--nowindow', action='store_true', help="Don't show "
"the main window.")
debug.add_argument('-T', '--temp-basedir', action='store_true', help="Use "
"a temporary basedir.")
debug.add_argument('--no-err-windows', action='store_true', help="Don't "
"show any error windows (used for tests/smoke.py).")
debug.add_argument('--qt-arg', help="Pass an argument with a value to Qt. "
"For example, you can do "
"`--qt-arg geometry 650x555+200+300` to set the window "
"geometry.", nargs=2, metavar=('NAME', 'VALUE'),
action='append')
debug.add_argument('--qt-flag', help="Pass an argument to Qt as flag.",
nargs=1, action='append')
debug.add_argument('-D', '--debug-flag', type=debug_flag_error,
default=[], help="Pass name of debugging feature to be"
" turned on.", action='append', dest='debug_flags')
parser.add_argument('command', nargs='*', help="Commands to execute on "
"startup.", metavar=':command')
# URLs will actually be in command
parser.add_argument('url', nargs='*', help="URLs to open on startup "
"(empty as a window separator).")
return parser
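# --- Editor's note: illustrative invocations, not part of the original file ---
# Built only from the flags defined in get_argparser() above; the setting name
# and URLs are placeholders chosen for illustration:
#
#     qutebrowser --temp-basedir --loglevel debug
#     qutebrowser --backend webengine -s content.javascript.enabled false
#     qutebrowser ':open -t https://example.com' https://example.org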
def directory(arg):
if not arg:
raise argparse.ArgumentTypeError("Invalid empty value")
return arg
def logfilter_error(logfilter):
"""Validate logger names passed to --logfilter.
Args:
logfilter: A comma separated list of logger names.
"""
from qutebrowser.utils import log
if set(logfilter.lstrip('!').split(',')).issubset(log.LOGGER_NAMES):
return logfilter
else:
raise argparse.ArgumentTypeError(
"filters: Invalid value {} - expected a list of: {}".format(
logfilter, ', '.join(log.LOGGER_NAMES)))
def debug_flag_error(flag):
"""Validate flags passed to --debug-flag.
Available flags:
debug-exit: Turn on debugging of late exit.
pdb-postmortem: Drop into pdb on exceptions.
no-sql-history: Don't store history items.
no-scroll-filtering: Process all scrolling updates.
log-requests: Log all network requests.
log-scroll-pos: Log all scrolling changes.
stack: Enable Chromium stack logging.
chromium: Enable Chromium logging.
werror: Turn Python warnings into errors.
"""
valid_flags = ['debug-exit', 'pdb-postmortem', 'no-sql-history',
'no-scroll-filtering', 'log-requests', 'lost-focusproxy',
'log-scroll-pos', 'stack', 'chromium', 'werror']
if flag in valid_flags:
return flag
else:
raise argparse.ArgumentTypeError("Invalid debug flag - valid flags: {}"
.format(', '.join(valid_flags)))
def main():
parser = get_argparser()
argv = sys.argv[1:]
args = parser.parse_args(argv)
if args.json_args is not None:
# Restoring after a restart.
# When restarting, we serialize the argparse namespace into json, and
# construct a "fake" argparse.Namespace here based on the data loaded
# from json.
data = json.loads(args.json_args)
args = argparse.Namespace(**data)
earlyinit.early_init(args)
# We do these imports late as earlyinit needs to be run first (because of
# version checking and other early initialization)
from qutebrowser import app
return app.run(args)
|
t-wissmann/qutebrowser
|
qutebrowser/qutebrowser.py
|
Python
|
gpl-3.0
| 9,154
|
from typing import Optional
from lib.data import CustomFieldArgs
async def fieldUser(args: CustomFieldArgs) -> Optional[str]:
if args.field.lower() == 'user' or args.field.lower() == 'nick':
if args.nick:
return (args.prefix or '') + args.nick + (args.suffix or '')
else:
return args.default or ''
return None
|
MeGotsThis/BotGotsThis
|
pkg/custom_command/custom/user.py
|
Python
|
gpl-3.0
| 363
|
#!/usr/bin/python
# usage: A debugging class
import pdb
version=2.0
def my_add(a,b):
''' This is the function for addition of numbers and strings '''
print "value of a is {}".format(a)
print "value of b is {}".format(b)
return a+b
def my_div(a,b):
''' This is the function for division '''
return a/b
def my_sub(a,b):
''' This is the function for subtraction '''
if a > b:
return a - b
elif b > a:
return b - a
def my_mul(a,b):
''' This is the function for multiplication '''
return a * b
# Application code
if __name__ == '__main__':
print "This is a example on understading debugging"
print "Congo, i learned to write a calculator"
pdb.set_trace()
print "summation of two numbers- {}".format(my_add(1,2))
print "multiplication of two numbers- {}".format(my_mul(1,2))
print "substartion of two numbers - {}".format(my_sub(1,2))
print "division of two numbers - {}".format(my_div(4,2))
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-57/debugging/first.py
|
Python
|
gpl-3.0
| 954
|
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to create a custom opaque material that has no mass, which can be plugged into the "Honeybee_EnergyPlus Construction" component.
_
It is important to note that this component creates a material with no mass and, because of this, the accuracy of the component is not as great as a material that has mass. However, this component is very useful if you only have an R-value for a material (or a construction) and you know that the mass is relatively small.
_
If you want to create a material that accounts for mass, you should use the "Honeybee_EnergyPlus Opaque Material" component.
-
Provided by Honeybee 0.0.66
Args:
_name: A text name for your NoMass Opaque Material.
_roughness_: A text value that indicates the roughness of your material. This can be "VeryRough", "Rough", "MediumRough", "MediumSmooth", "Smooth", or "VerySmooth". The default is set to "Rough".
_R_Value: A number representing the R-Value of the material in m2-K/W.
_thermAbsp_: A number between 0 and 1 that represents the thermal absorptance of the material. The default is set to 0.9, which is common for most non-metallic materials.
_solAbsp_: A number between 0 and 1 that represents the absorptance of solar radiation by the material. The default is set to 0.7, which is common for most non-metallic materials.
_visAbsp_: A number between 0 and 1 that represents the absorptance of visible light by the material. The default is set to 0.7, which is common for most non-metallic materials.
Returns:
EPMaterial: A no-mass opaque material that can be plugged into the "Honeybee_EnergyPlus Construction" component.
"""
ghenv.Component.Name = "Honeybee_EnergyPlus NoMass Opaque Material"
ghenv.Component.NickName = 'EPNoMassMat'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "06 | Energy | Material | Construction"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import Grasshopper.Kernel as gh
w = gh.GH_RuntimeMessageLevel.Warning
def checkInputs():
#Check to be sure that the absorptance values are between 0 and 1.
checkData = True
def checkBtwZeroAndOne(variable, default, variableName):
if variable == None: newVariable = default
else:
if variable <= 1 and variable >= 0: newVariable = variable
else:
newVariable = 0
checkData = False
warning = variableName + " must be between 0 and 1."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return newVariable
thermAbs = checkBtwZeroAndOne(_thermAbsp_, None, "_thermAbsp_")
solAbsp = checkBtwZeroAndOne(_solAbsp_, None, "_solAbsp_")
visAbsp = checkBtwZeroAndOne(_visAbsp_, None, "_visAbsp_")
#Check the Roughness value.
if _roughness_ != None: _roughness = _roughness_.upper()
else: _roughness = None
if _roughness == None or _roughness == "VERYROUGH" or _roughness == "ROUGH" or _roughness == "MEDIUMROUGH" or _roughness == "MEDIUMSMOOTH" or _roughness == "SMOOTH" or _roughness == "VERYSMOOTH": pass
else:
checkData = False
warning = "_roughness_ is not valid."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return checkData
def main(name, roughness, R_Value, thermAbsp, solAbsp, visAbsp):
if roughness == None: roughness = "Rough"
if thermAbsp == None: thermAbsp = 0.9
if solAbsp == None: solAbsp = 0.7
if visAbsp == None: visAbsp = 0.7
values = [name.upper(), roughness, R_Value, thermAbsp, solAbsp, visAbsp]
comments = ["Name", "Roughness", "Thermal Resistance {m2-K/W}", "Thermal Absorptance", "Solar Absorptance", "Visible Absorptance"]
materialStr = "Material:NoMass,\n"
for count, (value, comment) in enumerate(zip(values, comments)):
if count!= len(values) - 1:
materialStr += str(value) + ", !" + str(comment) + "\n"
else:
materialStr += str(value) + "; !" + str(comment)
return materialStr
if _name and _R_Value:
checkData = checkInputs()
if checkData == True:
EPMaterial = main(_name, _roughness_, _R_Value, _thermAbsp_, _solAbsp_, _visAbsp_)
|
mostaphaRoudsari/Honeybee
|
src/Honeybee_EnergyPlus NoMass Opaque Material.py
|
Python
|
gpl-3.0
| 5,463
|
#
# Copyright 2013, 2018, 2019 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" Disable blocks module """
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import sys
import logging
from ..tools import CMakeFileEditor
from .base import ModTool, ModToolException
logger = logging.getLogger(__name__)
class ModToolDisable(ModTool):
""" Disable block (comments out CMake entries for files) """
name = 'disable'
description = 'Disable selected block in module.'
def __init__(self, blockname=None, **kwargs):
ModTool.__init__(self, blockname, **kwargs)
self.info['pattern'] = blockname
def validate(self):
""" Validates the arguments """
ModTool._validate(self)
if not self.info['pattern'] or self.info['pattern'].isspace():
raise ModToolException("Invalid pattern!")
def run(self):
""" Go, go, go! """
def _handle_py_qa(cmake, fname):
""" Do stuff for py qa """
cmake.comment_out_lines('GR_ADD_TEST.*'+fname)
self.scm.mark_file_updated(cmake.filename)
return True
def _handle_py_mod(cmake, fname):
""" Do stuff for py extra files """
try:
with open(self._file['pyinit']) as f:
initfile = f.read()
except IOError:
logger.warning("Could not edit __init__.py, that might be a problem.")
return False
pymodname = os.path.splitext(fname)[0]
initfile = re.sub(r'((from|import)\s+\b'+pymodname+r'\b)', r'#\1', initfile)
with open(self._file['pyinit'], 'w') as f:
f.write(initfile)
self.scm.mark_file_updated(self._file['pyinit'])
return False
def _handle_cc_qa(cmake, fname):
""" Do stuff for cc qa """
if self.info['version'] == '37':
cmake.comment_out_lines(r'\$\{CMAKE_CURRENT_SOURCE_DIR\}/'+fname)
fname_base = os.path.splitext(fname)[0]
ed = CMakeFileEditor(self._file['qalib']) # Abusing the CMakeFileEditor...
ed.comment_out_lines(r'#include\s+"{}.h"'.format(fname_base), comment_str='//')
ed.comment_out_lines(r'{}::suite\(\)'.format(fname_base), comment_str='//')
ed.write()
self.scm.mark_file_updated(self._file['qalib'])
elif self.info['version'] == '38':
fname_qa_cc = 'qa_{}.cc'.format(self.info['blockname'])
cmake.comment_out_lines(fname_qa_cc)
elif self.info['version'] == '36':
cmake.comment_out_lines('add_executable.*'+fname)
cmake.comment_out_lines('target_link_libraries.*'+os.path.splitext(fname)[0])
cmake.comment_out_lines('GR_ADD_TEST.*'+os.path.splitext(fname)[0])
self.scm.mark_file_updated(cmake.filename)
return True
def _handle_h_swig(cmake, fname):
""" Comment out include files from the SWIG file,
as well as the block magic """
with open(self._file['swig']) as f:
swigfile = f.read()
(swigfile, nsubs) = re.subn(r'(.include\s+"({}/)?{}")'.format(
self.info['modname'], fname),
r'//\1', swigfile)
if nsubs > 0:
logger.info("Changing {}...".format(self._file['swig']))
if nsubs > 1: # Need to find a single BLOCK_MAGIC
blockname = os.path.splitext(fname[len(self.info['modname'])+1:])[0]
if self.info['version'] in ('37', '38'):
blockname = os.path.splitext(fname)[0]
(swigfile, nsubs) = re.subn('(GR_SWIG_BLOCK_MAGIC2?.+{}.+;)'.format(blockname), r'//\1', swigfile)
if nsubs > 1:
logger.warning("Hm, changed more then expected while editing {}.".format(self._file['swig']))
with open(self._file['swig'], 'w') as f:
f.write(swigfile)
self.scm.mark_file_updated(self._file['swig'])
return False
def _handle_i_swig(cmake, fname):
""" Comment out include files from the SWIG file,
as well as the block magic """
with open(self._file['swig']) as f:
swigfile = f.read()
blockname = os.path.splitext(fname[len(self.info['modname'])+1:])[0]
if self.info['version'] in ('37', '38'):
blockname = os.path.splitext(fname)[0]
swigfile = re.sub(r'(%include\s+"'+fname+'")', r'//\1', swigfile)
logger.info("Changing {}...".format(self._file['swig']))
swigfile = re.sub('(GR_SWIG_BLOCK_MAGIC2?.+'+blockname+'.+;)', r'//\1', swigfile)
with open(self._file['swig'], 'w') as f:
f.write(swigfile)
self.scm.mark_file_updated(self._file['swig'])
return False
# This portion will be covered by the CLI
if not self.cli:
self.validate()
else:
from ..cli import cli_input
# List of special rules: 0: subdir, 1: filename re match, 2: callback
special_treatments = (
('python', r'qa.+py$', _handle_py_qa),
('python', r'^(?!qa).+py$', _handle_py_mod),
('lib', r'qa.+\.cc$', _handle_cc_qa),
('include/{}'.format(self.info['modname']), r'.+\.h$', _handle_h_swig),
('include', r'.+\.h$', _handle_h_swig),
('swig', r'.+\.i$', _handle_i_swig)
)
for subdir in self._subdirs:
if self.skip_subdirs[subdir]:
continue
if self.info['version'] in ('37', '38') and subdir == 'include':
subdir = 'include/{}'.format(self.info['modname'])
try:
cmake = CMakeFileEditor(os.path.join(subdir, 'CMakeLists.txt'))
except IOError:
continue
logger.info("Traversing {}...".format(subdir))
filenames = cmake.find_filenames_match(self.info['pattern'])
yes = self.info['yes']
for fname in filenames:
file_disabled = False
if not yes:
ans = cli_input("Really disable {}? [Y/n/a/q]: ".format(fname)).lower().strip()
if ans == 'a':
yes = True
if ans == 'q':
sys.exit(0)
if ans == 'n':
continue
for special_treatment in special_treatments:
if special_treatment[0] == subdir and re.match(special_treatment[1], fname):
file_disabled = special_treatment[2](cmake, fname)
if not file_disabled:
cmake.disable_file(fname)
cmake.write()
self.scm.mark_files_updated((os.path.join(subdir, 'CMakeLists.txt'),))
logger.warning("Careful: 'gr_modtool disable' does not resolve dependencies.")
|
marcusmueller/gnuradio
|
gr-utils/python/modtool/core/disable.py
|
Python
|
gpl-3.0
| 7,940
|
from nose.tools import *
import iDibo
def setup():
print "SETUP!"
def teardown():
print "TEAR DOWN!"
def test_basic():
print "I RAN!"
|
anaoprea/iDibo
|
tests/iDibo_tests.py
|
Python
|
gpl-3.0
| 148
|
#!/usr/bin/python
# coding=utf-8
# Copyright (c) 2015, 2016 Björn Schrader
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import ConfigParser
import os
import time
import logging
import pyinotify
import serial
import subprocess
import threading
import re
import string
import signal
import Queue
from struct import *
NX_lf = '\xff\xff\xff'
NX_channel = 0
NX_page = 0
version = '0.18'
temps = dict()
channels = dict()
pitmaster = dict()
pitconf = dict()
# Events are sent asynchronously by the display
NX_eventq = Queue.Queue()
# Return values are delivered on request
NX_returnq = Queue.Queue()
# Lock for writing to the config
configfile_lock = threading.Lock()
# New temperatures
temps_event = threading.Event()
# New channel configuration (= changed config file)
channels_event = threading.Event()
# New pitmaster events
pitmaster_event = threading.Event()
# New pitmaster configuration (= changed config file)
pitconf_event = threading.Event()
# Event for waking up from sleep mode (= changed config file)
NX_wake_event = threading.Event()
# A stop of the process has been requested
stop_event = threading.Event()
# Read the configuration file
configdefaults = {'dim' : '90',
'timeout': '30',
'serialdevice': '/dev/ttyAMA0',
'serialspeed': '115200'}
configfile = '/var/www/conf/WLANThermo.conf'
Config = ConfigParser.SafeConfigParser(configdefaults)
# We run as root, but others must be able to write the config as well!
os.umask (0)
for i in range(0,5):
while True:
try:
Config.read(configfile)
except IndexError:
# We cannot wait for an event here yet, because we still need the other paths from the config
# Logging is not available yet either, because we need the log file first, so write to stderr
sys.stderr.write('Warte auf Konfigurationsdatei')
time.sleep(1)
continue
break
# Initialize logging
LOGFILE = Config.get('daemon_logging', 'log_file')
logger = logging.getLogger('WLANthermoNEXTION')
#Define Logging Level by changing >logger.setLevel(logging.LEVEL_YOU_WANT)< available: DEBUG, INFO, WARNING, ERROR, CRITICAL
log_level = Config.get('daemon_logging', 'level_DISPLAY')
if log_level == 'DEBUG':
logger.setLevel(logging.DEBUG)
if log_level == 'INFO':
logger.setLevel(logging.INFO)
if log_level == 'ERROR':
logger.setLevel(logging.ERROR)
if log_level == 'WARNING':
logger.setLevel(logging.WARNING)
if log_level == 'CRITICAL':
logger.setLevel(logging.CRITICAL)
handler = logging.FileHandler(LOGFILE)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.captureWarnings(True)
# Read the paths of the data exchange files
curPath, curFile = os.path.split(Config.get('filepath','current_temp'))
pitPath, pitFile = os.path.split(Config.get('filepath','pitmaster'))
confPath, confFile = os.path.split(configfile)
# If the display directory on the RAM drive does not exist, create it
if not os.path.exists(curPath):
os.makedirs(curPath)
class FileEvent(pyinotify.ProcessEvent):
def process_IN_CLOSE_WRITE(self, event):
global temps, channels, pitmaster, pitconf, Config, configfile
global temps_event, channels_event, pitmaster_event, pitconf_event, logger
logger.debug("IN_CLOSE_WRITE: %s " % os.path.join(event.path, event.name))
if event.path == curPath and event.name == curFile:
logger.debug('Neue Temperaturwerte vorhanden')
temps_event.set()
elif event.path == confPath and event.name == confFile:
logger.debug('Neue Konfiguration vorhanden')
channels_event.set()
pitconf_event.set()
elif event.path == pitPath and event.name == pitFile:
logger.debug('Neue Pitmasterdaten vorhanden')
pitmaster_event.set()
def process_IN_MOVED_TO(self, event):
global temps, channels, pitmaster, pitconf, Config, configfile
global temps_event, channels_event, pitmaster_event, pitconf_event, logger
logger.debug("IN_MOVED_TO: %s " % os.path.join(event.path, event.name))
if event.path == curPath and event.name == curFile:
logger.debug('Neue Temperaturwerte vorhanden')
temps_event.set()
elif event.path == confPath and event.name == confFile:
logger.debug('Neue Konfiguration vorhanden')
channels_event.set()
pitconf_event.set()
elif event.path == pitPath and event.name == pitFile:
logger.debug('Neue Pitmasterdaten vorhanden')
pitmaster_event.set()
def NX_reader():
global logger, ser, NX_returns, NX_events, stop_event, NX_wake_event
logger.info('Reader-Thread gestartet')
# Set a timeout so that the thread can be stopped
ser.timeout = 0.1
# Endless loop, exits when a stop_event is set
while not stop_event.is_set():
is_return = False
endcount = 0
bytecount = 0
message = {'raw' : '', 'iserr' : False, 'errmsg' : '', 'data' : {}, 'type': ''}
while (endcount != 3):
byte = ser.read()
if byte != '':
# No timeout
bytecount += 1
message['raw'] += byte[0]
if (byte[0] == '\xff'):
endcount += 1
else:
endcount = 0
else:
# Timeout - should we stop?
if stop_event.is_set():
break
if stop_event.is_set():
break
elif (message['raw'][0] == '\x00'):
message['type'] = 'inv_instr'
message['iserr'] = True
message['errmsg'] = 'Invalid instruction'
is_return = True
elif (message['raw'][0] == '\x01'):
message['type'] = 'ok'
message['errmsg'] = 'Successful execution of instruction'
is_return = True
elif (message['raw'][0] == '\x03'):
message['type'] = 'inv_pageid'
message['iserr'] = True
message['errmsg'] = 'Page ID invalid'
is_return = True
elif (message['raw'][0] == '\x04'):
message['type'] = 'inv_pictid'
message['iserr'] = True
message['errmsg'] = 'Picture ID invalid'
is_return = True
elif (message['raw'][0] == '\x05'):
message['type'] = 'inv_fontid'
message['iserr'] = True
message['errmsg'] = 'Font ID invalid'
is_return = True
elif (message['raw'][0] == '\x11'):
message['type'] = 'inv_baudrate'
message['iserr'] = True
message['errmsg'] = 'Baud rate setting invalid'
is_return = True
elif (message['raw'][0] == '\x12'):
message['type'] = 'inv_curve'
message['iserr'] = True
message['errmsg'] = 'Curve control ID number or channel number is invalid'
is_return = True
elif (message['raw'][0] == '\x1a'):
message['type'] = 'inv_varname'
message['iserr'] = True
message['errmsg'] = 'Variable name invalid '
is_return = True
elif (message['raw'][0] == '\x1B'):
message['type'] = 'inv_varop'
message['iserr'] = True
message['errmsg'] = 'Variable operation invalid'
is_return = True
elif (message['raw'][0] == '\x65'):
message['type'] = 'touch_event'
message['errmsg'] = 'Touch event return data'
message['data'] = {'page': unpack('B', message['raw'][1])[0], 'button': unpack('B', message['raw'][2])[0], 'event':['release', 'press'][unpack('B', message['raw'][3])[0]]}
elif (message['raw'][0] == '\x66'):
message['type'] = 'current_page'
message['errmsg'] = 'Current page ID number return'
message['data'] = {'page': unpack('B',message['raw'][1])[0]}
elif (message['raw'][0] == '\x67'):
message['type'] = 'touch_coord'
message['errmsg'] = 'Touch coordinate data returns'
message['data'] = {'x': unpack('>h', message['raw'][1:3])[0],'y': unpack('>h', message['raw'][3:5])[0], 'event':['release', 'press'][unpack('B', message['raw'][5])[0]]}
elif (message['raw'][0] == '\x68'):
message['type'] = 'touch_coord_sleep'
message['errmsg'] = 'Touch Event in sleep mode'
message['data'] = {'x': unpack('>h', message['raw'][1:3])[0] ,'y': unpack('>h', message['raw'][3:5])[0], 'event':['release', 'press'][unpack('B', message['raw'][5])[0]]}
elif (message['raw'][0] == '\x70'):
message['type'] = 'data_string'
message['errmsg'] = 'String variable data returns'
message['data'] = unpack((str(bytecount - 4)) + 's', message['raw'][1:-3])[0]
is_return = True
elif (message['raw'][0] == '\x71'):
message['type'] = 'data_int'
message['errmsg'] = 'Numeric variable data returns'
message['data'] = unpack('<i', message['raw'][1:5])[0]
is_return = True
elif (message['raw'][0] == '\x86'):
message['type'] = 'sleep'
message['errmsg'] = 'Device automatically enters into sleep mode'
NX_wake_event.clear()
elif (message['raw'][0] == '\x87'):
message['type'] = 'wakeup'
message['errmsg'] = 'Device automatically wake up'
# The device has woken up...
NX_wake_event.set()
elif (message['raw'][0] == '\x88'):
message['type'] = 'startup'
message['errmsg'] = 'System successful start up'
elif (message['raw'][0] == '\x89'):
message['type'] = 'sdupgrade'
message['errmsg'] = 'Start SD card upgrade'
# Custom-defined commands
elif (message['raw'][0] == '\x40'):
message['type'] = 'read_cmd'
message['errmsg'] = 'Request to read from Display'
message['data'] = {'area': unpack('B', message['raw'][1])[0], 'id': unpack('B', message['raw'][2])[0]}
elif (message['raw'][0] == '\x41'):
message['type'] = 'custom_cmd'
message['errmsg'] = 'Execute Special Command'
message['data'] = {'area': unpack('B', message['raw'][1])[0], 'id': unpack('B', message['raw'][2])[0], 'action': unpack('B', message['raw'][3])[0]}
logger.debug('Area: ' + str(message['data']['area']) + ' ID: ' + str(message['data']['id']) + ' Action: ' + str(message['data']['action']))
logger.debug('Meldung ' + message['type'] + ' vom Display erhalten')
if (is_return):
NX_returnq.put(message)
else:
NX_eventq.put(message)
logger.info('Reader-Thread gestoppt')
return True
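# Worked example of the framing handled above (bytes are hypothetical): a
# touch event arriving as '\x65\x00\x02\x01\xff\xff\xff' is terminated by the
# three 0xff bytes and decodes to {'page': 0, 'button': 2, 'event': 'press'};
# since it is not a return value it is placed on NX_eventq.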
def NX_waitok():
global stop_event
endcount = 0
bytecount = 0
ok = False
while (endcount != 3 or not stop_event.is_set()):
byte = ser.read()
if byte == '':
logger.info('Serial Communication Timeout!')
break
bytecount += 1
if (byte[0] == '\xff'):
endcount += 1
elif (byte[0] == '\x01' and bytecount == 1):
endcount = 0
ok = True
else:
endcount = 0
if ok == True:
return True
else:
return False
def NX_init(port, baudrate):
global ser, NX_lf, NX_reader_thread
ser.port = port
ser.baudrate = baudrate
ser.timeout = 1
ser.open()
logger.debug('Leere seriellen Buffer')
# Clear the display's buffer
# - Send an invalid command
# - Send a wake-up command
ser.write('nop' + NX_lf)
ser.write('sleep=0' + NX_lf)
# - Wait
ser.flush()
time.sleep(0.2)
# - Discard any received characters
ser.flushInput()
# Always get a response back
ser.write('ref 0' + NX_lf)
ser.flush()
return NX_waitok()
def NX_sendvalues(values):
global ser, NX_lf, NX_returnq, NX_wake_event
# NX_sendcmd('sleep=0')
error = False
for rawkey, value in values.iteritems():
# The length is encoded in the key as "key:length"
keys = rawkey.split(':')
key = keys[0]
if len(keys) == 2:
length = int(keys[1])
else:
length = None
# Send the data to the display and wait for a response
logger.debug("Sende " + key + ' zum Display: ' + str(value))
if key[-3:] == 'txt':
ser.write(str(key) + '="' + str(value)[:length] + '"\xff\xff\xff')
elif key[-3:] == 'val':
ser.write(str(key) + '=' + str(value) + '\xff\xff\xff')
else:
logger.warning('Unbekannter Variablentyp')
ser.flush()
try:
ret = NX_returnq.get(timeout=1)
except Queue.Empty:
logger.warning('Timeout - möglicherweise Sleep-Mode')
error = True
break
else:
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
if error:
return False
return True
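# Minimal usage sketch (the values below are made up): an optional ':length'
# suffix on a key truncates text values before sending, e.g.
#     NX_sendvalues({'main.pit_set.txt:10': '110.0', 'main.pit_power.val': 42})
# writes main.pit_set.txt="110.0" and main.pit_power.val=42 to the display and
# returns False if the display reports an error or does not answer in time.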
def NX_getvalues(ids):
global ser, NX_lf, NX_returnq
error = False
returnvalues = dict()
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
except Queue.Empty:
for id in ids:
# Send the data to the display and wait for a response
logger.debug("Hole " + str(id) + ' vom Display')
ser.write('get ' + str(id) + '\xff\xff\xff')
ser.flush()
try:
ret = NX_returnq.get(0.5)
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
# Assume it is an "OK" - what else could it be?
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
# OK, now fetch the data
if ret['type'] == 'data_string':
logger.debug('String "' + ret['data'] + '" vom Display erhalten')
elif ret['type'] == 'data_int':
logger.debug('Integer "' + ret['data'] + '" vom Display erhalten')
else:
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten')
if not error:
returnvalues[id] = ret['data']
error = True
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
return returnvalues
def NX_getvalue(id):
global ser, NX_lf, NX_returnq
error = False
# Send the data to the display and wait for a response
logger.debug("Hole " + str(id) + ' vom Display')
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
except Queue.Empty:
ser.write('get ' + str(id) + '\xff\xff\xff')
ser.flush()
try:
ret = NX_returnq.get(True, 0.5)
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
# OK, now fetch the data
if ret['type'] == 'data_string':
logger.debug('String "' + ret['data'] + '" vom Display erhalten')
elif ret['type'] == 'data_int':
logger.debug('Integer "' + str(ret['data']) + '" vom Display erhalten')
else:
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten')
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
if not error:
return ret['data']
else:
return None
def NX_sendcmd(cmd):
global ser, NX_returnq
error = False
# Send the data to the display and wait for a response
logger.debug('Sende Befehl "' + str(cmd) + '" zum Display')
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
NX_returnq.task_done()
except Queue.Empty:
ser.write(str(cmd) + '\xff\xff\xff')
ser.flush()
try:
ret = NX_returnq.get(True, 0.5)
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
if error:
return False
return True
def NX_switchpage(new_page):
global ser, NX_returnq, NX_page
error = False
logger.debug("Sende Seitenwechsel zu " + str(new_page))
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
except Queue.Empty:
ser.write('page ' + str(new_page) + '\xff\xff\xff')
ser.flush()
try:
ret = NX_returnq.get(True, 0.5)
if ret['iserr']:
logger.error('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
if error:
return False
NX_page = new_page
return True
def sensors_getvalues():
sensors = dict()
sensorconfig = ConfigParser.SafeConfigParser()
sensorconfig.read('/var/www/conf/sensor.conf')
for section in sensorconfig.sections():
sensors[sensorconfig.getint(section, 'number')] = dict()
sensors[sensorconfig.getint(section, 'number')]['name'] = sensorconfig.get(section, 'name')
return sensors
def temp_getvalues():
global logger, curPath, curFile
temps = dict()
if os.path.isfile(curPath + '/' + curFile):
logger.debug("Daten vom WLANThermo zum Anzeigen vorhanden")
ft = open(curPath + '/' + curFile).read()
temps_raw = ft.split(';')
temps = dict()
temps['timestamp'] = timestamp = time.mktime(time.strptime(temps_raw[0],'%d.%m.%y %H:%M:%S'))
for count in range(8):
temps[count] = {'value': temps_raw[count+1], 'alert': temps_raw[count+9]}
else:
return None
return temps
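# Format note (describes only what the parser above expects, not an official
# spec): the current-temperature file is one semicolon-separated line where
# field 0 is a '%d.%m.%y %H:%M:%S' timestamp, fields 1-8 are the channel
# readings and fields 9-16 the matching alert flags, so temps[i] becomes
# {'value': temps_raw[i+1], 'alert': temps_raw[i+9]}. A reading of '999.9'
# appears to be used elsewhere as a placeholder for a missing sensor.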
def tempcsv_write(config):
name ='/var/www/temperaturen.csv'
logger.debug('Schreibe Temperaturen in "' + name + '" neu!')
while True:
try:
fw = open(name + '_tmp','w') #Datei anlegen
for i in range(8):
fw.write(str(config.get('temp_max','temp_max' + str(i))) + '\n') # Alarm-Max-Werte schreiben
for i in range(8):
fw.write(str(config.get('temp_min','temp_min' + str(i))) + '\n') # Alarm-Min-Werte schreiben
fw.flush()
os.fsync(fw.fileno())
fw.close()
os.rename(name + '_tmp', name)
except IndexError:
time.sleep(0.1)
continue
break
def set_tempflag():
# Create the flag file for the web GUI
open('/var/www/tmp/flag', 'w').close()
def channels_setvalues(channel, high= None, low=None, sensor=None):
global configfile, configfile_lock
restart_comp = False
temp_changed = False
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if low != None:
newconfig.set('temp_min','temp_min' + str(channel), str(low))
temp_changed = True
if high != None:
newconfig.set('temp_max','temp_max' + str(channel), str(high))
temp_changed = True
if sensor != None:
newconfig.set('Sensoren','ch' + str(channel), str(sensor))
restart_comp = True
if restart_comp:
newconfig.set('ToDo','restart_thermo', 'True')
elif temp_changed:
tempcsv_write(newconfig)
if temp_changed:
set_tempflag()
config_write(configfile, newconfig)
def display_getvalues():
global configfile, configfile_lock
defaults = {'dim':'90', 'timeout':'30', 'start_page':'main', 'serialdevice':'/dev/ttyAMA0', 'serialspeed':'9600'}
display = {}
with configfile_lock:
config = ConfigParser.SafeConfigParser(defaults)
config.read(configfile)
display['dim'] = config.getint('Display','dim')
display['timeout'] = config.getint('Display','timeout')
display['start_page'] = config.get('Display','start_page')
display['serialdevice'] = Config.get('Display', 'serialdevice')
display['serialspeed'] = Config.getint('Display', 'serialspeed')
return display
def display_setvalues(dim = None, timeout = None):
global configfile, configfile_lock
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if dim != None:
newconfig.set('Display','dim', str(dim))
if timeout != None:
newconfig.set('Display','timeout', str(timeout))
config_write(configfile, newconfig)
def todo_setvalues(pi_down = None, pi_reboot = None):
global configfile, configfile_lock
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if pi_down != None:
newconfig.set('ToDo','raspi_shutdown', ['False', 'True'][pi_down])
if pi_reboot != None:
newconfig.set('ToDo','raspi_reboot', ['False', 'True'][pi_reboot])
config_write(configfile, newconfig)
def pitmaster_setvalues(pit_ch = None, pit_set = None, pit_lid= None, pit_on = None, pit_pid = None, pit_type = None, pit_inverted = None):
global configfile, configfile_lock
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if pit_ch != None:
newconfig.set('Pitmaster','pit_ch', str(pit_ch))
if pit_inverted != None:
newconfig.set('Pitmaster','pit_inverted', ['False', 'True'][pit_inverted])
if pit_set != None:
newconfig.set('Pitmaster','pit_set', str(pit_set))
if pit_lid != None:
newconfig.set('Pitmaster','pit_open_lid_detection', ['False', 'True'][pit_lid])
if pit_on != None:
newconfig.set('ToDo','pit_on', ['False', 'True'][pit_on])
if pit_pid != None:
newconfig.set('Pitmaster','pit_controller_type', ['False', 'PID'][pit_pid])
if pit_type != None:
newconfig.set('Pitmaster','pit_type', ['fan', 'servo', 'io', 'io_pwm', 'fan_pwm'][pit_type])
config_write(configfile, newconfig)
def channels_getvalues():
global logger, configfile, configfile_lock
logger.debug('Lade Kanalkonfiguration aus Logfile')
channels = {}
with configfile_lock:
Config = ConfigParser.SafeConfigParser()
Config.read(configfile)
for i in range(8):
channel = {}
channel['sensor'] = Config.getint('Sensoren', 'ch' + str(i))
channel['logging'] = Config.getboolean('Logging', 'ch' + str(i))
channel['web_alert'] = Config.getboolean('web_alert', 'ch' + str(i))
channel['name'] = Config.get('ch_name', 'ch_name' + str(i))
channel['show'] = Config.getboolean('ch_show', 'ch' + str(i))
channel['temp_min'] = Config.getint('temp_min', 'temp_min' + str(i))
channel['temp_max'] = Config.getint('temp_max', 'temp_max' + str(i))
channels[i] = channel
return channels
def pitmaster_config_getvalues():
global configfile, configfile_lock
pitconf = dict()
with configfile_lock:
Config = ConfigParser.SafeConfigParser()
Config.read(configfile)
pitconf['on'] = Config.getboolean('ToDo','pit_on')
pitconf['type'] = Config.get('Pitmaster','pit_type')
pitconf['inverted'] = Config.getboolean('Pitmaster','pit_inverted')
pitconf['curve'] = Config.get('Pitmaster','pit_curve')
pitconf['set'] = Config.getfloat('Pitmaster','pit_set')
pitconf['ch'] = Config.getint('Pitmaster','pit_ch')
pitconf['pause'] = Config.getfloat('Pitmaster','pit_pause')
pitconf['pwm_min'] = Config.getfloat('Pitmaster','pit_pwm_min')
pitconf['pwm_max'] = Config.getfloat('Pitmaster','pit_pwm_max')
pitconf['man'] = Config.getint('Pitmaster','pit_man')
pitconf['Kp'] = Config.getfloat('Pitmaster','pit_kp')
pitconf['Kd'] = Config.getfloat('Pitmaster','pit_kd')
pitconf['Ki'] = Config.getfloat('Pitmaster','pit_ki')
pitconf['Kp_a'] = Config.getfloat('Pitmaster','pit_kp_a')
pitconf['Kd_a'] = Config.getfloat('Pitmaster','pit_kd_a')
pitconf['Ki_a'] = Config.getfloat('Pitmaster','pit_ki_a')
pitconf['switch_a'] = Config.getfloat('Pitmaster','pit_switch_a')
pitconf['controller_type'] = Config.get('Pitmaster','pit_controller_type')
pitconf['iterm_min'] = Config.getfloat('Pitmaster','pit_iterm_min')
pitconf['iterm_max'] = Config.getfloat('Pitmaster','pit_iterm_max')
pitconf['open_lid_detection'] = Config.getboolean('Pitmaster','pit_open_lid_detection')
pitconf['open_lid_pause'] = Config.getfloat('Pitmaster','pit_open_lid_pause')
pitconf['open_lid_falling_border'] = Config.getfloat('Pitmaster','pit_open_lid_falling_border')
pitconf['open_lid_rising_border'] = Config.getfloat('Pitmaster','pit_open_lid_rising_border')
return pitconf
def pitmaster_getvalues():
global logger, pitPath, pitFile
temps = dict()
if os.path.isfile(pitPath + '/' + pitFile):
logger.debug("Daten vom Pitmaster zum Anzeigen vorhanden")
fp = open(pitPath + '/' + pitFile).read()
pitmaster_raw = fp.split(';',4)
# It so happens that the pitmaster file is sometimes empty
# In that case simply act as if nothing is there
#TODO Fix everything
if pitmaster_raw[0] == '':
return None
timestamp = time.mktime(time.strptime(pitmaster_raw[0],'%d.%m.%y %H:%M:%S'))
pitmaster = {'timestamp': timestamp, 'set': float(pitmaster_raw[1]), 'now': float(pitmaster_raw[2]),'new': float(pitmaster_raw[3].rstrip('%')),'msg': pitmaster_raw[4]}
else:
return None
return pitmaster
def lan_getvalues():
interfacelist = ['eth0', 'eth1', 'wlan0', 'wlan1']
interfaces = dict()
for interface in interfacelist:
retvalue = os.popen("LANG=C ifconfig " + interface + " 2>/dev/null | grep 'inet ' | cut -d':' -f2| cut -d' ' -f1").readlines()
if (len(retvalue)!=0):
interfaces[interface] = {'name': interface, 'ip': retvalue[0].strip()}
return interfaces
def wlan_getsignal(interface):
logger.debug('Hole Signalstärke für "' + interface + '"')
retvalue = os.popen("LANG=C iwconfig " + interface + " 2>/dev/null").readlines()
for line in retvalue:
if 'Link Quality=' in line:
quality = re.match('.*Link Quality=(\S*)',line).group(1)
if '/' in quality:
(val, div) = quality.split('/')
quality = int(round(float(val) / float(div) * 100.0))
return quality
return None
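# Hedged example of the parsing above (iwconfig output varies between
# drivers): a line containing 'Link Quality=60/70' yields
# int(round(60.0 / 70.0 * 100.0)) == 86, which is returned as the quality.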
def wlan_getssids():
ssidlist = os.popen("iwlist wlan0 scan").readlines()
ssids = list()
for line in ssidlist:
if "ESSID:" in line:
ssid = re.match('.*ESSID:"(.*)"',line).group(1)
if not ssid in ssids:
ssids.append(ssid)
return ssids
def wlan_reconnect():
os.system('ifdown wlan0')
time.sleep(1)
os.system('ifup wlan0')
def wlan_setpassphrase(ssid, psk):
logger.debug('Setze WPA Passhrase für: ' + ssid)
fw = file('/etc/wpa_supplicant/wpa_supplicant.conf').readlines()
ssids = list()
psks = list()
ssid_found = False
for line in fw:
if re.search(r'SSID',line,re.IGNORECASE):
ssids.append(line.split("=")[1].replace('"','').strip())
elif re.search(r'\#psk',line,re.IGNORECASE):
psks.append(line.split("=")[1].replace('"','').strip())
wpa_file = open('/etc/wpa_supplicant/wpa_supplicant.conf' + '_tmp', 'w')
wpa_file.write('ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\n')
wpa_file.write('update_config=1\n')
if ssids:
for i in range(len(ssids)):
logger.debug('Schreibe wpa_supplicant.conf für: ' + ssids[i])
if ssid == ssids[i]:
# Value changed
logger.debug('SSID bereits in Config, PSK ändern')
wpa_passphrase = subprocess.Popen(("/usr/bin/wpa_passphrase", str(ssid), str(psk)), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.readlines()
ssid_found = True
else:
# new SSID
logger.debug('SSID und PSK aus alter Datei übernommen')
wpa_passphrase = subprocess.Popen(("/usr/bin/wpa_passphrase", str(ssids[i]), str(psks[i])), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.readlines()
if wpa_passphrase[0] != "Passphrase must be 8..63 characters":
for line in wpa_passphrase:
wpa_file.write(line)
else:
logger.warning('Neuer PSK zu kurz für SSID: ' + ssid)
if not ssid_found:
# SSID not among the configured WLANs, add the new one
logger.debug('Schreibe wpa_supplicant.conf für: ' + ssid)
wpa_passphrase = subprocess.Popen(("/usr/bin/wpa_passphrase", str(ssid), str(psk)), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.readlines()
if wpa_passphrase[0] != "Passphrase must be 8..63 characters":
for line in wpa_passphrase:
wpa_file.write(line)
else:
logger.warning('Neuer PSK zu kurz für SSID: ' + ssid)
wpa_file.flush()
os.fsync(wpa_file.fileno())
wpa_file.close()
os.rename('/etc/wpa_supplicant/wpa_supplicant.conf' + '_tmp', '/etc/wpa_supplicant/wpa_supplicant.conf')
return True
def alert_setack():
try:
os.mknod('/var/www/alert.ack')
except OSError:
pass
def NX_display():
logger.info('Display-Thread gestartet')
global NX_page, NX_channel, stop_event, NX_eventq
global temps_event, channels_event, pitmaster_event, pitmasterconfig_event
global Config
# Check the display version
display_version = str(NX_getvalue('main.version.txt'))
logger.info('Version auf dem Display: ' + str(display_version))
if not str(display_version) in ['v1.3']:
logger.info('Update des Displays notwendig')
NX_sendcmd('page update')
open('/var/www/tmp/nextionupdate', 'w').close()
stop_event.wait()
return False
if os.path.isfile('/var/www/tmp/nextionupdate'):
# Delete the update flag if the version is OK
os.unlink('/var/www/tmp/nextionupdate')
NX_sendvalues({'boot.text.txt:35':'Temperaturen werden geladen'})
NX_switchpage('boot')
# Initialize values
temps_event.clear()
channels_event.clear()
logger.debug('Hole Temperaturen...')
temps = temp_getvalues()
while temps == None:
logger.info("Wartet auf Temperaturen")
temps_event.wait(0.1)
temps = temp_getvalues()
NX_sendvalues({'boot.text.txt:35':'Konfiguration wird geladen'})
logger.debug('Hole Displaykonfiguration...')
display = display_getvalues()
logger.debug('Hole Sensorkonfiguration...')
sensors = sensors_getvalues()
logger.debug('Hole Kanalkonfiguration...')
channels = channels_getvalues()
logger.debug('Hole Pitmasterkonfiguration...')
pitconf = pitmaster_config_getvalues()
interfaces = lan_getvalues()
# Empty list, since the scan takes a moment...
ssids = []
# Index of the currently selected entry
ssids_i = 0
pitmaster = None
if pitconf['on'] == True:
logger.debug('Hole Pitmasterdaten...')
pitmaster = pitmaster_getvalues()
# It can take a while until valid data is delivered, so do not wait any longer
if pitmaster == None:
pitmaster = {'timestamp': 0, 'set': 0, 'now': 0,'new': 0,'msg': ''}
values = dict()
for i in range(1, 11):
values['main.sensor_name' + str(i) + '.txt:10'] = sensors[i]['name']
for i in range(8):
if temps[i]['value'] == '999.9':
values['main.kanal' + str(i) + '.txt:10'] = channels[i]['name']
else:
values['main.kanal' + str(i) + '.txt:10'] = temps[i]['value']
values['main.alert' + str(i) + '.txt:10'] = temps[i]['alert']
values['main.al' + str(i) + 'minist.txt:10'] = int(round(channels[i]['temp_min']))
values['main.al' + str(i) + 'maxist.txt:10'] = int(round(channels[i]['temp_max']))
values['main.sensor_type' + str(i) + '.val'] = channels[i]['sensor']
values['main.name' + str(i) + '.txt:10'] = channels[i]['name']
for interface in interfaces:
values['wlaninfo.' + interfaces[interface]['name'] + '.txt:20'] = interfaces[interface]['ip']
values['main.pit_ch.val'] = int(pitconf['ch'])
values['main.pit_power.val'] = int(round(pitmaster['new']))
values['main.pit_set.txt:10'] = round(pitconf['set'],1)
values['main.pit_lid.val'] = int(pitconf['open_lid_detection'])
values['main.pit_on.val'] = int(pitconf['on'])
values['main.pit_inverted.val'] = int(pitconf['inverted'])
values['main.pit_pid.val'] = {'False': 0, 'PID': 1}[pitconf['controller_type']]
# Display settings should be local and only for us
# Otherwise this part would need to be reworked
values['main.dim.val'] = int(display['dim'])
values['main.timeout.val'] = int(display['timeout'])
# NX_sendcmd('dims=' + str(values['main.dim.val']))
# NX_sendcmd('thsp=' + str(values['main.timeout.val']))
pit_types = {'fan':0, 'servo':1, 'io':2, 'io_pwm':3, 'fan_pwm':4}
values['main.pit_type.val'] = pit_types[pitconf['type']]
NX_sendvalues({'boot.text.txt:35':'Werte werden uebertragen'})
NX_sendvalues(values)
# Open the start page, resetting the text beforehand
NX_sendvalues({'boot.text.txt:35':'Verbindung wird hergestellt'})
NX_sendcmd('page ' + display['start_page'])
NX_wake_event.set()
while not stop_event.is_set():
# Usually we will be waiting here during sleep mode
while not stop_event.is_set() and not NX_wake_event.wait(timeout = 0.01):
pass
if not NX_eventq.empty():
event = NX_eventq.get(False)
# Touch events are handled here
if event['type'] == 'current_page' :
NX_page = event['data']['page']
elif event['type'] == 'startup':
# Display restarted - die and hope for rebirth
logger.warning('Start-Up Meldung vom Display erhalten, breche ab.')
return False
elif event['type'] == 'read_cmd':
if event['data']['area'] == 0:
channel = event['data']['id']
low = NX_getvalue('main.al'+ str(channel)+'minist.txt')
channels_setvalues(channel, low=low)
elif event['data']['area'] == 1:
channel = event['data']['id']
high = NX_getvalue('main.al'+ str(channel)+'maxist.txt')
channels_setvalues(channel, high=high)
elif event['data']['area'] == 2:
channel = event['data']['id']
sensor = NX_getvalue('main.sensor_type'+ str(channel) + '.val')
channels_setvalues(channel, sensor=sensor)
elif event['data']['area'] == 3:
if event['data']['id'] == 0:
# pit_ch
pit_ch = NX_getvalue('main.pit_ch.val')
pitmaster_setvalues(pit_ch = pit_ch)
elif event['data']['id'] == 1:
# pit_set
pit_set = NX_getvalue('main.pit_set.txt')
pitmaster_setvalues(pit_set = pit_set)
elif event['data']['id'] == 2:
# pit_lid
pit_lid = NX_getvalue('main.pit_lid.val')
pitmaster_setvalues(pit_lid = pit_lid)
elif event['data']['id'] == 3:
# pit_on
pit_on = NX_getvalue('main.pit_on.val')
pitmaster_setvalues(pit_on = pit_on)
elif event['data']['id'] == 4:
# pit_pid
pit_pid = NX_getvalue('main.pit_pid.val')
pitmaster_setvalues(pit_pid = pit_pid)
elif event['data']['id'] == 5:
# pit_type
pit_type = NX_getvalue('main.pit_type.val')
pitmaster_setvalues(pit_type = pit_type)
elif event['data']['id'] == 6:
# pit_inverted
pit_inverted = NX_getvalue('main.pit_inverted.val')
pitmaster_setvalues(pit_inverted = pit_inverted)
elif event['data']['area'] == 4:
if event['data']['id'] == 0:
# dim
dim = NX_getvalue('main.dim.val')
display_setvalues(dim = dim)
elif event['data']['id'] == 1:
# timeout
timeout = NX_getvalue('main.timeout.val')
display_setvalues(timeout = timeout)
elif event['data']['area'] == 5:
if event['data']['id'] == 0:
# pi_down
# pi_down = NX_getvalue('main.pi_down.val')
todo_setvalues(pi_down = 1)
elif event['data']['id'] == 1:
# pi_reboot
# pi_reboot = NX_getvalue('main.pi_reboot.val')
todo_setvalues(pi_reboot = 1)
elif event['data']['id'] == 4:
# main.password.txt = configure WLAN
passphrase = wlan_setpassphrase(ssids[ssids_i], NX_getvalue('main.password.txt'))
wlan_reconnect()
# Deactivate sleep mode
# NX_sendcmd('thsp=0')
# Wait 20s for a connection
i = 0
while i in range(45) and not stop_event.is_set():
interfaces = lan_getvalues()
if 'wlan0' in interfaces:
# wlan0 has an IP address
NX_sendvalues({'main.result.txt:20': 'IP:' + interfaces['wlan0']['ip']})
NX_sendcmd('page result')
for interface in interfaces:
values['wlaninfo.' + interfaces[interface]['name'] + '.txt:20'] = interfaces[interface]['ip']
NX_sendvalues(values)
break
elif i == 44:
# wlan0 still has no IP address after 20s
NX_sendvalues({'main.result.txt:20': 'fehlgeschlagen'})
NX_sendcmd('page result')
break
else:
time.sleep(1)
i = i + 1
# NX_sendcmd('thsp=' + str(Config.getint('Display', 'timeout')))
elif event['data']['id'] == 5:
values = dict()
interfaces = lan_getvalues()
for interface in interfaces:
values['wlaninfo.' + interfaces[interface]['name'] + '.txt:20'] = interfaces[interface]['ip']
signal = wlan_getsignal('wlan0')
values['main.signal.val'] = signal
NX_sendvalues(values)
elif event['data']['id'] == 6:
wlan_reconnect()
elif event['type'] == 'custom_cmd':
if event['data']['area'] == 5:
if event['data']['id'] == 0:
if event['data']['action'] == 0:
logger.debug('Fahre herunter...')
todo_setvalues(pi_down = 1)
elif event['data']['id'] == 1:
if event['data']['action'] == 0:
logger.debug('Starte neu...')
todo_setvalues(pi_reboot = 1)
elif event['data']['id'] == 3:
if event['data']['action'] == 0:
# Scan for WLANs
logger.debug('Scanne WLANs')
ssids = wlan_getssids()
ssids_i = 0
logger.debug('SSIDs:' + str(ssids))
if not ssids:
NX_sendvalues({'main.ssid.txt:35': 'Kein WLAN'})
NX_sendcmd('page setup')
else:
NX_sendvalues({'main.ssid.txt:35': ssids[ssids_i]})
NX_sendcmd('page ssidselect')
elif event['data']['action'] == 1:
# previous SSID
if ssids_i <= 0:
ssids_i = len(ssids)-1
else:
ssids_i = ssids_i - 1
NX_sendvalues({'main.ssid.txt:35': ssids[ssids_i]})
elif event['data']['action'] == 2:
# next SSID
if ssids_i >= len(ssids)-1:
ssids_i = 0
else:
ssids_i = ssids_i + 1
NX_sendvalues({'main.ssid.txt:35': ssids[ssids_i]})
elif event['data']['area'] == 6:
if event['data']['id'] == 0:
if event['data']['action'] == 0:
logger.debug('Alarm bestätigt!')
alert_setack()
NX_eventq.task_done()
elif temps_event.is_set():
logger.debug('Temperatur Event')
values = dict()
new_temps = temp_getvalues()
if new_temps != None:
temps_event.clear()
for i in range(8):
if temps[i]['value'] != new_temps[i]['value']:
if new_temps[i]['value'] == '999.9':
values['main.kanal' + str(i) + '.txt:10'] = channels[i]['name']
else:
values['main.kanal' + str(i) + '.txt:10'] = new_temps[i]['value']
if temps[i]['alert'] != new_temps[i]['alert']:
values['main.alert' + str(i) + '.txt:10'] = new_temps[i]['alert']
if NX_sendvalues(values):
temps = new_temps
else:
# In case of an error, retry later
temps_event.set()
elif pitconf_event.is_set():
logger.debug('Pitmasterkonfiguration Event')
values = dict()
pitconf_event.clear()
new_pitconf = pitmaster_config_getvalues()
if pitconf['set'] != new_pitconf['set']:
values['main.pit_set.txt:10'] = round(new_pitconf['set'],1)
if pitconf['ch'] != new_pitconf['ch']:
values['main.pit_ch.val'] = int(new_pitconf['ch'])
if pitconf['open_lid_detection'] != new_pitconf['open_lid_detection']:
values['main.pit_lid.val'] = int(new_pitconf['open_lid_detection'])
if pitconf['inverted'] != new_pitconf['inverted']:
values['main.pit_inverted.val'] = int(new_pitconf['inverted'])
if pitconf['on'] != new_pitconf['on']:
values['main.pit_on.val'] = int(new_pitconf['on'])
if not new_pitconf['on']:
values['main.pit_power.val'] = 0
if pitconf['controller_type'] != new_pitconf['controller_type']:
values['main.pit_pid.val'] = {'False': 0, 'PID': 1}[new_pitconf['controller_type']]
if pitconf['type'] != new_pitconf['type']:
values['main.pit_type.val'] = pit_types[new_pitconf['type']]
if NX_sendvalues(values):
pitconf = new_pitconf
else:
# In case of an error, retry later
pitconf_event.set()
elif pitmaster_event.is_set():
logger.debug('Pitmaster Event')
values = dict()
pitmaster_event.clear()
new_pitmaster = pitmaster_getvalues()
if new_pitmaster != None:
if pitmaster['new'] != new_pitmaster['new']:
if pitconf['on']:
# If the pitmaster is off, send a value of 0.
values['main.pit_power.val'] = int(round(float(new_pitmaster['new'])))
else:
values['main.pit_power.val'] = 0
if NX_sendvalues(values):
pitmaster = new_pitmaster
else:
# In case of an error, retry later
pitmaster_event.set()
elif channels_event.is_set():
logger.debug('Channels Event')
values = dict()
channels_event.clear()
new_channels = channels_getvalues()
for i in range(8):
if channels[i]['temp_min'] != new_channels[i]['temp_min']:
values['main.al' + str(i) + 'minist.txt:10'] = new_channels[i]['temp_min']
if channels[i]['temp_max'] != new_channels[i]['temp_max']:
values['main.al' + str(i) + 'maxist.txt:10'] = new_channels[i]['temp_max']
if channels[i]['sensor'] != new_channels[i]['sensor']:
values['main.sensor_type' + str(i) + '.val'] = new_channels[i]['sensor']
if channels[i]['name'] != new_channels[i]['name']:
values['main.name' + str(i) + '.txt:10'] = new_channels[i]['name']
if new_temps[i]['value'] == '999.9':
values['main.kanal' + str(i) + '.txt:10'] = new_channels[i]['name']
if NX_sendvalues(values):
channels = new_channels
else:
# In case of an error, retry later
channels_event.set()
else:
time.sleep(0.01)
logger.info('Display-Thread gestoppt')
return True
def config_write(configfile, config):
# Writes the config file
# A lock should be held by the calling program!
with open(configfile + '_tmp', 'w') as new_ini:
for section_name in config.sections():
new_ini.write('[' + section_name + ']\n')
for (key, value) in config.items(section_name):
new_ini.write(str(key) + ' = ' + str(value) + '\n')
new_ini.write('\n')
new_ini.flush()
os.fsync(new_ini.fileno())
new_ini.close()
os.rename(configfile + '_tmp', configfile)
def raise_keyboard(signum, frame):
raise KeyboardInterrupt('Received SIGTERM')
def check_pid(pid):
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
# Here we go
logger.info('Nextion Display gestartet!')
logger.debug('Skriptversion: ' + version)
# Read the software version
for line in open('/var/www/header.php'):
if 'webGUIversion' in line:
build = re.match('.*=\s*"(.*)"', line).group(1)
break
# Check whether the service is already running
pid = str(os.getpid())
pidfilename = '/var/run/'+os.path.basename(__file__).split('.')[0]+'.pid'
if os.access(pidfilename, os.F_OK):
pidfile = open(pidfilename, "r")
pidfile.seek(0)
old_pid = int(pidfile.readline())
if check_pid(old_pid):
print("%s existiert, Prozess läuft bereits, beende Skript" % pidfilename)
logger.error("%s existiert, Prozess läuft bereits, beende Skript" % pidfilename)
sys.exit()
else:
logger.info("%s existiert, Prozess läuft nicht, setze Ausführung fort" % pidfilename)
pidfile.seek(0)
open(pidfilename, 'w').write(pid)
else:
logger.debug("%s geschrieben" % pidfilename)
open(pidfilename, 'w').write(pid)
# Initialize the display
logger.debug('Lade Displaykonfiguration')
display = display_getvalues()
logger.debug('Öffne seriellen Port: ' + display['serialdevice'])
ser = serial.Serial()
logger.debug('Initialisiere Display, Baudrate: ' + str(display['serialspeed']))
if NX_init(display['serialdevice'], display['serialspeed']):
logger.debug('Initialisierung OK')
signal.signal(15, raise_keyboard)
logger.debug('Starte Reader-Thread')
NX_reader_thread = threading.Thread(target=NX_reader)
NX_reader_thread.daemon = True
NX_reader_thread.start()
logger.debug('Starte Display-Thread')
NX_display_thread = threading.Thread(target=NX_display)
NX_display_thread.daemon = True
NX_display_thread.start()
logger.debug('Starte Dateiüberwachung')
wm = pyinotify.WatchManager()
mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO
notifier = pyinotify.ThreadedNotifier(wm, FileEvent())
notifier.start()
wdd = wm.add_watch(curPath, mask)
wdd2 = wm.add_watch(pitPath, mask)
wdd3 = wm.add_watch(confPath, mask)
try:
while True:
# Main loop
if not NX_display_thread.is_alive():
break
if not NX_reader_thread.is_alive():
break
time.sleep(0.5)
except KeyboardInterrupt:
if not NX_wake_event.is_set():
NX_sendcmd('sleep=0')
time.sleep(0.2)
NX_sendvalues({'boot.nextion_down.val': 1})
NX_switchpage('boot')
logger.debug('Sende Stopsignal an alle Threads')
notifier.stop()
# Signal the threads to stop
stop_event.set()
logger.debug('Warte auf Threads...')
# Wait for the threads
NX_display_thread.join()
NX_reader_thread.join()
else:
logger.error('Keine Verbindung zum Nextion Display')
# Maybe the software is not yet installed on the display
open('/var/www/tmp/nextionupdate', 'w').close()
logger.info('Display stopped!')
logging.shutdown()
os.unlink(pidfilename)
|
BjoernSch/WLANThermo_v2
|
software/usr/sbin/wlt_2_nextion.py
|
Python
|
gpl-3.0
| 53,246
|
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""
Round raster to set decimals.
"""
from os.path import dirname, exists
import argparse
import os
from osgeo import gdal
import numpy as np
from raster_tools import datasets
# output driver and options
DRIVER = gdal.GetDriverByName('gtiff')
OPTIONS = ['compress=deflate', 'tiled=yes']
progress = True
class Exchange(object):
def __init__(self, source_path, target_path):
"""
Read source, create target array.
"""
dataset = gdal.Open(source_path)
band = dataset.GetRasterBand(1)
self.source = band.ReadAsArray()
self.no_data_value = band.GetNoDataValue()
self.shape = self.source.shape
self.kwargs = {
'no_data_value': self.no_data_value,
'projection': dataset.GetProjection(),
'geo_transform': dataset.GetGeoTransform(),
}
self.target_path = target_path
self.target = np.full_like(self.source, self.no_data_value)
def round(self, decimals):
""" Round target. """
active = self.source != self.no_data_value
self.target[active] = self.source[active].round(decimals)
def save(self):
""" Save. """
# prepare dirs
subdir = dirname(self.target_path)
if subdir:
os.makedirs(subdir, exist_ok=True)
# write tiff
array = self.target[np.newaxis]
with datasets.Dataset(array, **self.kwargs) as dataset:
DRIVER.CreateCopy(self.target_path, dataset, options=OPTIONS)
def roundd(source_path, target_path, decimals):
""" Round decimals. """
# skip existing
if exists(target_path):
print('{} skipped.'.format(target_path))
return
# skip when missing sources
if not exists(source_path):
print('Raster source "{}" not found.'.format(source_path))
return
# read
exchange = Exchange(source_path, target_path)
if decimals:
exchange.round(decimals)
# save
exchange.save()
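# Hypothetical usage sketch (file names are placeholders; assumes the module
# is run as a script or exposed as a console entry point):
#     roundd elevation.tif elevation_rounded.tif -r 2
# reads SOURCE, rounds every non-nodata cell to 2 decimals and writes TARGET
# as a tiled, deflate-compressed GeoTIFF, skipping existing targets.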
def get_parser():
""" Return argument parser. """
parser = argparse.ArgumentParser(
description=__doc__,
)
# positional arguments
parser.add_argument(
'source_path',
metavar='SOURCE',
)
parser.add_argument(
'target_path',
metavar='TARGET',
)
parser.add_argument(
'-r', '--round',
type=int,
dest='decimals',
help='Round the result to this number of decimals.',
)
return parser
def main():
""" Call command with args from parser. """
roundd(**vars(get_parser().parse_args()))
|
nens/raster-tools
|
raster_tools/roundd.py
|
Python
|
gpl-3.0
| 2,683
|
# -*- coding: utf8 -*-
from phystricks import *
def rotation(angle,pts):
ptsp=[ x.rotation(angle) for x in pts ]
return tuple(ptsp)
def truc(A,B,C,D,points_names,angle,pspicts,n):
A,B,C,D=rotation(angle,[A,B,C,D])
quadri=Polygon(A,B,C,D)
quadri.put_mark(0.2,points_names=points_names,pspicts=pspicts)
no_symbol(A,B,C,D)
pspicts[n].DrawGraphs(quadri)
def ENQZooVqRaIv():
pspicts,figs = IndependentPictures("ENQZooVqRaIv",4)
for psp in pspicts:
psp.dilatation(0.7)
A=Point(0,0)
B=Point(2,0)
C=Point(3,-2)
D=Point(-3,-2)
truc(A,B,C,D,points_names="ABCD",angle=30,pspicts=pspicts,n=0)
E=Point(0,0)
F=Point(3,0)
G=F+(-2,-2)
H=E+G-F
truc(E,F,G,H,points_names="EFGH",angle=64,pspicts=pspicts,n=1)
I=Point(0,0)
J=Point(2,0)
K=J+(0,-3)
L=I+K-J
truc(I,J,K,L,points_names="IJKL",angle=-12,pspicts=pspicts,n=2)
M=Point(0,0)
N=M+(-1,-2)
O=M+(1,-2)
P=M+N+O
truc(M,N,P,O,points_names="MNPO",angle=45,pspicts=pspicts,n=3)
for fig in figs :
fig.no_figure()
fig.conclude()
fig.write_the_file()
|
LaurentClaessens/phystricks
|
testing/demonstration/phystricksENQZooVqRaIv.py
|
Python
|
gpl-3.0
| 1,151
|
# Generated by Django 3.1.4 on 2020-12-13 20:14
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_ca.models
class Migration(migrations.Migration):
dependencies = [
('django_ca', '0019_certificate_autogenerated'),
]
operations = [
migrations.CreateModel(
name='AcmeAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(auto_now=True)),
('pem', models.TextField(unique=True, validators=[django_ca.models.pem_validator], verbose_name='Public key')),
('thumbprint', models.CharField(max_length=64)),
('slug', models.SlugField(default=django_ca.models.acme_slug, unique=True)),
('kid', models.URLField(unique=True, validators=[django.core.validators.URLValidator(schemes=('http', 'https'))], verbose_name='Key ID')),
('status', models.CharField(choices=[('valid', 'Valid'), ('deactivated', 'Deactivated'), ('revoked', 'Revoked')], default='valid', max_length=12)),
('contact', models.TextField(blank=True, help_text='Contact addresses for this account, one per line.')),
('terms_of_service_agreed', models.BooleanField(default=False)),
],
options={
'verbose_name': 'ACME Account',
'verbose_name_plural': 'ACME Accounts',
},
bases=(django_ca.models.DjangoCAModel, ),
),
migrations.AddField(
model_name='certificateauthority',
name='acme_enabled',
field=models.BooleanField(default=False, help_text='Whether it is possible to use ACME for this CA.', verbose_name='Enable ACME'),
),
migrations.AddField(
model_name='certificateauthority',
name='acme_requires_contact',
field=models.BooleanField(default=True, help_text='If this CA requires a contact address during account registration.', verbose_name='Requires contact'),
),
migrations.AddField(
model_name='certificateauthority',
name='caa_identity',
field=models.CharField(blank=True, help_text='CAA identity for this CA (NOTE: Not currently used!).', max_length=32, verbose_name='CAA identity'),
),
migrations.AddField(
model_name='certificateauthority',
name='terms_of_service',
field=models.URLField(blank=True, help_text='URL to Terms of Service for this CA', verbose_name='Terms of Service'),
),
migrations.AddField(
model_name='certificateauthority',
name='website',
field=models.URLField(blank=True, help_text='Website for your CA.'),
),
migrations.CreateModel(
name='AcmeOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(default=django_ca.models.acme_slug, unique=True)),
('status', models.CharField(choices=[('invalid', 'Invalid'), ('pending', 'Pending'), ('processing', 'Processing'), ('ready', 'Ready'), ('valid', 'Valid')], default='pending', max_length=10)),
('expires', models.DateTimeField(default=django_ca.models.acme_order_expires)),
('not_before', models.DateTimeField(null=True)),
('not_after', models.DateTimeField(null=True)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='django_ca.acmeaccount')),
],
options={
'verbose_name': 'ACME Order',
'verbose_name_plural': 'ACME Orders',
},
bases=(django_ca.models.DjangoCAModel, ),
),
migrations.CreateModel(
name='AcmeCertificate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(default=django_ca.models.acme_slug, unique=True)),
('csr', models.TextField(verbose_name='CSR')),
('cert', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='django_ca.certificate')),
('order', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='django_ca.acmeorder')),
],
options={
'verbose_name': 'ACME Certificate',
'verbose_name_plural': 'ACME Certificate',
},
),
migrations.CreateModel(
name='AcmeAuthorization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(default=django_ca.models.acme_slug, unique=True)),
('type', models.CharField(choices=[('dns', 'DNS')], default='dns', max_length=8)),
('value', models.CharField(max_length=255)),
('status', models.CharField(choices=[('pending', 'Pending'), ('valid', 'Valid'), ('invalid', 'Invalid'), ('deactivated', 'Deactivated'), ('expired', 'Expired'), ('revoked', 'Revoked')], default='pending', max_length=12)),
('wildcard', models.BooleanField(default=False)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='authorizations', to='django_ca.acmeorder')),
],
options={
'verbose_name': 'ACME Authorization',
'verbose_name_plural': 'ACME Authorizations',
'unique_together': {('order', 'type', 'value')},
},
),
migrations.AddField(
model_name='acmeaccount',
name='ca',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_ca.certificateauthority', verbose_name='Certificate Authority'),
),
migrations.CreateModel(
name='AcmeChallenge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(default=django_ca.models.acme_slug, unique=True)),
('type', models.CharField(choices=[('http-01', 'HTTP Challenge'), ('dns-01', 'DNS Challenge'), ('tls-alpn-01', 'TLS ALPN Challenge')], max_length=12)),
('status', models.CharField(choices=[('pending', 'Pending'), ('processing', 'Processing'), ('valid', 'Valid'), ('invalid', 'Name')], default='pending', max_length=12)),
('validated', models.DateTimeField(blank=True, null=True)),
('error', models.CharField(blank=True, max_length=64)),
('token', models.CharField(blank=True, default=django_ca.models.acme_token, max_length=64)),
('auth', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='challenges', to='django_ca.acmeauthorization')),
],
options={
'verbose_name': 'ACME Challenge',
'verbose_name_plural': 'ACME Challenges',
'unique_together': {('auth', 'type')},
},
),
migrations.AlterUniqueTogether(
name='acmeaccount',
unique_together={('ca', 'thumbprint')},
),
]
|
mathiasertl/django-ca
|
ca/django_ca/migrations/0020_auto_20201213_2014.py
|
Python
|
gpl-3.0
| 7,726
|
# Copyright (C) 2016 William Langhoff WildBill567@users.noreply.github.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Adapted from:
# https://github.com/CodeReclaimers/neat-python, accessed May 2016
# Which is distributed with the following license:
# Copyright (c) 2007-2011, cesar.gomes and mirrorballu2
# Copyright (c) 2015, CodeReclaimers, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import networkx as nx
import matplotlib.pyplot as plt
from neat import activation_functions
def find_feed_forward_layers(inputs, connections):
"""
Collect the layers whose members can be evaluated in parallel in a feed-forward network.
Adapted from: https://github.com/CodeReclaimers/neat-python, accessed May 2016
:param inputs: list of the network input nodes
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
"""
# TODO: Detect and omit nodes whose output is ultimately never used.
layers = []
prev_nodes = set(inputs)
prev_nodes.add(0)
while 1:
# Find candidate nodes for the next layer. These nodes should connect
# a node in S to a node not in S.
candidate_set = set(b for (a, b) in connections if a in prev_nodes and b not in prev_nodes)
# Keep only the nodes whose entire input set is contained in S.
keeper_set = set()
for n in candidate_set:
if all(a in prev_nodes for (a, b) in connections if b == n):
keeper_set.add(n)
if not keeper_set:
break
layers.append(keeper_set)
prev_nodes = prev_nodes.union(keeper_set)
return layers
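# Worked example (node ids are hypothetical): for input nodes [1, 2] and
# connections [(1, 3), (2, 3), (3, 4), (2, 4)], the first pass keeps only
# node 3 (all of its inputs are already available), the next pass keeps
# node 4, so the function returns [{3}, {4}].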
class FeedForwardPhenome:
def __init__(self, genome, config):
"""
FeedForwardPhenome - A feedforward network
Adapted from: https://github.com/CodeReclaimers/neat-python, accessed May 2016
:param genome: the genome to create the phenome
"""
self.graph, node_lists = self._construct_graph(genome)
self.input_nodes, self.hidden_nodes, self.output_nodes = node_lists
self.links = [(g.src, g.sink) for g in genome.link_genes]
self.node_evals = []
self.config = config
layers = find_feed_forward_layers(self.input_nodes, self.links)
used_nodes = set(self.input_nodes + self.output_nodes)
for layer in layers:
for node in layer:
inputs = []
# TODO: This could be more efficient.
for cg in genome.link_genes:
if cg.sink == node and cg.enabled:
inputs.append((cg.src, cg.weight))
used_nodes.add(cg.src)
used_nodes.add(node)
ng = genome.get_node_by_index(node)
activation_function = activation_functions.get(ng.activation)
self.node_evals.append((node, activation_function, inputs))
self.values = [0.0] * (1 + max(used_nodes))
def serial_activate(self, inputs):
"""
serial_activate - gives network output for an input
Adapted from: https://github.com/CodeReclaimers/neat-python, accessed May 2016
:param inputs: numerical input list
:return: numerical output list
"""
if len(self.input_nodes) != len(inputs):
raise ValueError("Expected {0} inputs, got {1}".format(len(self.input_nodes), len(inputs)))
self.values[0] = 1.0
for idx, v in zip(self.input_nodes, inputs):
self.values[idx] = v
for node, func, links in self.node_evals:
linear_activation = 0.0
for idx, weight in links:
linear_activation += self.values[idx] * weight
self.values[node] = func(linear_activation)
return [self.values[i] for i in self.output_nodes]
def draw(self, testing=False):
"""Draws the network with matplotlib"""
fig = plt.figure()
pos = {0: (-1.5, 0)}
for idx in range(len(self.input_nodes)):
pos[idx+1] = (idx, 0)
for idx, val in enumerate(self.output_nodes):
pos[val] = (idx, 4)
for idx, val in enumerate(self.hidden_nodes):
pos[val] = (idx, 2)
nx.draw_networkx_nodes(self.graph, pos,
nodelist=self.input_nodes,
node_color='r')
nx.draw_networkx_nodes(self.graph, pos,
nodelist=self.output_nodes,
node_color='g')
nx.draw_networkx_nodes(self.graph, pos,
nodelist=[0],
node_color='k')
nx.draw_networkx_edges(self.graph, pos)
plt.yticks([])
plt.xticks([])
fig.show()
if testing:
time.sleep(1)
plt.close(fig)
else:
plt.show()
@staticmethod
def _construct_graph(genome):
"""Constructs the DiGraph"""
graph = nx.DiGraph()
graph.add_node(0, {'node_type': 'BIAS', 'val': 1})
input_list = []
output_list = []
hidden_list = []
for gene in genome.input_genes:
graph.add_node(gene.idx)
input_list.append(gene.idx)
for gene in genome.output_genes:
graph.add_node(gene.idx)
output_list.append(gene.idx)
for gene in genome.hidden_genes:
graph.add_node(gene.idx)
hidden_list.append(gene.idx)
for gene in genome.link_genes:
graph.add_edge(gene.src, gene.sink,
{'weight': gene.weight,
'enabled': gene.enabled})
return graph, (input_list, hidden_list, output_list)
def handle_close(fig):
plt.close(fig)
|
WildBill567/nn-toy
|
neat/phenome.py
|
Python
|
gpl-3.0
| 8,014
|
#Made by Kerb
import sys
from com.l2scoria import Config
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "644_GraveRobberAnnihilation"
#Drop rate
DROP_CHANCE = 75
#Npc
KARUDA = 32017
#Items
ORC_GOODS = 8088
#Rewards
REWARDS = {
"1" : [1865 , 30], #Varnish
"2" : [1867 , 40], #Animal Skin
"3" : [1872 , 40], #Animal Bone
"4" : [1871 , 30], #Charcoal
"5" : [1870 , 30], #Coal
"6" : [1869 , 30], #Iron Ore
}
#Mobs
MOBS = [ 22003,22004,22005,22006,22008 ]
class Quest (JQuest) :
def onEvent (self,event,st) :
cond = st.getInt("cond")
htmltext = event
if event == "32017-03.htm" :
if st.getPlayer().getLevel() < 20 :
htmltext = "32017-02.htm"
st.exitQuest(1)
else :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event in REWARDS.keys() :
item, amount = REWARDS[event]
st.takeItems(ORC_GOODS,-1)
st.giveItems(item, amount)
st.playSound("ItemSound.quest_finish")
st.exitQuest(1)
return
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if st :
npcId = npc.getNpcId()
id = st.getState()
cond = st.getInt("cond")
if cond == 0 :
htmltext = "32017-01.htm"
elif cond == 1 :
htmltext = "32017-04.htm"
elif cond == 2 :
if st.getQuestItemsCount(ORC_GOODS) >= 120 :
htmltext = "32017-05.htm"
else :
htmltext = "32017-04.htm"
return htmltext
def onKill(self,npc,player,isPet):
partyMember = self.getRandomPartyMember(player,"1")
if not partyMember: return
st = partyMember.getQuestState(qn)
if st :
if st.getState() == STARTED :
count = st.getQuestItemsCount(ORC_GOODS)
if st.getInt("cond") == 1 and count < 120 :
chance = DROP_CHANCE * Config.RATE_DROP_QUEST
numItems, chance = divmod(chance,100)
if st.getRandom(100) < chance :
numItems += 1
if numItems :
if count + numItems >= 120 :
numItems = 120 - count
st.playSound("ItemSound.quest_middle")
st.set("cond","2")
else:
st.playSound("ItemSound.quest_itemget")
st.giveItems(ORC_GOODS,int(numItems))
return
QUEST = Quest(644, qn, "Grave Robber Annihilation")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(KARUDA)
QUEST.addTalkId(KARUDA)
for i in MOBS :
QUEST.addKillId(i)
STARTED.addQuestDrop(KARUDA,ORC_GOODS,1)
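# Drop-math note (added for clarity): onKill scales DROP_CHANCE by
# Config.RATE_DROP_QUEST, then divmod(chance, 100) splits the result into a
# guaranteed item count plus a remainder used as a percentage chance for one
# extra ORC_GOODS; the amount given is capped so the total never exceeds the
# 120 items needed to turn the quest in at Karuda.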
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/644_GraveRobberAnnihilation/__init__.py
|
Python
|
gpl-3.0
| 2,948
|
def main():
s = 'this is a string'
for c in s:
print(c, end='')
    else:  # this else clause runs because the loop completed without a break
print(' else')
print()
for i in range(10, 0, -1):
print(i, end=' ')
if __name__ == "__main__":
main()
|
Safuya/python_3_essential_training
|
07 Loops/loop_control.py
|
Python
|
gpl-3.0
| 223
|
import numpy as np
from .abstract_layer import LayerBase, NoParamMixin
from ..util import zX, zX_like, white, scalX
class PoolLayer(NoParamMixin, LayerBase):
def __init__(self, filter_size, compiled=True):
LayerBase.__init__(self, activation="linear", trainable=False)
if compiled:
from ..llatomic.lltensor_op import MaxPoolOp
else:
from ..atomic import MaxPoolOp
self.fdim = filter_size
self.filter = None
self.op = MaxPoolOp()
def connect(self, brain):
ic, iy, ix = brain.outshape[-3:]
if any((iy % self.fdim, ix % self.fdim)):
raise RuntimeError(
"Incompatible shapes: {} % {}".format((ix, iy), self.fdim)
)
LayerBase.connect(self, brain)
self.output = zX(ic, iy // self.fdim, ix // self.fdim)
def feedforward(self, questions):
self.output, self.filter = self.op.forward(questions, self.fdim)
return self.output
def backpropagate(self, delta):
return self.op.backward(delta, self.filter)
@property
def outshape(self):
return self.output.shape[-3:]
def __str__(self):
return "Pool-{}x{}".format(self.fdim, self.fdim)
class ConvLayer(LayerBase):
def __init__(self, nfilters, filterx=3, filtery=3, compiled=True, **kw):
super().__init__(compiled=compiled, **kw)
self.nfilters = nfilters
self.fx = filterx
self.fy = filtery
self.depth = 0
self.stride = 1
self.inshape = None
self.op = None
def connect(self, brain):
if self.compiled:
from ..llatomic import ConvolutionOp
else:
from ..atomic import ConvolutionOp
depth, iy, ix = brain.outshape[-3:]
if any((iy < self.fy, ix < self.fx)):
raise RuntimeError(
"Incompatible shapes: iy ({}) < fy ({}) OR ix ({}) < fx ({})"
.format(iy, self.fy, ix, self.fx)
)
super().connect(brain)
self.op = ConvolutionOp()
self.inshape = brain.outshape
self.depth = depth
self.weights = white(self.nfilters, self.depth, self.fx, self.fy)
self.biases = zX(self.nfilters)[None, :, None, None]
self.nabla_b = zX_like(self.biases)
self.nabla_w = zX_like(self.weights)
def feedforward(self, X):
self.inputs = X
self.output = self.activation.forward(self.op.forward(X, self.weights, "valid"))
self.output += self.biases
return self.output
def backpropagate(self, delta):
delta *= self.activation.backward(self.output)
self.nabla_w, self.nabla_b, dX = self.op.backward(X=self.inputs, E=delta, F=self.weights)
return dX
@property
def outshape(self):
oy, ox = tuple(ix - fx + 1 for ix, fx in zip(self.inshape[-2:], (self.fx, self.fy)))
return self.nfilters, ox, oy
def __str__(self):
return "Conv({}x{}x{})-{}".format(self.nfilters, self.fy, self.fx, str(self.activation)[:4])
class GlobalAveragePooling(NoParamMixin, LayerBase):
def __init__(self):
LayerBase.__init__(self)
NoParamMixin.__init__(self)
self.repeats = 0
def feedforward(self, X):
self.repeats = np.prod(X.shape[2:])
return X.mean(axis=(2, 3))
def backpropagate(self, delta):
m = len(delta)
delta = np.repeat(delta / scalX(self.repeats), self.repeats)
delta = delta.reshape((m,) + self.inshape)
return delta
@property
def outshape(self):
return self.inshape[0],
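# --- Illustrative note (added; not part of brainforge) ---
# Shape bookkeeping used by the layers above, assuming stride 1, "valid"
# convolution and a non-overlapping pool of size fdim. For a hypothetical
# 1x28x28 input:
#
#   ConvLayer(8, 3, 3):  (1, 28, 28) -> (8, 26, 26)   # iy - fy + 1, ix - fx + 1
#   PoolLayer(2):        (8, 26, 26) -> (8, 13, 13)   # iy // fdim,  ix // fdim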
|
csxeba/brainforge
|
brainforge/layers/tensor.py
|
Python
|
gpl-3.0
| 3,630
|
'''Publish sensor events to MQTT broker.'''
import logging
import paho.mqtt.publish as mqtt_pub
import paho.mqtt.client as mqtt
import socket
class MqttPublisher():
'''Publish sensor events to an MQTT broker.'''
def __init__(self, broker, topic_prefix='/sensors'):
'''Initialize a MqttPublisher instance.'''
self.broker = broker
# TODO: Choose between hostname and fqdn
self.node_name = socket.gethostname()
self.topic_prefix = topic_prefix
def get_topic(self, evt):
'''Generate the MQTT topic for the event.'''
data = {
'prefix': self.topic_prefix,
'node': self.node_name,
'sensor': evt.getSensorName(),
'quantity': evt.getQuantity(),
}
path_tmpl = '{prefix}/{node}/{sensor}/{quantity}'
return path_tmpl.format(**data)
def publish_event(self, evt):
'''Publish a single sensor event.'''
# The publish might fail, e.g. due to network problems. Just log
# the exception and try again next time.
try:
topic = self.get_topic(evt)
msg = "Publishing to topic '{0}'."
logging.debug(msg.format(topic))
# This fixes the protocol version to MQTT v3.1, because
# the current version of the MQTT broker available in
# raspbian does not support MQTT v3.1.1.
mqtt_pub.single(
topic=topic,
payload=evt.toJSON(),
hostname=self.broker,
protocol=mqtt.MQTTv31)
        except Exception:
logging.exception('Publish of MQTT value failed.')
def publish_events(self, evts):
'''Publish a list of sensor events.'''
for evt in evts:
self.publish_event(evt)
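if __name__ == '__main__':
    # Hedged usage sketch (added for illustration): publishes a single fake
    # event to a broker on localhost. DummyEvent and the broker address are
    # assumptions; real events come from the sensor layer of this project.
    logging.basicConfig(level=logging.DEBUG)
    class DummyEvent():
        '''Minimal stand-in implementing the event interface used above.'''
        def getSensorName(self):
            return 'cpu'
        def getQuantity(self):
            return 'temperature'
        def toJSON(self):
            return '{"value": 42.0}'
    MqttPublisher(broker='localhost').publish_events([DummyEvent()])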
|
motlib/mqtt-ts
|
src/pub/evtpub.py
|
Python
|
gpl-3.0
| 1,817
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import socket
import sickbeard
from sickbeard import logger, common
from sickrage.helper.exceptions import ex
from libgrowl import gntp
class GrowlNotifier(object):
sr_logo_url = 'https://raw.githubusercontent.com/SickRage/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'
def test_notify(self, host, password):
self._sendRegistration(host, password, 'Test')
return self._sendGrowl("Test Growl", "Testing Growl settings from SickRage", "Test", host, password,
force=True)
def notify_snatch(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONSNATCH:
self._sendGrowl(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)
def notify_download(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONDOWNLOAD:
self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:
self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ": " + lang)
def notify_git_update(self, new_version="??"):
if sickbeard.USE_GROWL:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
self._sendGrowl(title, update_text + new_version)
def notify_login(self, ipaddress=""):
if sickbeard.USE_GROWL:
update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
title = common.notifyStrings[common.NOTIFY_LOGIN]
self._sendGrowl(title, update_text.format(ipaddress))
def _send_growl(self, options, message=None):
# Send Notification
notice = gntp.GNTPNotice()
# Required
notice.add_header('Application-Name', options['app'])
notice.add_header('Notification-Name', options['name'])
notice.add_header('Notification-Title', options['title'])
if options['password']:
notice.set_password(options['password'])
# Optional
if options['sticky']:
notice.add_header('Notification-Sticky', options['sticky'])
if options['priority']:
notice.add_header('Notification-Priority', options['priority'])
if options['icon']:
notice.add_header('Notification-Icon', self.sr_logo_url)
if message:
notice.add_header('Notification-Text', message)
response = self._send(options['host'], options['port'], notice.encode(), options['debug'])
return True if isinstance(response, gntp.GNTPOK) else False
def _send(self, host, port, data, debug=False):
if debug:
print '<Sending>\n', data, '\n</Sending>'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(data)
response = gntp.parse_gntp(s.recv(1024))
s.close()
if debug:
print '<Received>\n', response, '\n</Received>'
return response
def _sendGrowl(self, title="SickRage Notification", message=None, name=None, host=None, password=None,
force=False):
if not sickbeard.USE_GROWL and not force:
return False
if name is None:
name = title
if host is None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
growlHosts = [(hostParts[0], port)]
opts = {
'name': name,
'title': title,
'app': 'SickRage',
'sticky': None,
'priority': None,
'debug': False
}
if password is None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['icon'] = True
for pc in growlHosts:
opts['host'] = pc[0]
opts['port'] = pc[1]
logger.log(u"GROWL: Sending message '" + message + "' to " + opts['host'] + ":" + str(opts['port']), logger.DEBUG)
try:
if self._send_growl(opts, message):
return True
else:
if self._sendRegistration(host, password, 'Sickbeard'):
return self._send_growl(opts, message)
else:
return False
except Exception, e:
logger.log(u"GROWL: Unable to send growl to " + opts['host'] + ":" + str(opts['port']) + " - " + ex(e), logger.WARNING)
return False
def _sendRegistration(self, host=None, password=None, name='SickRage Notification'):
opts = {}
if host is None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
opts['host'] = hostParts[0]
opts['port'] = port
if password is None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['app'] = 'SickRage'
opts['debug'] = False
# Send Registration
register = gntp.GNTPRegister()
register.add_header('Application-Name', opts['app'])
register.add_header('Application-Icon', self.sr_logo_url)
register.add_notification('Test', True)
register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)
register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)
register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)
if opts['password']:
register.set_password(opts['password'])
try:
return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])
except Exception, e:
logger.log(u"GROWL: Unable to send growl to " + opts['host'] + ":" + str(opts['port']) + " - " + ex(e), logger.WARNING)
return False
notifier = GrowlNotifier
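# Usage note (added): SickRage drives notifications through the `notifier`
# class reference above; a manual check from a Python 2 shell would be, for
# example, GrowlNotifier().test_notify("hostname", "password"), where both
# arguments are placeholders and the port defaults to 23053 when none is
# given in the host string.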
|
Thraxis/SickRage
|
sickbeard/notifiers/growl.py
|
Python
|
gpl-3.0
| 7,057
|
import sys
import random
import collections
import itertools
import bisect
# @include
def nonuniform_random_number_generation(values, probabilities):
prefix_sum_of_probabilities = (
[0.0] + list(itertools.accumulate(probabilities)))
interval_idx = bisect.bisect(prefix_sum_of_probabilities,
random.random()) - 1
return values[interval_idx]
# @exclude
def main():
n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 50)
T = [float(i) for i in range(n)]
P = []
full_prob = 1.0
for i in range(n - 1):
pi = random.uniform(0.0, full_prob)
P.append(pi)
full_prob -= pi
P.append(full_prob)
print(*T)
print(*P)
print(nonuniform_random_number_generation(T, P))
# Test. Perform the nonuniform random number generation for n * k_times
# times and calculate the distribution of each bucket.
k_times = 100000
counts = collections.Counter(
int(nonuniform_random_number_generation(T, P))
for _ in range(n * k_times))
for i in range(n):
print(counts[i] / (n * k_times), P[i])
assert abs(counts[i] / (n * k_times) - P[i]) < 0.01
if __name__ == '__main__':
main()
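# Note (added for clarity): the generator builds the prefix sums of the
# probabilities, draws u = random.random() in [0, 1), and uses bisect to find
# the half-open interval [P_0 + ... + P_{i-1}, P_0 + ... + P_i) containing u;
# values[i] is then returned, so each value appears with its given probability.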
|
meisamhe/GPLshared
|
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/nonuniform_random_number_generation.py
|
Python
|
gpl-3.0
| 1,235
|
# Maked by Mr. Have fun! Version 0.2
# Shadow Weapon Coupons contributed by BiTi for the Official L2J Datapack Project
# Visit http://forum.l2jdp.com for more details
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "404_PathToWizard"
MAP_OF_LUSTER = 1280
KEY_OF_FLAME = 1281
FLAME_EARING = 1282
BROKEN_BRONZE_MIRROR = 1283
WIND_FEATHER = 1284
WIND_BANGEL = 1285
RAMAS_DIARY = 1286
SPARKLE_PEBBLE = 1287
WATER_NECKLACE = 1288
RUST_GOLD_COIN = 1289
RED_SOIL = 1290
EARTH_RING = 1291
BEAD_OF_SEASON = 1292
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
player = st.getPlayer()
if event == "1" :
st.set("id","0")
if player.getClassId().getId() == 0x0a :
if player.getLevel() >= 19 :
if st.getQuestItemsCount(BEAD_OF_SEASON) :
htmltext = "30391-03.htm"
else:
htmltext = "30391-08.htm"
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
else:
htmltext = "30391-02.htm"
else:
if player.getClassId().getId() == 0x0b :
htmltext = "30391-02a.htm"
else:
htmltext = "30391-01.htm"
elif event == "30410_1" :
if st.getQuestItemsCount(WIND_FEATHER) == 0 :
htmltext = "30410-03.htm"
st.giveItems(WIND_FEATHER,1)
st.set("cond","6")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if npcId != 30391 and id != STARTED : return htmltext
if id == CREATED :
st.setState(STARTING)
st.set("cond","0")
st.set("onlyone","0")
st.set("id","0")
if npcId == 30391 and st.getInt("cond")==0 :
#Talking to Parina before completing this quest
if st.getInt("cond")<15 :
htmltext = "30391-04.htm"
return htmltext
else:
htmltext = "30391-04.htm"
elif npcId == 30391 and st.getInt("cond")!=0 and (st.getQuestItemsCount(FLAME_EARING)==0 or st.getQuestItemsCount(WIND_BANGEL)==0 or st.getQuestItemsCount(WATER_NECKLACE)==0 or st.getQuestItemsCount(EARTH_RING)==0) :
htmltext = "30391-05.htm"
elif npcId == 30411 and st.getInt("cond")!=0 and st.getQuestItemsCount(MAP_OF_LUSTER)==0 and st.getQuestItemsCount(FLAME_EARING)==0 :
      #Talking to the Flame Salamander for the first time
#gains us the MAP_OF_LUSTER
#and flags cond = 2
if st.getQuestItemsCount(MAP_OF_LUSTER) == 0 :
st.giveItems(MAP_OF_LUSTER,1)
htmltext = "30411-01.htm"
st.set("cond","2")
elif npcId == 30411 and st.getInt("cond")!=0 and st.getQuestItemsCount(MAP_OF_LUSTER)!=0 and st.getQuestItemsCount(KEY_OF_FLAME)==0 :
#Talking to the Flame Salamander more than once
#without the KEY_OF_FLAME
#But with the MAP_OF_LUSTER
#results in the following text
htmltext = "30411-02.htm"
elif npcId == 30411 and st.getInt("cond")!=0 and st.getQuestItemsCount(MAP_OF_LUSTER)!=0 and st.getQuestItemsCount(KEY_OF_FLAME)!=0 :
#Talking to the Flame Salamander when Cond != 0
#while we have a KEY_OF_FLAME from the ratmen and the MAP_OF_LUSTER
#Remove both Items and give a FLAME_EARING
#Set the cond flag to 4 to signify we have completed the first part
st.takeItems(KEY_OF_FLAME,st.getQuestItemsCount(KEY_OF_FLAME))
st.takeItems(MAP_OF_LUSTER,st.getQuestItemsCount(MAP_OF_LUSTER))
if st.getQuestItemsCount(FLAME_EARING) == 0 :
st.giveItems(FLAME_EARING,1)
htmltext = "30411-03.htm"
st.set("cond","4")
elif npcId == 30411 and st.getInt("cond")!=0 and st.getQuestItemsCount(FLAME_EARING)!=0 :
#Talking to the Flame Salamander
#after finishing the Fire component results
#in the following text
htmltext = "30411-04.htm"
elif npcId == 30412 and st.getInt("cond")!=0 and st.getQuestItemsCount(FLAME_EARING)!=0 and st.getQuestItemsCount(BROKEN_BRONZE_MIRROR)==0 and st.getQuestItemsCount(WIND_BANGEL)==0 :
#Talking to the Wind Sylph for the first time
#With a FLAME_EARING (fire component complete)
#Gives us a BROKEN_BRONZE_MIRROR
#and sets cond = 5
if st.getQuestItemsCount(BROKEN_BRONZE_MIRROR) == 0 :
st.giveItems(BROKEN_BRONZE_MIRROR,1)
htmltext = "30412-01.htm"
st.set("cond","5")
elif npcId == 30412 and st.getInt("cond")!=0 and st.getQuestItemsCount(BROKEN_BRONZE_MIRROR)!=0 and st.getQuestItemsCount(WIND_FEATHER)==0 :
#Talking to the Wind Sylph for a second time
#results in the following text
htmltext = "30412-02.htm"
elif npcId == 30412 and st.getInt("cond")!=0 and st.getQuestItemsCount(BROKEN_BRONZE_MIRROR)!=0 and st.getQuestItemsCount(WIND_FEATHER)!=0 :
#Talking to the Wind Sylph with cond != 0
#while having a BROKEN_BRONZE_MIRROR and a WIND_FEATHER
#Removes both items
#Gives a WIND_BANGEL
#and sets cond = 7
st.takeItems(WIND_FEATHER,st.getQuestItemsCount(WIND_FEATHER))
st.takeItems(BROKEN_BRONZE_MIRROR,st.getQuestItemsCount(BROKEN_BRONZE_MIRROR))
if st.getQuestItemsCount(WIND_BANGEL) == 0 :
st.giveItems(WIND_BANGEL,1)
htmltext = "30412-03.htm"
st.set("cond","7")
elif npcId == 30412 and st.getInt("cond")!=0 and st.getQuestItemsCount(WIND_BANGEL)!=0 :
#Talking to the Wind Sylph after we get the WIND_BANGLE
#results in the following text
htmltext = "30412-04.htm"
elif npcId == 30410 and st.getInt("cond")!=0 and st.getQuestItemsCount(BROKEN_BRONZE_MIRROR)!=0 and st.getQuestItemsCount(WIND_FEATHER)==0 :
#Talking to the Lizardman of the Wastelands for the first time
#begins this conversation
htmltext = "30410-01.htm"
elif npcId == 30410 and st.getInt("cond")!=0 and st.getQuestItemsCount(BROKEN_BRONZE_MIRROR)!=0 and st.getQuestItemsCount(WIND_FEATHER)!=0 :
#Talking to the Lizardman of the Wastelands after obtaining
#the WIND_FEATHER
htmltext = "30410-04.htm"
elif npcId == 30413 and st.getInt("cond")!=0 and st.getQuestItemsCount(WIND_BANGEL)!=0 and st.getQuestItemsCount(RAMAS_DIARY)==0 and st.getQuestItemsCount(WATER_NECKLACE)==0 :
#Talking to the Water Undine for the first time
#gives RAMAS_DIARY
#and sets cond = 8
if st.getQuestItemsCount(RAMAS_DIARY) == 0 :
st.giveItems(RAMAS_DIARY,1)
htmltext = "30413-01.htm"
st.set("cond","8")
elif npcId == 30413 and st.getInt("cond")!=0 and st.getQuestItemsCount(RAMAS_DIARY)!=0 and st.getQuestItemsCount(SPARKLE_PEBBLE)<2 :
#Talking to the Water Undine for a second time
      #without 2 SPARKLE_PEBBLE
htmltext = "30413-02.htm"
elif npcId == 30413 and st.getInt("cond")!=0 and st.getQuestItemsCount(RAMAS_DIARY)!=0 and st.getQuestItemsCount(SPARKLE_PEBBLE)>=2 :
      #Talking to the Water Undine with the 2 SPARKLE_PEBBLE
#removes both items
#and gives WATER_NECKLACE
#sets cond = 10
st.takeItems(SPARKLE_PEBBLE,st.getQuestItemsCount(SPARKLE_PEBBLE))
st.takeItems(RAMAS_DIARY,st.getQuestItemsCount(RAMAS_DIARY))
if st.getQuestItemsCount(WATER_NECKLACE) == 0 :
st.giveItems(WATER_NECKLACE,1)
htmltext = "30413-03.htm"
st.set("cond","10")
elif npcId == 30413 and st.getInt("cond")!=0 and st.getQuestItemsCount(WATER_NECKLACE)!=0 :
      #Talking to the Water Undine after completing its task
htmltext = "30413-04.htm"
elif npcId == 30409 and st.getInt("cond")!=0 and st.getQuestItemsCount(WATER_NECKLACE)!=0 and st.getQuestItemsCount(RUST_GOLD_COIN)==0 and st.getQuestItemsCount(EARTH_RING)==0 :
#Talking to the Earth Snake for the first time
if st.getQuestItemsCount(RUST_GOLD_COIN) == 0 :
st.giveItems(RUST_GOLD_COIN,1)
htmltext = "30409-01.htm"
st.set("cond","11")
elif npcId == 30409 and st.getInt("cond")!=0 and st.getQuestItemsCount(RUST_GOLD_COIN)!=0 and st.getQuestItemsCount(RED_SOIL)==0 :
#Talking to the Earth Snake for a second time
#without RED_SOIL
htmltext = "30409-02.htm"
elif npcId == 30409 and st.getInt("cond")!=0 and st.getQuestItemsCount(RUST_GOLD_COIN)!=0 and st.getQuestItemsCount(RED_SOIL)!=0 :
      #Talking to the Earth Snake after collecting the RED_SOIL
#Gives EARTH_RING
#and sets cond = 13
st.takeItems(RED_SOIL,st.getQuestItemsCount(RED_SOIL))
st.takeItems(RUST_GOLD_COIN,st.getQuestItemsCount(RUST_GOLD_COIN))
if st.getQuestItemsCount(EARTH_RING) == 0 :
st.giveItems(EARTH_RING,1)
htmltext = "30409-03.htm"
st.set("cond","13")
elif npcId == 30409 and st.getInt("cond")!=0 and st.getQuestItemsCount(EARTH_RING)!=0 :
#Talking to the Earth Snake after completing his task
htmltext = "30409-03.htm"
elif npcId == 30391 and st.getInt("cond")!=0 and st.getQuestItemsCount(FLAME_EARING)!=0 and st.getQuestItemsCount(WIND_BANGEL)!=0 and st.getQuestItemsCount(WATER_NECKLACE)!=0 and st.getQuestItemsCount(EARTH_RING)!=0 :
#Talking to Parina after gathering all 4 tokens
#Gains BEAD_OF_SEASON
#Resets cond so these NPC's will no longer speak to you
#and Sets the quest as completed
st.takeItems(FLAME_EARING,st.getQuestItemsCount(FLAME_EARING))
st.takeItems(WIND_BANGEL,st.getQuestItemsCount(WIND_BANGEL))
st.takeItems(WATER_NECKLACE,st.getQuestItemsCount(WATER_NECKLACE))
st.takeItems(EARTH_RING,st.getQuestItemsCount(EARTH_RING))
st.set("cond","0")
st.setState(COMPLETED)
st.playSound("ItemSound.quest_finish")
if st.getQuestItemsCount(BEAD_OF_SEASON) == 0 :
st.giveItems(BEAD_OF_SEASON,1)
htmltext = "30391-06.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
if npcId == 20359 : #Ratman Warrior, as of C3.
st.set("id","0")
#Only get a KEY_OF_FLAME if we are on the quest for the Fire Salamander
if st.getInt("cond") == 2 :
st.giveItems(KEY_OF_FLAME,1)
st.playSound("ItemSound.quest_middle")
#Increase the Cond so we can only get one key
st.set("cond","3")
elif npcId == 27030 : #water seer
st.set("id","0")
#Only get a SPARKLE_PEBBLE if we are on the quest for the Water Undine
if st.getInt("cond") == 8 and st.getQuestItemsCount(SPARKLE_PEBBLE) < 2:
st.giveItems(SPARKLE_PEBBLE,1)
if st.getQuestItemsCount(SPARKLE_PEBBLE) == 2 :
st.playSound("ItemSound.quest_middle")
st.set("cond","9")
else:
st.playSound("ItemSound.quest_itemget")
elif npcId == 20021 : #Red Bear
st.set("id","0")
#Only get a RED_SOIL if we are on the quest for the Earth Snake
if st.getInt("cond") == 11 :
st.giveItems(RED_SOIL,1)
st.playSound("ItemSound.quest_middle")
st.set("cond","12")
return
QUEST = Quest(404,qn,"Path To Wizard")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30391)
QUEST.addTalkId(30391)
QUEST.addTalkId(30409)
QUEST.addTalkId(30410)
QUEST.addTalkId(30411)
QUEST.addTalkId(30412)
QUEST.addTalkId(30413)
QUEST.addKillId(20021)
QUEST.addKillId(20359)
QUEST.addKillId(27030)
STARTED.addQuestDrop(20359,KEY_OF_FLAME,1)
STARTED.addQuestDrop(30411,MAP_OF_LUSTER,1)
STARTED.addQuestDrop(30410,WIND_FEATHER,1)
STARTED.addQuestDrop(30412,BROKEN_BRONZE_MIRROR,1)
STARTED.addQuestDrop(27030,SPARKLE_PEBBLE,1)
STARTED.addQuestDrop(30413,RAMAS_DIARY,1)
STARTED.addQuestDrop(20021,RED_SOIL,1)
STARTED.addQuestDrop(30409,RUST_GOLD_COIN,1)
STARTED.addQuestDrop(30411,FLAME_EARING,1)
STARTED.addQuestDrop(30412,WIND_BANGEL,1)
STARTED.addQuestDrop(30413,WATER_NECKLACE,1)
STARTED.addQuestDrop(30409,EARTH_RING,1)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/404_PathToWizard/__init__.py
|
Python
|
gpl-3.0
| 12,640
|
import json
import hashlib
from django.db import models
from django.db.models import Count, Func
from django.contrib.postgres.fields import ArrayField
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.utils.translation import gettext_lazy as _
# from social.apps.django_app.default.models import UserSocialAuth
from django.contrib.auth.models import Permission, Group, PermissionsMixin
from django.db import transaction
from random import randint
from django.core.cache import cache
from mptt.models import MPTTModel, TreeForeignKey
from netfields import InetAddressField, NetManager
from django_gravatar.helpers import get_gravatar_url
from . import now
# from lazysignup.utils import is_lazy_user
# Travis payload format:
# https://docs.travis-ci.com/user/notifications#Webhooks-Delivery-Format
class SiteUpdate(models.Model):
started = models.DateTimeField(
default=None,
null=True, blank=True,
db_index=True
)
finished = models.DateTimeField(
auto_now_add=True,
db_index=True,
null=True, blank=True
)
sha1 = models.CharField(max_length=40, editable=False, unique=True)
commit_time = models.DateTimeField(
db_index=True,
null=True, blank=True
)
commit_message = models.CharField(
max_length=150,
editable=False,
null=True, blank=True
)
travis_raw = models.TextField(null=True, blank=True)
log = models.TextField(null=True, blank=True)
class Meta:
verbose_name = _("Site update")
verbose_name_plural = _("Site updates")
@property
def travis_raw_pretty(self):
if self.travis_raw:
parsed = json.loads(self.travis_raw)
return json.dumps(parsed, indent=4, sort_keys=True)
else:
return ""
@property
def length(self):
if self.finished and self.started:
return self.finished-self.started
else:
return None
def __str__(self):
return self.sha1
class AddedChanged(models.Model):
added = models.DateTimeField(
auto_now_add=True,
db_index=True,
# default=now,
)
changed = models.DateTimeField(
auto_now=True,
db_index=True,
# default=now
)
# , editable=False
class Meta:
abstract = True
class UserManager(BaseUserManager):
def create_user(self, email, username=None, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
username=username,
is_staff=False,
is_active=True,
is_superuser=False,
last_login=now(),
date_joined=now()
)
user.set_password(password)
user.save(using=self._db)
return user
def random(self):
"""TODO"""
# there can be deleted items
with transaction.atomic():
count = self.aggregate(count=Count('id'))['count']
random_index = randint(0, count - 1)
return self.all()[random_index]
def create_superuser(self, email, username, password):
user = self.create_user(email, username, password)
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
objects = UserManager()
USERNAME_FIELD = 'email'
email = models.EmailField(
verbose_name='Email',
max_length=255,
unique=True,
db_index=True,
blank=True, null=True,
default=None,
)
username = models.CharField(
max_length=200,
db_index=True,
# unique=True,
default='',
blank=True, null=True,
        help_text=_("This is a unique identifier, not the actual username. Can be a session \
key for temporary users")
)
# is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(
default=False,
help_text=_("Designates whether this user can access the admin site.")
)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(auto_now_add=True, db_index=True)
first_name = models.CharField(
max_length=200,
blank=True, null=True,
)
last_name = models.CharField(
max_length=200,
blank=True, null=True,
)
date_last_pass_sent = models.DateTimeField(null=True)
skype = models.CharField(max_length=200, blank=True, null=True)
discord = models.CharField(max_length=200, blank=True, null=True)
phone = models.CharField(max_length=200, blank=True, null=True)
city = models.CharField(max_length=200, blank=True, null=True)
browser_on_creation = models.CharField(
max_length=300,
db_index=True,
default=None,
blank=True, null=True,
help_text=_("Browser string used when this user was created")
)
created_from_ip = models.GenericIPAddressField(blank=True, null=True)
timezone_str = models.CharField(
max_length=50,
db_index=True,
default='UTC',
)
# avatar = models.ForeignKey(
# 'images.Image',
# null=True,
# blank=True,
# # help_text=_("Avatar image")
# )
permissions = models.ManyToManyField(
Permission,
related_name="permissions",
blank=True
)
groups = models.ManyToManyField(
Group,
related_name="groups",
blank=True
)
telegram_chat_id = models.IntegerField(
blank=True, null=True,
)
class Meta:
verbose_name = _("User")
verbose_name_plural = _("Users")
    def gravatar(self, size_in_px=25):
        """Return the Gravatar URL for this user's email."""
return get_gravatar_url(self.email, size=size_in_px)
# @property
# def social_accounts(self):
# """Return authorized social accounts"""
# return UserSocialAuth.objects.filter(user=self)
@property
def is_lazy(self):
return True
# return is_lazy_user(self)
    def get_full_name(self):
        "Used in Admin. Django wants this to be defined."
return "{} {}".format(self.first_name, self.last_name)
    def get_short_name(self):
        "Used in Admin. Django wants this to be defined."
return self.email
def __str__(self):
# if self.is_lazy:
# return "{}".format(_('Anonymous'))
if self.first_name:
return self.first_name
elif self.email:
return self.email
else:
return "User {}".format(self.pk)
# pip install django-mptt
class Tree(MPTTModel):
parent = TreeForeignKey(
'self',
default=None,
null=True,
blank=True,
db_index=True,
# related_name="%(app_label)s_%(class)s_parent",
# related_name="%(app_label)s_%(class)s_children",
related_name='children',
verbose_name=_("Parent element"),
on_delete=models.SET_NULL,
)
class Meta:
abstract = True
class Comment(Tree):
author = models.ForeignKey(
'core.User',
default=None,
null=True,
blank=True,
on_delete=models.SET_NULL,
)
src = models.TextField()
class LoginAttempt(models.Model):
'''
A login attempt record (both successful and not).
If user field is set then login was successful.
    Otherwise the login and password fields are set.
'''
# https://docs.python.org/3/library/ipaddress.html
# inet = InetAddressField(primary_key=True)
ip = InetAddressField()
login = models.CharField(
max_length=260,
null=True, blank=True,
)
password = models.CharField(
max_length=260,
null=True, blank=True,
)
user = models.ForeignKey(
'core.User',
default=None,
null=True,
blank=True,
on_delete=models.SET_NULL,
)
time = models.DateTimeField(
auto_now_add=True,
db_index=True,
null=True, blank=True,
)
# success = models.BooleanField(default=False)
objects = NetManager()
class Unnest(Func):
function = 'UNNEST'
class IP(models.Model):
# https://docs.python.org/3/library/ipaddress.html
# inet = InetAddressField(primary_key=True)
inet = InetAddressField()
open_ports = ArrayField(
models.IntegerField(),
blank=True,
null=True
)
objects = NetManager()
class Meta:
verbose_name = _('IP')
verbose_name_plural = _('IP-addresses')
@classmethod
def stat(cls):
"""Return Port and how many IPs have it open"""
return cls.objects \
.annotate(port=Unnest('open_ports', distinct=True)) \
.values('port') \
.annotate(count=Count('port')) \
.order_by('-count', '-port')
@classmethod
def with_open_ports(cls, ports):
"""Return Port and how many IPs have it open"""
return cls.objects.filter(open_ports__contains=ports)
def __str__(self):
# from django.contrib.postgres.aggregates import ArrayAgg
# print(IP.objects.aggregate(arrayagg=ArrayAgg('inet')))
# print(IP.objects.values('open_ports')\
# .annotate(number_of_days=Count('open_ports', distinct=True)))
# print(IP.objects.filter()\
# .aggregate(Avg('open_ports')))
# print(IP.objects.aggregate(All('open_ports')))
# print(IP.stat())
# .group_by('inet'))
# print(IP.objects.values('inet').annotate(arr_els=Unnest('open_ports')))
# .values_list('arr_els', flat=True).distinct())
return str(self.inet)
class Hostname(models.Model):
name = models.CharField(
max_length=260,
help_text="example.org, host.example.org"
)
# 2 level domain?
is_domain = models.BooleanField(default=False)
class Meta:
# unique_together = (("name", "domain"),)
# index_together = [["name", "domain"], ]
verbose_name = _("Hostname")
verbose_name_plural = _("Hostnames")
@property
def key(self):
return 'host_{}'.format(
hashlib.md5(str(self).encode('utf-8')).hexdigest()
)
@property
def last_visited(self):
key = self.key+'lastvisit'
return cache.get(key)
@last_visited.setter
def last_visited(self, t):
key = self.key+'lastvisit'
return cache.set(key, t, 60)
def last_visit_older(self, s):
# print(self, self.last_visited)
if self.last_visited is None:
return True
# return now() - self.last_visited > timedelta(seconds=3)
# @classmethod
# def from_string(cls, s):
# host_arr = s.split('.')
# host_part = '.'.join(host_arr[:-2])
# domain_part = '.'.join(host_arr[-2:])
# # try:
# domain, c = Domain.objects.get_or_create(name=domain_part)
# # except:
# # client.captureException()
# domain.clean()
# host, c = Hostname.objects.get_or_create(
# name=host_part,
# domain=domain
# )
# return host
def __eq__(self, other):
if other is None:
return False
if str(self) == str(other):
# if self.name == other.name and \
# self.domain == other.domain:
return True
return False
    def __str__(self):
        # The original formatted ``name`` together with a ``domain`` relation
        # that no longer exists on this model (see the commented-out
        # ``from_string`` above) and would raise an AttributeError; ``name``
        # already holds the full hostname.
        return self.name
# class Country(models.Model):
# name_ru = models.CharField(max_length=150)
# name_en = models.CharField(max_length=150)
# code = models.CharField(max_length=2)
# truecountry = models.IntegerField(default=0, null=False)
# class Meta:
# db_table = 'countries'
# ordering = ('name_en',)
# verbose_name_plural = "Countries"
# def __str__(self):
# lng = django.utils.translation.get_language()
# if 'ru' == lng:
# return self.name_ru
# return self.name_en
# class PersonManager(models.Manager):
# def get_queryset(self):
# return super().get_queryset() \
# .select_related('name',
# class URLScheme(models.Model):
# """http://en.wikipedia.org/wiki/URI_scheme"""
# name = models.CharField(max_length=260)
# class Meta:
# db_table = 'url_scheme'
# def __eq__(self, other):
# if other is None or self is None:
# return False
# if self.name == str(other):
# return True
# return False
# def __str__(self):
# return self.name
# class URL(models.Model):
# """scheme://username:password@example.org:8042/path?query#fragment"""
# cookies_file = '/var/www/xdev/tmp/url_cookies.txt'
# scheme = models.ForeignKey(URLScheme, null=True, blank=True)
# host = models.ForeignKey(Hostname, null=True, blank=True)
# path_str = models.CharField(max_length=260, help_text="/path/in/url",
# null=True, blank=True, default=None)
# # image = models.ForeignKey('Image', null=True, blank=True)
# # query = hstore.DictionaryField(null=True, blank=True)
# query = models.CharField(max_length=260, null=True, blank=True,
# help_text="?query")
# fragment = models.CharField(max_length=260, null=True, blank=True,
# help_text="#fragment")
# # objects = hstore.HStoreManager()
# status_code = models.IntegerField(default=None, null=True, blank=True)
# redirect = models.ForeignKey('self', null=True, blank=True, default=None,
# db_column='redirect_id', related_name='+')
# v = models.IntegerField(default=0, help_text="asd")
# class Meta:
# db_table = 'url'
# unique_together = (("scheme", "host", "path_str",
# "query", "fragment"), )
# # index_together = [["name", "domain"], ]
# verbose_name = "URL"
# verbose_name_plural = "URLs"
# @property
# def sha1(self):
# s = str(self)
# if isinstance(s, six.text_type):
# s = s.encode('utf-8')
# return hashlib.sha1(s).hexdigest()
# @property
# def links_abs(self):
# """Absolute URLs from the page. Return QuerySet of URL models."""
# links = self.soup.find_all('a')
# u = str(self.final_url)
# s = set([urljoin(u, tag.get('href', None)) for tag in links
# if tag.get('href', None) is not None])
# def id(x):
# return URL.from_string(x).id
# ids = list(map(id, s))
# return URL.objects.filter(pk__in=ids)
# @property
# def final_url(self):
# if self.redirect:
# return self.redirect
# return self
# def get(self):
# "Returns [Request object] or None. See 'requests' pkg"
# key = 'url_data_{}_r'.format(self.sha1)
# r = cache.get(key)
# if r is not None:
# return r
# wait = 4
# while not self.host.last_visit_older(wait):
# sleep(wait)
# headers = {
# 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:29.0)'
# ' Gecko/20100101 Firefox/29.0'
# }
# try:
# r = requests.get(str(self), headers=headers)
# except requests.exceptions.ConnectionError:
# client.captureException()
# return None
# if r.history:
# u_redirected = URL.from_string(r.url)
# if settings.DEBUG:
# print('got redirect to:', u_redirected)
# if self.redirect != u_redirected and self.redirect != self:
# self.redirect = u_redirected
# self.save()
# cache.set(key, r, 60*60)
# self.host.last_visited = now()
# return r
# @property
# def key(self):
# return 'url_data_{}'.format(self.sha1)
# def download(self, wait=4, **kwargs):
# return self.get().content
# @classmethod
# def download_url(cls, url, filename, **kwargs):
# "Download URL and save it to FILENAME."
# # endfile = os.path.basename(url) + '.jpg'
# headers = {'User-Agent':
# 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:29.0)
# Gecko/20100101 Firefox/29.0'}
# import requests
# r = requests.get(url, headers=headers, stream=True)
# if r.status_code == 200:
# with open(filename, 'wb') as f:
# for chunk in r.iter_content(chunk_size=1024):
# if chunk: # filter out keep-alive new chunks
# f.write(chunk)
# else:
# return r.status_code
# return url
# def data_to_unicode(self, **kwargs):
# """Extract META tags from HTML.
#
# and try to convert data to Unicode string"""
# from article.parser.html import guess_html_encoding
# # update = kwargs.get('update', False)
# data = self.download(**kwargs)
# # print(data.decode("cp1251", 'ignore'))
# s, enc = guess_html_encoding(data)
# if enc is not None:
# # print(enc)
# # print(s)
# return s
# try:
# return data.decode('utf-8')
# except UnicodeDecodeError as e:
# try:
# return data.decode('cp1251')
# except UnicodeDecodeError as e:
# return str(e)
# @property
# def soup(self):
# key = 'url_soup_{}'.format(self.sha1)
# soup = cache.get(key)
# if soup is None:
# soup = BeautifulSoup(self.data_to_unicode())
# # cache.set(key, soup)
# return soup
# def matches(self, d=None, h=None, path=None, f=None, q=None):
# if d is not None and not d.lower() == str(self.host.domain).lower():
# return False
# if h is not None and not re.match(h, str(self.host)):
# return False
# if path is not None and not re.match(path, self.path_str):
# return False
# if f is not None and not re.match(f, self.fragment):
# return False
# if q is not None and not re.match(q, self.query):
# return False
# return True
# @property
# def obj(self):
# return 'todo'
# @classmethod
# def from_string(cls, s):
# if isinstance(s, cls):
# return s
# o = urlparse(s)
# scheme, c = URLScheme.objects.get_or_create(name=o.scheme)
# host = Hostname.from_string(o.hostname)
# u, c = cls.objects.get_or_create(scheme=scheme,
# host=host,
# path_str=o.path,
# query=o.query,
# fragment=o.fragment)
# return u
# def __eq__(self, other):
# if other is None or self is None:
# return False
# #if self.url == other.url and self.url is not None:
# # return True
# else:
# if self.scheme == other.scheme and \
# self.host == other.host and \
# self.path_str == other.path_str and \
# self.query == other.query:
# return True
# return False
# return NotImplemented
# def __str__(self):
# s = "{}://{}".format(str(self.scheme), self.host)
# if self.path_str:
# s += self.path_str
# if self.query:
# s += "?" + self.query
# if self.fragment:
# s += "#" + self.fragment
# if self.scheme and self.host:
# return s
# else:
# return NotImplemented
# class UrlObject(models.Model):
# # url = models.ForeignKey(URL, primary_key=True)
# url = models.OneToOneField(URL, primary_key=True)
# # obj = models.ForeignKey(Hostname, null=True, blank=True, default=None)
# content_type = models.ForeignKey(ContentType, null=True)
# object_id = models.PositiveIntegerField(null=True)
# obj = GenericForeignKey('content_type', 'object_id')
# v = models.IntegerField(default=0)
# class Meta:
# db_table = 'obj_from_url'
# # ordering = ('name',)
# def __str__(self):
# return self.obj
# class Translated(models.Model):
# translation_of = models.ForeignKey(
# 'self',
# default=None,
# null=True,
# blank=True,
# related_name="%(app_label)s_%(class)s_translated",
# verbose_name=_("Translation of")
# )
# lng = models.ForeignKey(
# Language,
# default=None,
# null=True,
# blank=True,
# related_name="%(app_label)s_%(class)s_lng",
# verbose_name=_("Language")
# )
# def get_translation(self, language):
# if self.lng == language:
# return self
# if self.translation_of is not None:
# pass
# return
# class Meta:
# abstract = True
# class Language(models.Model):
# name = models.CharField(
# max_length=150,
# help_text="Original language name"
# )
# name_en = models.CharField(max_length=150, help_text="Name in English")
# code = models.CharField(
# max_length=2,
# help_text="2 chars",
# unique=True,
# primary_key=True,
# verbose_name=_("Code")
# )
# class Meta:
# db_table = 'languages'
# ordering = ('name',)
# verbose_name = _("Language")
# verbose_name_plural = _("Languages")
# def __str__(self):
# return self.name
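# Query sketch (added for illustration; shown as comments because this is a
# Django models module and needs django.setup() plus a populated database):
#
#   IP.stat()                       # -> ports with how many IPs have them open
#   IP.with_open_ports([22, 80])    # -> IPs having both ports open
#   User.objects.create_user('user@example.org', username='demo', password='s3cret')
#
# The e-mail, username and password above are hypothetical values.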
|
pashinin-com/pashinin.com
|
src/core/models.py
|
Python
|
gpl-3.0
| 22,154
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
smuggle_url,
)
class TeleQuebecIE(InfoExtractor):
_VALID_URL = r'https?://zonevideo\.telequebec\.tv/media/(?P<id>\d+)'
_TEST = {
'url': 'http://zonevideo.telequebec.tv/media/20984/le-couronnement-de-new-york/couronnement-de-new-york',
'md5': 'fe95a0957e5707b1b01f5013e725c90f',
'info_dict': {
'id': '20984',
'ext': 'mp4',
'title': 'Le couronnement de New York',
'description': 'md5:f5b3d27a689ec6c1486132b2d687d432',
'upload_date': '20160220',
'timestamp': 1455965438,
}
}
def _real_extract(self, url):
media_id = self._match_id(url)
media_data = self._download_json(
'https://mnmedias.api.telequebec.tv/api/v2/media/' + media_id,
media_id)['media']
return {
'_type': 'url_transparent',
'id': media_id,
'url': smuggle_url('limelight:media:' + media_data['streamInfo']['sourceId'], {'geo_countries': ['CA']}),
'title': media_data['title'],
'description': media_data.get('descriptions', [{'text': None}])[0].get('text'),
'duration': int_or_none(media_data.get('durationInMilliseconds'), 1000),
'ie_key': 'LimelightMedia',
}
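# Usage note (added): extractors are not run standalone; youtube-dl matches a
# URL against _VALID_URL and then calls _real_extract(), e.g. for the test URL
# listed in _TEST above:
#   youtube-dl "http://zonevideo.telequebec.tv/media/20984/le-couronnement-de-new-york/couronnement-de-new-york"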
|
Dunkas12/BeepBoopBot
|
lib/youtube_dl/extractor/telequebec.py
|
Python
|
gpl-3.0
| 1,419
|
#
# SPDX-FileCopyrightText: 2020 Dmytro Kolomoiets <amerlyq@gmail.com> and contributors.
#
# SPDX-License-Identifier: GPL-3.0-only
#
import pickle
import urwid
import zmq
from ..ifc import *
def tui_client(src_uri, dst_uri, log_uri):
set_current_thread_name()
_log = getLogger()
ctx = zmq.Context.instance()
# NOTE:(cohesion): connect topology backward :: from dst to src
dst_sock = ctx.socket(zmq.PUSH)
dst_sock.connect(dst_uri)
src_sock = ctx.socket(zmq.SUB)
src_sock.connect(src_uri)
src_sock.setsockopt_string(zmq.SUBSCRIBE, '*') # custom broadcast
src_sock.setsockopt_string(zmq.SUBSCRIBE, 'ui')
try:
## BET: python-urwid
# [Urwid] key capture in different views
# http://lists.excess.org/pipermail/urwid/2011-July/001080.html
body = urwid.Text("<Press ',' to exit>")
view = urwid.Filler(body, 'top')
def unhandled_input(key):
if key in ('esc', ','):
raise urwid.ExitMainLoop()
dst_sock.send_multipart([b'key', pickle.dumps(key)])
_log.info("Press: " + key)
# FIXME: change text only in subscriber
body.set_text(repr(key))
loop = urwid.MainLoop(view, unhandled_input=unhandled_input)
loop.run()
except KeyboardInterrupt:
pass
finally:
# _log.info("Fin: " + threading.current_thread().name)
# dst_sock.send_multipart([b'*', pickle.dumps('quit')])
dst_sock.close()
src_sock.close()
# ERR:(exception): can call only once in main thread
# zmq.Context.instance().term()
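# Launch note (added): tui_client() expects three ZeroMQ endpoint URIs: the
# event source it subscribes to, the command sink it pushes key events to, and
# a log endpoint (unused in the body above). The URIs below are hypothetical
# examples of how the surrounding application might wire it up:
#   tui_client('tcp://127.0.0.1:5556', 'tcp://127.0.0.1:5557', 'tcp://127.0.0.1:5558')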
|
miur/miur
|
tui/urwid_tui.py
|
Python
|
gpl-3.0
| 1,631
|
import os
sys = os.system
CC = 'g++ {} -std=gnu++0x -Wall'
FLAG_clear = ['/c', '-c']
FLAG_window = ['/w', '-w']
FLAG_exit = ['/e', '-e']
def main():
print('List of existing <*.cpp> files:')
files = []
counter = 0
for file in os.listdir():
if file[-4:] == '.cpp':
counter += 1
files.append(file)
print('{:->3d}) {}'.format(counter, file[:-4]))
name = ''
flags = []
command, *ex = input('Enter your <command> [<name>] [<*flags>]: ').split()
if len(ex):
name = ex[0]
flags = list(ex[1:])
try:
name = files[int(name) - 1]
except:
if name[0] == '#':
try:
fileid = int(name[1:])
name = files[fileid - 1]
except:
pass
else:
flags = list(ex)
if command == 'open':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
if len(list(set(FLAG_window).intersection(set(flags)))) > 0:
sys('start {}'.format(name))
else:
sys('{}'.format(name))
elif command == 'compile':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('Compiling...')
err = sys((CC+' -o {}.exe').format(name, name[:-4]))
if err:
print('Error during compiling. <{}>'.format(err))
else:
            print('Compiled successfully.')
elif command == 'run':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('Compiling...')
err = sys((CC+' -o {}.exe').format(name, name[:-4]))
if err:
print('Error during compiling. <{}>'.format(err))
else:
            print('Compiled successfully. Starting:\n' + '-' * 31)
if len(list(set(FLAG_window).intersection(set(flags)))) > 0:
err2 = sys('start {}.exe'.format(name[:-4]))
else:
err2 = sys('{}.exe'.format(name[:-4]))
if err2:
print('-' * 30 + '\nError during execution. <{}>'.format(err2))
else:
                print('-' * 17 + '\nDone successfully.')
elif command == 'list':
if name != '':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('List of existing <*.{}> files:'.format(name))
l = len(name)
for file in os.listdir():
if file[-l:] == name:
print('{:>20}'.format(file[:-l - 1]))
else:
print('List of all existing files:')
for file in os.listdir():
print('{:>20}'.format(file))
if len(list(set(FLAG_exit).intersection(set(flags)))) == 0:
input('-' * 25 + '\nEnd. Press enter to exit: ')
main()
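# Usage note (added): the interactive prompt accepts
#   open <name> [/c|-c] [/w|-w]     run an existing executable
#   compile <name> [/c|-c]          compile <name> with g++ (see CC above)
#   run <name> [/c|-c] [/w|-w]      compile, then execute
#   list [<extension>] [/c|-c]      list files, optionally filtered by extension
# where <name> may also be the number shown in the listing (or #<number>),
# /c clears the screen first, /w opens a new window and /e skips the final
# "press enter" prompt.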
|
Lipen/LipenDev
|
Azeroth/Pandaria/process.py
|
Python
|
gpl-3.0
| 2,341
|
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
setup(name='peloton_bloomfilters',
author = 'Adam DePrince',
author_email = 'adam@pelotoncycle.com',
url = 'https://github.com/pelotoncycle/peloton_bloomfilters',
version='0.0.1',
description="Peloton Cycle's Bloomin fast Bloomfilters",
ext_modules=(
[
Extension(
name='peloton_bloomfilters',
sources=['peloton_bloomfiltersmodule.c']),
]
)
)
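# Build note (added): the C extension is compiled with the standard commands,
# e.g. `python setup.py build_ext --inplace` for an in-place build or
# `pip install .` to install; both require a C compiler for
# peloton_bloomfiltersmodule.c.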
|
pelotoncycle/shared_memory_bloomfilter
|
setup.py
|
Python
|
gpl-3.0
| 572
|
#!/usr/bin/env python
""" create rst files for documentation of DIRAC """
import os
import shutil
import socket
import sys
import logging
import glob
from diracdoctools.Utilities import writeLinesToFile, mkdir, makeLogger
from diracdoctools.Config import Configuration, CLParser as clparser
LOG = makeLogger('CodeReference')
# global used inside the CustomizedDocs modules
CUSTOMIZED_DOCSTRINGS = {}
class CLParser(clparser):
"""Extension to CLParser to also parse buildType."""
def __init__(self):
super(CLParser, self).__init__()
self.log = LOG.getChild('CLParser')
self.clean = False
self.parser.add_argument('--buildType', action='store', default='full',
choices=['full', 'limited'],
help='Build full or limited code reference',
)
self.parser.add_argument('--clean', action='store_true',
help='Remove rst files and exit',
)
def parse(self):
super(CLParser, self).parse()
self.log.info('Parsing options')
self.buildType = self.parsed.buildType
self.clean = self.parsed.clean
def optionDict(self):
oDict = super(CLParser, self).optionDict()
oDict['buildType'] = self.buildType
oDict['clean'] = self.clean
return oDict
class CodeReference(object):
"""Module to create rst files containing autodoc for sphinx."""
def __init__(self, configFile='docs.conf'):
self.config = Configuration(configFile, sections=['Code'])
self.orgWorkingDir = os.getcwd()
def end(self):
"""Make sure we are back in the original working directory."""
LOG.info('Done with creating code reference')
os.chdir(self.orgWorkingDir)
def getCustomDocs(self):
"""Import the dynamically created docstrings from the files in CustomizedDocs.
Use 'exec' to avoid a lot of relative import, pylint errors, etc.
"""
customizedPath = os.path.join(self.config.code_customDocsPath, '*.py')
LOG.info('Looking for custom strings in %s', customizedPath)
for filename in glob.glob(customizedPath):
LOG.info('Found customization: %s', filename)
exec(open(filename).read(), globals()) # pylint: disable=exec-used
def mkPackageRst(self, filename, modulename, fullmodulename, subpackages=None, modules=None):
"""Make a rst file for module containing other modules."""
if modulename == 'scripts':
return
else:
modulefinal = modulename
lines = []
lines.append('%s' % modulefinal)
lines.append('=' * len(modulefinal))
lines.append('.. module:: %s ' % fullmodulename)
lines.append('')
if subpackages or modules:
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
subpackages = [s for s in subpackages if not s.endswith(('scripts', ))]
if subpackages:
LOG.info('Module %r with subpackages: %r', fullmodulename, ', '.join(subpackages))
lines.append('SubPackages')
lines.append('...........')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for package in sorted(subpackages):
lines.append(' %s/%s_Module.rst' % (package, package.split('/')[-1]))
lines.append('')
# remove CLI etc. because we drop them earlier
modules = [m for m in modules if not m.endswith('CLI') and '-' not in m]
if modules:
lines.append('Modules')
lines.append('.......')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for module in sorted(modules):
lines.append(' %s.rst' % (module.split('/')[-1],))
lines.append('')
writeLinesToFile(filename, lines)
def mkDummyRest(self, classname, _fullclassname):
"""Create a dummy rst file for files that behave badly."""
filename = classname + '.rst'
lines = []
lines.append('%s' % classname)
lines.append('=' * len(classname))
lines.append('')
lines.append(' This is an empty file, because we cannot parse this file correctly or it causes problems')
lines.append(' , please look at the source code directly')
writeLinesToFile(filename, lines)
def mkModuleRst(self, classname, fullclassname, buildtype='full'):
"""Create rst file for module."""
LOG.info('Creating rst file for %r, aka %r', classname, fullclassname)
filename = classname + '.rst'
lines = []
lines.append('%s' % classname)
lines.append('=' * len(classname))
lines.append('.. automodule:: %s' % fullclassname)
if buildtype == 'full':
lines.append(' :members:')
if classname not in self.config.code_noInherited:
lines.append(' :inherited-members:')
lines.append(' :undoc-members:')
lines.append(' :show-inheritance:')
if classname in self.config.code_privateMembers:
lines.append(' :special-members:')
lines.append(' :private-members:')
else:
lines.append(' :special-members: __init__')
if classname.startswith('_'):
lines.append(' :private-members:')
if fullclassname in CUSTOMIZED_DOCSTRINGS:
ds = CUSTOMIZED_DOCSTRINGS[fullclassname]
if ds.replace:
lines = ds.doc_string
else:
lines.append(ds.doc_string)
writeLinesToFile(filename, lines)
def getsubpackages(self, abspath, direc):
"""return list of subpackages with full path"""
packages = []
for dire in direc:
if dire.lower() == 'test' or dire.lower() == 'tests' or '/test' in dire.lower():
LOG.debug('Skipping test directory: %s/%s', abspath, dire)
continue
if dire.lower() == 'docs' or '/docs' in dire.lower():
LOG.debug('Skipping docs directory: %s/%s', abspath, dire)
continue
if os.path.exists(os.path.join(self.config.sourcePath, abspath, dire, '__init__.py')):
packages.append(os.path.join(dire))
return packages
    def getmodules(self, abspath, _direc, files):
        """Return the list of module names (python file names without the .py extension)."""
packages = []
for filename in files:
if filename.lower().startswith('test') or filename.lower().endswith('test') or \
any(f.lower() in filename.lower() for f in self.config.code_ignoreFiles):
LOG.debug('Skipping file: %s/%s', abspath, filename)
continue
if 'test' in filename.lower():
LOG.warn("File contains 'test', but is kept: %s/%s", abspath, filename)
if filename != '__init__.py':
packages.append(filename.split('.py')[0])
return packages
def cleanDoc(self):
"""Remove the code output folder."""
LOG.info('Removing existing code documentation: %r', self.config.code_targetPath)
if os.path.exists(self.config.code_targetPath):
shutil.rmtree(self.config.code_targetPath)
def createDoc(self, buildtype="full"):
"""create the rst files for all the things we want them for"""
LOG.info('self.config.sourcePath: %s', self.config.sourcePath)
LOG.info('self.config.targetPath: %s', self.config.code_targetPath)
LOG.info('Host: %s', socket.gethostname())
# we need to replace existing rst files so we can decide how much code-doc to create
if os.path.exists(self.config.code_targetPath) and os.environ.get('READTHEDOCS', 'False') == 'True':
self.cleanDoc()
mkdir(self.config.code_targetPath)
os.chdir(self.config.code_targetPath)
self.getCustomDocs()
LOG.info('Now creating rst files: starting in %r', self.config.sourcePath)
firstModule = True
for root, direc, files in os.walk(self.config.sourcePath):
configTemplate = [os.path.join(root, _) for _ in files if _ == 'ConfigTemplate.cfg']
files = [_ for _ in files if _.endswith('.py')]
if '__init__.py' not in files:
continue
elif any(f.lower() in root.lower() for f in self.config.code_ignoreFolders):
LOG.debug('Skipping folder: %s', root)
continue
modulename = root.split('/')[-1].strip('.')
codePath = root.split(self.config.sourcePath)[1].strip('/.')
docPath = codePath
if docPath.startswith(self.config.moduleName):
docPath = docPath[len(self.config.moduleName) + 1:]
fullmodulename = '.'.join(codePath.split('/')).strip('.')
if not fullmodulename.startswith(self.config.moduleName):
fullmodulename = ('.'.join([self.config.moduleName, fullmodulename])).strip('.')
packages = self.getsubpackages(codePath, direc)
if docPath:
LOG.debug('Trying to create folder: %s', docPath)
mkdir(docPath)
os.chdir(docPath)
if firstModule:
firstModule = False
self.createCodeDocIndex(
subpackages=packages,
modules=self.getmodules(
codePath,
direc,
files),
buildtype=buildtype)
elif buildtype == 'limited':
os.chdir(self.config.code_targetPath)
return 0
else:
self.mkPackageRst(
modulename + '_Module.rst',
modulename,
fullmodulename,
subpackages=packages,
modules=self.getmodules(
docPath,
direc,
files))
for filename in files:
# Skip things that call parseCommandLine or similar issues
fullclassname = '.'.join(docPath.split('/') + [filename])
if any(f in filename for f in self.config.code_dummyFiles):
LOG.debug('Creating dummy for file %r', filename)
self.mkDummyRest(filename.split('.py')[0], fullclassname.split('.py')[0])
continue
elif not filename.endswith('.py') or \
filename.endswith('CLI.py') or \
filename.lower().startswith('test') or \
filename == '__init__.py' or \
any(f in filename for f in self.config.code_ignoreFiles) or \
'-' in filename: # not valid python identifier, e.g. dirac-pilot
LOG.debug('Ignoring file %r', filename)
continue
if not fullclassname.startswith(self.config.moduleName):
fullclassname = '.'.join([self.config.moduleName, fullclassname])
self.mkModuleRst(filename.split('.py')[0], fullclassname.split('.py')[0], buildtype)
# copy configTemplate files to code doc so we can import them in the agent docstrings
if configTemplate:
shutil.copy(configTemplate[0], os.path.join(self.config.code_targetPath, docPath))
os.chdir(self.config.code_targetPath)
return 0
def createCodeDocIndex(self, subpackages, modules, buildtype="full"):
"""create the main index file"""
LOG.info('Creating base index file')
filename = 'index.rst'
lines = []
lines.append('.. _code_documentation:')
lines.append('')
lines.append('Code Documentation (|release|)')
lines.append('------------------------------')
# for limited builds we only create the most basic code documentation so
# we let users know there is more elsewhere
if buildtype == 'limited':
lines.append('')
lines.append('.. warning::')
lines.append(
                ' This is a limited build of the code documentation; for the full code '
                'documentation please look at the website')
lines.append('')
else:
if subpackages or modules:
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
if subpackages:
systemPackages = sorted([pck for pck in subpackages if pck.endswith('System')])
otherPackages = sorted([pck for pck in subpackages if not pck.endswith('System')])
lines.append('=======')
lines.append('Systems')
lines.append('=======')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for package in systemPackages:
lines.append(' %s/%s_Module.rst' % (package, package.split('/')[-1]))
lines.append('')
lines.append('=====')
lines.append('Other')
lines.append('=====')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for package in otherPackages:
lines.append(' %s/%s_Module.rst' % (package, package.split('/')[-1]))
if modules:
for module in sorted(modules):
lines.append(' %s.rst' % (module.split('/')[-1],))
writeLinesToFile(filename, lines)
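    # Hedged sketch of the kind of index.rst the method above emits for a full
    # build; the package and module names are illustrative placeholders, not
    # taken from the real source tree.
    #
    #   .. _code_documentation:
    #
    #   Code Documentation (|release|)
    #   ------------------------------
    #
    #   =======
    #   Systems
    #   =======
    #
    #   .. toctree::
    #      :maxdepth: 1
    #
    #      SomeSystem/SomeSystem_Module.rst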
def checkBuildTypeAndRun(self, buildType='full'):
"""Check for input argument and then create the doc rst files."""
buildTypes = ('full', 'limited')
if buildType not in buildTypes:
LOG.error('Unknown build type: %s use %s ', buildType, ' '.join(buildTypes))
return 1
LOG.info('Buildtype: %s', buildType)
return self.createDoc(buildType)
def run(configFile='docs.conf', logLevel=logging.INFO, debug=False, buildType='full', clean=False):
"""Create the code reference.
:param str configFile: path to the configFile
:param logLevel: logging level to use
:param bool debug: if true even more debug information is printed
:param str buildType: 'full' or 'limited', use limited only when memory is limited
:param bool clean: Remove rst files and exit
:returns: return value 1 or 0
"""
logging.getLogger().setLevel(logLevel)
code = CodeReference(configFile=configFile)
if clean:
code.cleanDoc()
return 0
retVal = code.checkBuildTypeAndRun(buildType=buildType)
code.end()
return retVal
if __name__ == '__main__':
sys.exit(run(**(CLParser().optionDict())))
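# Hedged usage sketch (not part of the original module): the builder can also
# be driven programmatically. 'docs.conf' is the documented default config and
# 'limited' is the low-memory build type described in run() above; anything
# else here is illustrative.
#
#   import logging
#   run(configFile='docs.conf', logLevel=logging.INFO, buildType='limited')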
|
chaen/DIRAC
|
docs/diracdoctools/cmd/codeReference.py
|
Python
|
gpl-3.0
| 13,727
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# Module to define chemical reaction functionality
###############################################################################
from math import exp, log
import sqlite3
from numpy import polyval
from scipy.optimize import fsolve
from PyQt4.QtGui import QApplication
from lib import unidades
from lib.sql import databank_name
class Reaction(object):
"""Chemical reaction object"""
status = 0
msg = QApplication.translate("pychemqt", "undefined")
error = 0
kwargs = {"comp": [],
"coef": [],
"tipo": 0,
"fase": 0,
"key": 0,
"base": 0,
"customHr": False,
"Hr": 0.0,
"formula": False,
"conversion": None,
"keq": None}
kwargsValue = ("Hr",)
kwargsList = ("tipo", "fase", "key", "base")
kwargsCheck = ("customHr", "formula")
calculateValue = ("DeltaP", "DeltaP_f", "DeltaP_ac", "DeltaP_h",
"DeltaP_v", "DeltaP_100ft", "V", "f", "Re", "Tout")
TEXT_TYPE = [QApplication.translate("pychemqt", "Estequiometric"),
QApplication.translate("pychemqt", "Equilibrium"),
QApplication.translate("pychemqt", "Kinetic"),
QApplication.translate("pychemqt", "Catalitic")]
TEXT_PHASE = [QApplication.translate("pychemqt", "Global"),
QApplication.translate("pychemqt", "Liquid"),
QApplication.translate("pychemqt", "Gas")]
TEXT_BASE = [QApplication.translate("pychemqt", "Mole"),
QApplication.translate("pychemqt", "Mass"),
QApplication.translate("pychemqt", "Partial pressure")]
def __init__(self, **kwargs):
"""constructor, kwargs keys can be:
comp: array with index of reaction components
coef: array with stequiometric coefficient for each component
fase: Phase where reaction work
0 - Global
1 - Liquid
2 - Gas
key: Index of key component
base
0 - Mol
1 - Mass
2 - Partial pressure
Hr: Heat of reaction, calculate from heat of formation if no input
formula: boolean to show compound names in formules
tipo: Kind of reaction
0 - Stequiometric, without equilibrium or kinetic calculations
1 - Equilibrium, without kinetic calculation
2 - Equilibrium by minimization of Gibbs free energy
3 - Kinetic
4 - Catalytic
conversion: conversion value for reaction with tipo=0
keq: equilibrium constant for reation with tipo=1
-it is float if it don't depend with temperature
-it is array if it depends with temperature
"""
self.kwargs = Reaction.kwargs.copy()
if kwargs:
self.__call__(**kwargs)
def __call__(self, **kwargs):
oldkwargs = self.kwargs.copy()
self.kwargs.update(kwargs)
if oldkwargs != self.kwargs and self.isCalculable:
self.calculo()
@property
def isCalculable(self):
self.msg = ""
self.status = 1
if not self.kwargs["comp"]:
self.msg = QApplication.translate("pychemqt", "undefined components")
self.status = 0
return
if not self.kwargs["coef"]:
self.msg = QApplication.translate("pychemqt", "undefined stequiometric")
self.status = 0
return
if self.kwargs["tipo"] == 0:
if self.kwargs["conversion"] is None:
self.msg = QApplication.translate("pychemqt", "undefined conversion")
self.status = 3
elif self.kwargs["tipo"] == 1:
if self.kwargs["keq"] is None:
self.msg = QApplication.translate("pychemqt", "undefined equilibrium constants")
self.status = 3
elif self.kwargs["tipo"] == 2:
pass
elif self.kwargs["tipo"] == 3:
pass
return True
def calculo(self):
self.componentes = self.kwargs["comp"]
self.coef = self.kwargs["coef"]
self.tipo = self.kwargs["tipo"]
self.base = self.kwargs["base"]
self.fase = self.kwargs["fase"]
self.calor = self.kwargs["Hr"]
self.formulas = self.kwargs["formula"]
self.keq = self.kwargs["keq"]
databank = sqlite3.connect(databank_name).cursor()
databank.execute("select nombre, peso_molecular, formula, \
calor_formacion_gas from compuestos where id IN \
%s" % str(tuple(self.componentes)))
nombre = []
peso_molecular = []
formula = []
calor_reaccion = 0
check_estequiometria = 0
for i, compuesto in enumerate(databank):
nombre.append(compuesto[0])
peso_molecular.append(compuesto[1])
formula.append(compuesto[2])
calor_reaccion += compuesto[3]*self.coef[i]
check_estequiometria += self.coef[i]*compuesto[1]
self.nombre = nombre
self.peso_molecular = peso_molecular
self.formula = formula
if self.calor:
self.Hr = self.kwargs.get("Hr", 0)
else:
self.Hr = unidades.MolarEnthalpy(calor_reaccion/abs(
self.coef[self.base]), "Jkmol")
self.error = round(check_estequiometria, 1)
self.state = self.error == 0
self.text = self._txt(self.formulas)
def conversion(self, corriente, T):
"""Calculate reaction conversion
corriente: Corriente instance for reaction
T: Temperature of reaction"""
if self.tipo == 0:
# Material balance without equilibrium or kinetics considerations
alfa = self.kwargs["conversion"]
elif self.tipo == 1:
# Chemical equilibrium without kinetics
if isinstance(self.keq, list):
A, B, C, D, E, F, G, H = self.keq
keq = exp(A+B/T+C*log(T)+D*T+E*T**2+F*T**3+G*T**4+H*T**5)
else:
keq = self.keq
def f(alfa):
conc_out = [
(corriente.caudalunitariomolar[i]+alfa*self.coef[i])
/ corriente.Q.m3h for i in range(len(self.componentes))]
productorio = 1
for i in range(len(self.componentes)):
productorio *= conc_out[i]**self.coef[i]
return keq-productorio
alfa = fsolve(f, 0.5)
print alfa, f(alfa)
avance = alfa*self.coef[self.base]*corriente.caudalunitariomolar[self.base]
Q_out = [corriente.caudalunitariomolar[i]+avance*self.coef[i] /
self.coef[self.base] for i in range(len(self.componentes))]
minimo = min(Q_out)
if minimo < 0:
# The key component is not correct, redo the result
indice = Q_out.index(minimo)
avance = self.coef[indice]*corriente.caudalunitariomolar[indice]
Q_out = [corriente.caudalunitariomolar[i]+avance*self.coef[i] /
self.coef[indice] for i in range(len(self.componentes))]
h = unidades.Power(self.Hr*self.coef[self.base] /
self.coef[indice]*avance, "Jh")
else:
h = unidades.Power(self.Hr*avance, "Jh")
print alfa, avance
caudal = sum(Q_out)
fraccion = [caudal_i/caudal for caudal_i in Q_out]
return fraccion, h
# def cinetica(self, tipo, Ko, Ei):
# """Método que define la velocidad de reacción"""
#
#
def _txt(self, nombre=False):
"""Function to get text representation for reaction"""
if nombre:
txt = self.nombre
else:
txt = self.formula
reactivos = []
productos = []
for i in range(len(self.componentes)):
if self.coef[i] == int(self.coef[i]):
self.coef[i] = int(self.coef[i])
if self.coef[i] < -1:
reactivos.append(str(-self.coef[i])+txt[i])
elif self.coef[i] == -1:
reactivos.append(txt[i])
elif -1 < self.coef[i] < 0:
reactivos.append(str(-self.coef[i])+txt[i])
elif 0 < self.coef[i] < 1:
productos.append(str(self.coef[i])+txt[i])
elif self.coef[i] == 1:
productos.append(txt[i])
elif self.coef[i] > 1:
productos.append(str(self.coef[i])+txt[i])
return " + ".join(reactivos)+" ---> "+" + ".join(productos)
def __repr__(self):
if self.status:
eq = self._txt()
return eq + " " + "Hr= %0.4e Jkmol" % self.Hr
else:
return str(self.msg)
if __name__ == "__main__":
# from lib.corriente import Corriente, Mezcla
# mezcla=Corriente(300, 1, 1000, Mezcla([1, 46, 47, 62], [0.03, 0.01, 0.96, 0]))
# reaccion=Reaction([1, 46, 47, 62], [-2, 0, -1, 2], base=2)
# reaccion.conversion(mezcla)
# print reaccion
reaccion = Reaction(comp=[1, 47, 62], coef=[-2, -1, 2])
print reaccion
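    # Hedged sketch (illustrative values only): an equilibrium reaction uses
    # tipo=1 and a keq kwarg, which may be a constant or, per the docstring,
    # a list of temperature-dependence coefficients.
    # reaccion_eq = Reaction(comp=[1, 47, 62], coef=[-2, -1, 2], tipo=1, keq=0.5)
    # print reaccion_eq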
|
edusegzy/pychemqt
|
lib/reaction.py
|
Python
|
gpl-3.0
| 9,477
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
from grid_cell_model.submitting import flagparse
import noisefigs
from noisefigs.env import NoiseEnvironment
import config_standard_gEE_3060 as config
parser = flagparse.FlagParser()
parser.add_flag('--bumpDriftSweep')
args = parser.parse_args()
env = NoiseEnvironment(user_config=config.get_config())
if args.bumpDriftSweep or args.all:
env.register_plotter(noisefigs.plotters.BumpDriftAtTimePlotter)
env.plot()
|
MattNolanLab/ei-attractor
|
grid_cell_model/simulations/007_noise/figures/paper/ee_connections_ei_flat/figure_drifts.py
|
Python
|
gpl-3.0
| 501
|
"""
Form widgets for ToscaWidgets.
To download and install::
easy_install twForms
"""
from tw.api import Widget
from tw.forms.core import *
from tw.forms.fields import *
from tw.forms.datagrid import *
from tw.forms.calendars import *
# build __all__ so doc tools introspect me properly
from tw.forms.core import __all__ as __core_all
from tw.forms.fields import __all__ as __fields_all
from tw.forms.datagrid import __all__ as __datagrid_all
from tw.forms.calendars import __all__ as __calendars_all
__all__ = __core_all + __fields_all + __datagrid_all + __calendars_all
|
jokajak/itweb
|
data/env/lib/python2.6/site-packages/tw.forms-0.9.9-py2.6.egg/tw/forms/__init__.py
|
Python
|
gpl-3.0
| 578
|
""" unit test for Watchdog.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# imports
import os
from mock import MagicMock
# sut
from DIRAC.WorkloadManagementSystem.JobWrapper.Watchdog import Watchdog
mock_exeThread = MagicMock()
mock_spObject = MagicMock()
def test_calibrate():
pid = os.getpid()
wd = Watchdog(pid, mock_exeThread, mock_spObject, 5000)
res = wd.calibrate()
assert res['OK'] is True
def test__performChecks():
pid = os.getpid()
wd = Watchdog(pid, mock_exeThread, mock_spObject, 5000)
res = wd.calibrate()
assert res['OK'] is True
res = wd._performChecks()
assert res['OK'] is True
def test__performChecksFull():
pid = os.getpid()
wd = Watchdog(pid, mock_exeThread, mock_spObject, 5000)
wd.testCPULimit = 1
wd.testMemoryLimit = 1
res = wd.calibrate()
assert res['OK'] is True
res = wd._performChecks()
assert res['OK'] is True
|
yujikato/DIRAC
|
src/DIRAC/WorkloadManagementSystem/JobWrapper/test/Test_Watchdog.py
|
Python
|
gpl-3.0
| 958
|
import sys
# where RobotControl.py, etc lives
sys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')
from RobotControl import *
#################################
### Define Deck Layout
#################################
deck="""\
DW96W DW96W DW96W BLANK BLANK BLANK BLANK
DW96W DW96W DW96W BLANK BLANK BLANK BLANK
DW96W DW96W DW96W BLANK BLANK BLANK BLANK
BLANK BLANK BLANK BLANK BLANK BLANK BLANK
"""
# 2 3 4 5 6
# note the 1st user defined column is "2" not zero or one, since tips are at 0 & 1
##################################
OffsetDict={0: 'UL', 1: 'UR', 2: 'LL', 3: 'LR'}
# read in deck, etc
DefineDeck(deck)
printDeck()
InitializeRobot()
CurrentTipPosition = 2
# eventually row in 0,1,2,3
for row in [0,1,2]:
for offset in [0,1,2,3]:
# get tips
CurrentTipPosition = retrieveTips(CurrentTipPosition)
extraSeatTips()
adjusted_depth = 94 + row
# pick up 300ul of YPD from C4, add to C2
position(row,4,position = OffsetDict[offset])
aspirate(300,depth=adjusted_depth + 2,speed=50, mix=3)
position(row,2,position = OffsetDict[offset])
dispense(300, depth=adjusted_depth, speed=50)
aspirate(300, depth=adjusted_depth, speed=50, mix = 3)
dispense(300, depth=adjusted_depth, speed=50)
# discard tips
#disposeTips()
manualDisposeTips()
position(0,0)
ShutDownRobot()
quit()
|
tdlong/YeastRobot
|
UserPrograms/ASE/Mon_Wash_3_temp.py
|
Python
|
gpl-3.0
| 1,361
|
# Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._tarantula.
Tarantula similarity
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Tarantula']
class Tarantula(_TokenDistance):
r"""Tarantula similarity.
For two sets X and Y and a population N, Tarantula similarity
:cite:`Jones:2005` is
.. math::
sim_{Tarantula}(X, Y) =
\frac{\frac{|X \cap Y|}{|X \cap Y| + |X \setminus Y|}}
{\frac{|X \cap Y|}{|X \cap Y| + |X \setminus Y|} +
\frac{|Y \setminus X|}
{|Y \setminus X| + |(N \setminus X) \setminus Y|}}
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
sim_{Tarantula} =
\frac{\frac{a}{a+b}}{\frac{a}{a+b} + \frac{c}{c+d}}
.. versionadded:: 0.4.0
"""
def __init__(
self,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize Tarantula instance.
Parameters
----------
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(Tarantula, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
def sim(self, src: str, tar: str) -> float:
"""Return the Tarantula similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Tarantula similarity
Examples
--------
>>> cmp = Tarantula()
>>> cmp.sim('cat', 'hat')
0.9948979591836735
>>> cmp.sim('Niall', 'Neil')
0.98856416772554
>>> cmp.sim('aluminum', 'Catalan')
0.9249106078665077
>>> cmp.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
a = self._intersection_card()
b = self._src_only_card()
c = self._tar_only_card()
d = self._total_complement_card()
num = a * (c + d)
if num:
return num / (a * (2 * c + d) + b * c)
return 0.0
if __name__ == '__main__':
import doctest
doctest.testmod()
|
chrislit/abydos
|
abydos/distance/_tarantula.py
|
Python
|
gpl-3.0
| 4,524
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 30 20:12:17 2017
@author: Mohtashim
"""
# Create a list of strings: spells
spells = ["protego", "accio", "expecto patronum", "legilimens"]
# Use map() to apply a lambda function over spells: shout_spells
shout_spells = map(lambda item: item + '!!!', spells)
# Convert shout_spells to a list: shout_spells_list
shout_spells_list = list(shout_spells)
# Convert shout_spells into a list and print it
print(shout_spells_list)
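# Expected output of the print above, given the spells defined at the top:
# ['protego!!!', 'accio!!!', 'expecto patronum!!!', 'legilimens!!!']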
|
Moshiasri/learning
|
Python_dataCamp/Map()LambdaFunction.py
|
Python
|
gpl-3.0
| 474
|
from os.path import abspath
import wptools
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_knowledge.services import KnowledgeBackend
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(abspath(__file__).split('/')[-2])
class WikidataService(KnowledgeBackend):
def __init__(self, config, emitter, name='wikidata'):
self.config = config
self.process = None
self.emitter = emitter
self.name = name
self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
def _adquire(self, message=None):
logger.info('WikidataKnowledge_Adquire')
subject = message.data["subject"]
if subject is None:
logger.error("No subject to adquire knowledge about")
return
else:
dict = {}
node_data = {}
# get knowledge about
            # TODO exception handling for errors
try:
page = wptools.page(subject, silent=True, verbose=False).get_wikidata()
# parse for distant child of
node_data["description"] = page.description
# direct child of
node_data["what"] = page.what
# data fields
node_data["data"] = page.wikidata
# related to
# TODO parse and make cousin/child/parent
node_data["properties"] = page.props
# id info source
dict["wikidata"] = node_data
except:
logger.error("Could not parse wikidata for " + str(subject))
self.send_result(dict)
def adquire(self, subject):
logger.info('Call WikidataKnowledgeAdquire')
self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
def send_result(self, result = {}):
self.emitter.emit(Message("LILACS_result", {"data": result}))
def stop(self):
logger.info('WikidataKnowledge_Stop')
if self.process:
self.process.terminate()
self.process = None
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'wikidata']
instances = [WikidataService(s[1], emitter, s[0]) for s in services]
return instances
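# Hedged sketch of the config shape load_service expects (keys inferred from
# the code above; the emitter object itself is an assumption):
#
#   base_config = {'backends': {'wikidata': {'type': 'wikidata'}}}
#   instances = load_service(base_config, emitter)  # -> [WikidataService(...)]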
|
ElliotTheRobot/LILACS-mycroft-core
|
mycroft/skills/LILACS_knowledge/services/wikidata/__init__.py
|
Python
|
gpl-3.0
| 2,400
|
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import ChatMarker
from nbxmpp.modules.base import BaseModule
class ChatMarkers(BaseModule):
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_message_marker,
ns=Namespace.CHATMARKERS,
priority=15),
]
def _process_message_marker(self, _client, stanza, properties):
type_ = stanza.getTag('received', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('displayed', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('acknowledged',
namespace=Namespace.CHATMARKERS)
if type_ is None:
return
name = type_.getName()
id_ = type_.getAttr('id')
if id_ is None:
self._log.warning('Chatmarker without id')
self._log.warning(stanza)
return
properties.marker = ChatMarker(name, id_)
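    # Hedged illustration (not from the original module): a child element such
    # as <received id='abc'/> in the chat-markers namespace inside a message
    # stanza would yield properties.marker = ChatMarker('received', 'abc').
    # The exact namespace URI is left to Namespace.CHATMARKERS above.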
|
gajim/python-nbxmpp
|
nbxmpp/modules/chat_markers.py
|
Python
|
gpl-3.0
| 1,984
|
# -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo
This file is part of Mepinta.
Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
from mepinta.testing.plugins_testing.base import ModuleAutoTesterBase
class ModuleAutoTesterUtil(ModuleAutoTesterBase):
pass
def testModule():
from getDefaultContext import getDefaultContext
context = getDefaultContext()
if __name__ == "__main__":
testModule()
|
joaduo/mepinta
|
core/python_core/mepinta/testing/plugins_testing/ModuleAutoTesterUtil.py
|
Python
|
gpl-3.0
| 990
|
# coding=utf-8
# This file is part of SickChill.
#
# URL: https://sickchill.github.io
# Git: https://github.com/SickChill/SickChill.git
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
"""
Test coming episodes
"""
from __future__ import print_function, unicode_literals
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
from sickchill.show.ComingEpisodes import ComingEpisodes
import six
class ComingEpisodesTests(unittest.TestCase):
"""
    Test coming episodes
"""
def test_get_categories(self):
"""
Tests if get_categories returns the valid format and the right values
"""
categories_list = [
None, [], ['A', 'B'], ['A', 'B'], '', 'A|B', 'A|B',
]
results_list = [
[], [], ['A', 'B'], ['A', 'B'], [], ['A', 'B'], ['A', 'B']
]
self.assertEqual(
len(categories_list), len(results_list),
'Number of parameters ({0:d}) and results ({1:d}) does not match'.format(len(categories_list), len(results_list))
)
for (index, categories) in enumerate(categories_list):
self.assertEqual(ComingEpisodes._get_categories(categories), results_list[index])
def test_get_categories_map(self):
"""
Tests if get_categories_map returns the valid format and the right values
"""
categories_list = [
None, [], ['A', 'B'], ['A', 'B']
]
results_list = [
{}, {}, {'A': [], 'B': []}, {'A': [], 'B': []}
]
self.assertEqual(
len(categories_list), len(results_list),
'Number of parameters ({0:d}) and results ({1:d}) does not match'.format(len(categories_list), len(results_list))
)
for (index, categories) in enumerate(categories_list):
self.assertEqual(ComingEpisodes._get_categories_map(categories), results_list[index])
def test_get_sort(self):
"""
Tests if get_sort returns the right sort of coming episode
"""
test_cases = {
None: 'date',
'': 'date',
'wrong': 'date',
'date': 'date',
'Date': 'date',
'network': 'network',
'NetWork': 'network',
'show': 'show',
'Show': 'show',
}
unicode_test_cases = {
'': 'date',
'wrong': 'date',
'date': 'date',
'Date': 'date',
'network': 'network',
'NetWork': 'network',
'show': 'show',
'Show': 'show',
}
for tests in test_cases, unicode_test_cases:
for (sort, result) in six.iteritems(tests):
self.assertEqual(ComingEpisodes._get_sort(sort), result)
if __name__ == '__main__':
print('=====> Testing {0}'.format(__file__))
SUITE = unittest.TestLoader().loadTestsFromTestCase(ComingEpisodesTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
coderbone/SickRage-alt
|
tests/sickchill_tests/show/coming_episodes_tests.py
|
Python
|
gpl-3.0
| 3,730
|
# coding: utf-8
"""
Wheel will try to read configurations from environment variables
so you don't need this local_settings.py file if you have env vars.
1. You can set it as a file
export WHEEL_SETTINGS='/path/to/settings.py'
2. You can set individual values
export WHEEL_MONGODB_DB="wheel_db"
export WHEEL_MONGODB_HOST='localhost'
export WHEEL_MONGODB_PORT='$int 27017'
Or just fill your values in this file and rename it to 'local_settings.py'
"""
# MONGO
MONGODB_DB = "wheel_db"
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_USERNAME = None
MONGODB_PASSWORD = None
# Debug and toolbar
DEBUG = True
DEBUG_TOOLBAR_ENABLED = False
# Logger
LOGGER_ENABLED = True
LOGGER_LEVEL = 'DEBUG'
LOGGER_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
LOGGER_DATE_FORMAT = '%d.%m %H:%M:%S'
"""
If you want to have a new theme installed you can use wheelcms tool
$ pip install wheelcms
$ cd wheel
$ wheelcms install_theme material
The above commands will download the material design theme to your themes folder,
then just enable it.
DEFAULT_THEME = 'material'
"""
|
seraphln/wheel
|
wheel/example.local_settings.py
|
Python
|
gpl-3.0
| 1,093
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2021 Martin Glueck All rights reserved
# Neugasse 2, A--2244 Spannberg, Austria. martin@mangari.org
# #*** <License> ************************************************************#
# This module is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this module. If not, see <http://www.gnu.org/licenses/>.
# #*** </License> ***********************************************************#
#
#++
# Name
# STG.Program
#
# Purpose
# A application program used by a device
#
#--
from Once_Property import Once_Property
from STG._Object_ import _STG_Object_
from STG._Program_Object_ import _Program_Object_
from STG.Parameter import Parameter, Parameter_Ref, Parameter_Type, Absolute_Segment
from STG.Language import Language
import os
from collections import defaultdict
class Static (_STG_Object_) :
"""Find a static reference"""
def __init__ (self, xml) :
super ().__init__ ()
self.xml = xml
self.memories = dict ()
# end def __init__
def find (self, id, tag, cls = None, * args, ** kw) :
result = super ().get \
(self.xml, "//E:%s[@Id='%s']" % (tag, id))
if cls :
result = cls (xml = result, * args, ** kw)
return result
# end def find
def get (self, id, tag, cls = None, * args, ** kw) :
if id not in cls.Table :
cls.Table [id] = self.find (id, tag, cls, * args, ** kw)
return cls.Table [id]
# end def get
def Parameter_Ref (self, id, parent, program) :
return self.get \
( id, "ParameterRef", Parameter_Ref
, static = self, parent = parent, program = program
)
# end def Parameter_Ref
def Parameter (self, id) :
return self.get \
(id, "Parameter", Parameter, static = self)
    # end def Parameter
def Parameter_Type (self, id) :
return self.get \
(id, "ParameterType", Parameter_Type, static = self)
# end def Parameter_Type
def Memory (self, id) :
result = self.get \
(id, "AbsoluteSegment", Absolute_Segment, static = self)
self.memories [id] = result
return result
    # end def Memory
# end class Static
class Program (_Program_Object_) :
"""An application program used by an EIB device"""
def __init__ (self, xml) :
super ().__init__ ()
self.xml = xml
self.id = xml.get ("Id")
self.mask = xml.get ("MaskVersion")
self.raw_name = xml.get ("Name")
self.manu_id = int (self.id[2:6], 16)
self.app_number = int (xml.get ("ApplicationNumber"))
self.app_version = int (xml.get ("ApplicationVersion"))
prop_load = self.xpath (xml, "//E:LdCtrlCompareProp[@PropId=78]")
if prop_load :
idata = prop_load [0].get ("InlineData")
data = []
for i in range (len (idata) // 2) :
data.append ("0x" + idata [i*2:i*2+2])
data = ", ".join (data)
else :
data = "-"
self.load_compare = data
self.parameter_refs = dict ()
self.com_object_refs = dict ()
static = Static (self.get (xml, "E:Static"))
for abse in self.xpath (xml, "//E:AbsoluteSegment") :
static.Memory (abse.get ("Id"))
self._visit_element (self, self.get (xml, "E:Dynamic"), static)
self._setup_tables (static)
self._finalize ()
# end def __init__
def _finalize (self) :
self.memory_segments = \
[ m for m in sorted ( Absolute_Segment.Table.values ()
, key = lambda m : m.address
)
]
ram_section = \
[ m for m in self.memory_segments
if (m.size > 1) and m.data is None
]
if ram_section :
self.com_ram_memory = ram_section [0]
self.com_objects_by_number = defaultdict (list)
for cor in self.com_object_refs.values () :
self.com_objects_by_number [cor.number].append (cor)
# end def _finalize
def as_html (self, template = "parameter_overview-grid.jnj") :
from jinja2 import Environment, FileSystemLoader
path = os.path.dirname (__file__)
env = Environment \
(loader = FileSystemLoader (os.path.join (path, "jinja")))
template = env.get_template (template)
return template.render (dict (device = self))
# end def as_html
def eeprom_as_html (self, reference_address = 0) :
p_refs = sorted \
( ( pr for pr in self.parameter_refs.values ()
if pr.parameter.address
)
, key = lambda pr : (pr.parameter.address, pr.parameter.mask)
)
from jinja2 import Environment, FileSystemLoader
path = os.path.dirname (__file__)
env = Environment \
(loader = FileSystemLoader (os.path.join (path, "jinja")))
template = env.get_template ("eeprom_layout.jnj")
return template.render \
( dict ( p_refs = p_refs
, program = self
, ref_addr = reference_address
)
)
# end def eeprom_as_html
@Once_Property
def name (self) :
return Language.Translation (self.id, "Name", self.raw_name)
# end def name
def _setup_tables (self, static) :
adr_tab = self.get (self.xml, "//E:AddressTable")
aso_tab = self.get (self.xml, "//E:AssociationTable")
com_tab = self.get (self.xml, "//E:ComObjectTable")
self.address_table = \
( int (adr_tab.get ("Offset"))
, int (adr_tab.get ("MaxEntries"))
, static.Memory (adr_tab.get ("CodeSegment"))
)
self.assoc_table = \
( int (aso_tab.get ("Offset"))
, int (aso_tab.get ("MaxEntries"))
, static.Memory (aso_tab.get ("CodeSegment"))
)
self.com_table = \
            ( int (com_tab.get ("Offset"))
, static.Memory (com_tab.get ("CodeSegment"))
)
# end def _setup_tables
### pickle interfaces
Value_Attributes = ("id", "mask", "app_number", "app_version", "load_compare")
@property
def pickle_cargo (self) :
result = super ().pickle_cargo
for attr in "address_table", "assoc_table", "com_table" :
value = getattr (self, attr)
value = value [:-1] + (value [-1].id, )
result [attr] = value
return result
# end def pickle_cargo
@classmethod
def From_Pickle (cls, dump) :
for attr in "address_table", "assoc_table", "com_table" :
value = dump [attr]
value = value [:-1] + (Absolute_Segment.Table [value [-1]], )
dump [attr] = value
result = super (Program, cls).From_Pickle (None, dump)
result._finalize ()
return result
# end def From_Pickle
# end class Program
if __name__ == "__main__" :
from STG._Object_ import _STG_Object_
from STG.Language import Language
from STG.Datapoint import Datapoint
import sys
if len (sys.argv) > 2 :
master = Datapoint.Parse (sys.argv [2])
Datapoint.From_Master (master)
root = _STG_Object_.Parse (sys.argv [1])
Language.add (root)
Language.set ("de-DE")
if 1 :
prg = Program (Program.get (root, "//E:ApplicationProgram"))
if len (sys.argv) > 3 :
file = open (sys.argv [3], "w", encoding = "utf-8")
else :
file = sys.stdout
file.write (prg.as_html ())
if len (sys.argv) > 3 :
file.close ()
print (prg.name)
### __END__ STG.Program
|
selfbus/development-tools
|
test-case-generator/STG/Program.py
|
Python
|
gpl-3.0
| 8,523
|
from multiprocessing import Process,Queue
import os
class TestMP:
def __init__(self,n):
self.n = n
@staticmethod
def worker(q):
"""worker function"""
# print('worker',*args)
# print("ppid= {} pid= {}".format(os.getppid(),os.getpid()))
q.put([1,'x',(os.getpid(),[])])
return
def main(self):
if __name__ == '__main__':
jobs = []
for i in range(self.n):
q = Queue()
p = Process(target=self.worker,args=(q,))
jobs.append((p,q))
p.start()
for i in range(self.n):
j=jobs.pop(0)
j[0].join()
msg = j[1].get()
print("job no {} ended, msg: {}".format(i,msg))
m=TestMP(10)
m.main()
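# Hedged note on the expected output: each worker puts [1, 'x', (<its pid>, [])]
# on its queue, so the loop above prints lines of the form
#   job no 0 ended, msg: [1, 'x', (12345, [])]
# (the pid value is illustrative).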
|
vleo/vleo-notebook
|
test_python/multiprocessing/test_multiprocessing.py
|
Python
|
gpl-3.0
| 811
|
import sys
import unittest
sys.path.insert(0, "../src/build")
import addonconf
class AddonConfModuleTestCase(unittest.TestCase):
def test_load(self):
# act
config = addonconf.load("configs/config.json")
# assert
self.assertEqual(config, None, "Wrong return value for not exists config")
def test_load2(self):
# act
config = addonconf.load("configs/config.json.1")
# assert
self.assertEqual(config, None, "Wrong return value for unvalide config")
def test_load3(self):
# arrange
correct_config = {'version': '0.1', 'xpi': {'theme': 'firefox-theme-test.xpi', 'package': 'firefox-test-@VERSION@.xpi', 'extension': 'firefox-extension-test.xpi'}, 'max-version': '31.0a1', 'directory-structure': {'shared-dir': 'chrome'}, 'min-version': '29.0'}
# act
config = addonconf.load("configs/config.json.2")
# assert
        self.assertEqual(config, correct_config, "Config loaded incorrectly")
if __name__ == '__main__':
unittest.main()
|
seleznev/firefox-complete-theme-build-system
|
tests/test_addonconf.py
|
Python
|
mpl-2.0
| 1,050
|
import os
from configurations.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "morgoth.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
application = DjangoWhiteNoise(get_wsgi_application())
|
rehandalal/morgoth
|
morgoth/wsgi.py
|
Python
|
mpl-2.0
| 296
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import StringIO
import gzip
import web
import mock
from nose.tools import eq_, ok_
from datetime import datetime
from contextlib import closing
from configman.dotdict import DotDict
from socorro.collector.wsgi_breakpad_collector import BreakpadCollector
from socorro.collector.throttler import ACCEPT, IGNORE, DEFER
from socorro.unittest.testbase import TestCase
class ObjectWithValue(object):
def __init__(self, v):
self.value = v
class TestCollectorApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.logger = mock.MagicMock()
config.throttler = mock.MagicMock()
config.collector = DotDict()
config.collector.collector_class = BreakpadCollector
config.collector.dump_id_prefix = 'bp-'
config.collector.dump_field = 'dump'
config.collector.accept_submitted_crash_id = False
config.collector.accept_submitted_legacy_processing = False
config.collector.checksum_method = hashlib.md5
config.crash_storage = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
c = BreakpadCollector(config)
eq_(c.config, config)
eq_(c.logger, config.logger)
eq_(c.throttler, config.throttler)
eq_(c.crash_storage, config.crash_storage)
eq_(c.dump_id_prefix, 'bp-')
eq_(c.dump_field, 'dump')
def test_make_raw_crash(self):
config = self.get_standard_config()
form = DotDict()
form.ProductName = 'FireSquid'
form.Version = '99'
form.dump = 'fake dump'
form.some_field = '\x0023'
form.some_other_field = ObjectWithValue('XYZ')
class BreakpadCollectorWithMyForm(config.collector.collector_class):
def _form_as_mapping(self):
return form
c = BreakpadCollectorWithMyForm(config)
rc, dmp = c._get_raw_crash_from_form()
eq_(rc.ProductName, 'FireSquid')
eq_(rc.Version, '99')
eq_(rc.some_field, '23')
eq_(rc.some_other_field, 'XYZ')
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST(self, mocked_web, mocked_webapi, mocked_utc_now, mocked_time):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = '\x00FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_reject_browser_with_hangid(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform[u'\u0000ProductName'] = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.HangID = 'xyz'
rawform.ProcessType = 'browser'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.throttle_rate = None
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc = dict(erc)
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (IGNORE, None)
r = c.POST()
eq_(r, "Unsupported=1\n")
ok_(not
c.crash_storage.save_raw_crash.call_count
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_crash_id(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3cx\x0042-47a5-843f-a0f892140107'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_crash_id_and_use_it(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
config.collector.accept_submitted_crash_id = True
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = str(DEFER)
rawform.throttle_rate = 100
rawform.dump_checksums = "this is poised to overwrite and cause trouble"
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = DEFER
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (DEFER, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('140107\n'))
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_legacy_processing(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = u'1'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_legacy_processing_and_use_it(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
config.collector.accept_submitted_crash_id = True
config.collector.accept_submitted_legacy_processing = True
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99\x00'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform[u'some_field\u0000'] = '23'
rawform[u'some_\u0000other_field'] = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = str(DEFER)
rawform.throttle_rate = 100
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = DEFER
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (DEFER, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('140107\n'))
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.ctx')
def test_POST_with_gzip(
self,
mocked_web_ctx,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
form = """
--socorro1234567
Content-Disposition: form-data; name="ProductName"
FireSquid
--socorro1234567
Content-Disposition: form-data; name="Version"
99
--socorro1234567
Content-Disposition: form-data; name="some_field"
23
--socorro1234567
Content-Disposition: form-data; name="some_other_field"
XYZ
--socorro1234567
Content-Disposition: form-data; name="dump"; filename="dump"
Content-Type: application/octet-stream
fake dump
--socorro1234567
Content-Disposition: form-data; name="aux_dump"; filename="aux_dump"
Content-Type: application/octet-stream
aux_dump contents
"""
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
with closing(StringIO.StringIO()) as s:
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(form)
g.close()
gzipped_form = s.getvalue()
mocked_webapi.data.return_value = gzipped_form
mocked_web_ctx.configure_mock(
env={
'HTTP_CONTENT_ENCODING': 'gzip',
'CONTENT_ENCODING': 'gzip',
'CONTENT_TYPE':
'multipart/form-data; boundary="socorro1234567"',
'REQUEST_METHOD': 'POST'
}
)
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
|
Tchanders/socorro
|
socorro/unittest/collector/test_wsgi_breakpad_collector.py
|
Python
|
mpl-2.0
| 17,334
|
# -*- coding: utf-8 -*-
SFDC_COUNTRIES = {
'af': 'Afghanistan',
'ax': 'Aland Islands',
'al': 'Albania',
'dz': 'Algeria',
'as': 'American Samoa',
'ad': 'Andorra',
'ao': 'Angola',
'ai': 'Anguilla',
'aq': 'Antarctica',
'ag': 'Antigua and Barbuda',
'ar': 'Argentina',
'am': 'Armenia',
'aw': 'Aruba',
'au': 'Australia',
'at': 'Austria',
'az': 'Azerbaijan',
'bs': 'Bahamas',
'bh': 'Bahrain',
'bd': 'Bangladesh',
'bb': 'Barbados',
'by': 'Belarus',
'be': 'Belgium',
'bz': 'Belize',
'bj': 'Benin',
'bm': 'Bermuda',
'bt': 'Bhutan',
'bo': 'Bolivia, Plurinational State of',
'bq': 'Bonaire, Sint Eustatius and Saba',
'ba': 'Bosnia and Herzegovina',
'bw': 'Botswana',
'bv': 'Bouvet Island',
'br': 'Brazil',
'io': 'British Indian Ocean Territory',
'bn': 'Brunei Darussalam',
'bg': 'Bulgaria',
'bf': 'Burkina Faso',
'bi': 'Burundi',
'kh': 'Cambodia',
'cm': 'Cameroon',
'ca': 'Canada',
'cv': 'Cape Verde',
'ky': 'Cayman Islands',
'cf': 'Central African Republic',
'td': 'Chad',
'cl': 'Chile',
'cn': 'China',
'cx': 'Christmas Island',
'cc': 'Cocos (Keeling) Islands',
'co': 'Colombia',
'km': 'Comoros',
'cg': 'Congo',
'cd': 'Congo, the Democratic Republic of the',
'ck': 'Cook Islands',
'cr': 'Costa Rica',
'ci': 'Cote d\'Ivoire',
'hr': 'Croatia',
'cu': 'Cuba',
'cw': 'Curaçao',
'cy': 'Cyprus',
'cz': 'Czech Republic',
'dk': 'Denmark',
'dj': 'Djibouti',
'dm': 'Dominica',
'do': 'Dominican Republic',
'ec': 'Ecuador',
'eg': 'Egypt',
'sv': 'El Salvador',
'gq': 'Equatorial Guinea',
'er': 'Eritrea',
'ee': 'Estonia',
'et': 'Ethiopia',
'fk': 'Falkland Islands (Malvinas)',
'fo': 'Faroe Islands',
'fj': 'Fiji',
'fi': 'Finland',
'fr': 'France',
'gf': 'French Guiana',
'pf': 'French Polynesia',
'tf': 'French Southern Territories',
'ga': 'Gabon',
'gm': 'Gambia',
'ge': 'Georgia',
'de': 'Germany',
'gh': 'Ghana',
'gi': 'Gibraltar',
'gr': 'Greece',
'gl': 'Greenland',
'gd': 'Grenada',
'gp': 'Guadeloupe',
'gu': 'Guam',
'gt': 'Guatemala',
'gg': 'Guernsey',
'gn': 'Guinea',
'gw': 'Guinea-Bissau',
'gy': 'Guyana',
'ht': 'Haiti',
'hm': 'Heard Island and McDonald Islands',
'va': 'Holy See (Vatican City State)',
'hn': 'Honduras',
'hk': 'Hong Kong',
'hu': 'Hungary',
'is': 'Iceland',
'in': 'India',
'id': 'Indonesia',
'ir': 'Iran, Islamic Republic of',
'iq': 'Iraq',
'ie': 'Ireland',
'im': 'Isle of Man',
'il': 'Israel',
'it': 'Italy',
'jm': 'Jamaica',
'jp': 'Japan',
'je': 'Jersey',
'jo': 'Jordan',
'kz': 'Kazakhstan',
'ke': 'Kenya',
'ki': 'Kiribati',
'kp': 'Korea, Democratic People\'s Republic of',
'kr': 'Korea, Republic of',
'kw': 'Kuwait',
'kg': 'Kyrgyzstan',
'la': 'Lao People\'s Democratic Republic',
'lv': 'Latvia',
'lb': 'Lebanon',
'ls': 'Lesotho',
'lr': 'Liberia',
'ly': 'Libya',
'li': 'Liechtenstein',
'lt': 'Lithuania',
'lu': 'Luxembourg',
'mo': 'Macao',
'mk': 'Macedonia, the former Yugoslav Republic of',
'mg': 'Madagascar',
'mw': 'Malawi',
'my': 'Malaysia',
'mv': 'Maldives',
'ml': 'Mali',
'mt': 'Malta',
'mh': 'Marshall Islands',
'mq': 'Martinique',
'mr': 'Mauritania',
'mu': 'Mauritius',
'yt': 'Mayotte',
'mx': 'Mexico',
'fm': 'Micronesia',
'md': 'Moldova, Republic of',
'mc': 'Monaco',
'mn': 'Mongolia',
'me': 'Montenegro',
'ms': 'Montserrat',
'ma': 'Morocco',
'mz': 'Mozambique',
'mm': 'Myanmar',
'na': 'Namibia',
'nr': 'Nauru',
'np': 'Nepal',
'nl': 'Netherlands',
'an': 'Netherlands Antilles',
'nc': 'New Caledonia',
'nz': 'New Zealand',
'ni': 'Nicaragua',
'ne': 'Niger',
'ng': 'Nigeria',
'nu': 'Niue',
'nf': 'Norfolk Island',
'mp': 'Northern Mariana Islands',
'no': 'Norway',
'om': 'Oman',
'pk': 'Pakistan',
'pw': 'Palau',
'ps': 'Palestine',
'pa': 'Panama',
'pg': 'Papua New Guinea',
'py': 'Paraguay',
'pe': 'Peru',
'ph': 'Philippines',
'pn': 'Pitcairn',
'pl': 'Poland',
'pt': 'Portugal',
'pr': 'Puerto Rico',
'qa': 'Qatar',
're': 'Reunion',
'ro': 'Romania',
'ru': 'Russian Federation',
'rw': 'Rwanda',
'bl': 'Saint Barthélemy',
'sh': 'Saint Helena, Ascension and Tristan da Cunha',
'kn': 'Saint Kitts and Nevis',
'lc': 'Saint Lucia',
'mf': 'Saint Martin (French part)',
'pm': 'Saint Pierre and Miquelon',
'vc': 'Saint Vincent and the Grenadines',
'ws': 'Samoa',
'sm': 'San Marino',
'st': 'Sao Tome and Principe',
'sa': 'Saudi Arabia',
'sn': 'Senegal',
'rs': 'Serbia',
'sc': 'Seychelles',
'sl': 'Sierra Leone',
'sg': 'Singapore',
'sx': 'Sint Maarten (Dutch part)',
'sk': 'Slovakia',
'si': 'Slovenia',
'sb': 'Solomon Islands',
'so': 'Somalia',
'za': 'South Africa',
'gs': 'South Georgia and the South Sandwich Islands',
'ss': 'South Sudan',
'es': 'Spain',
'lk': 'Sri Lanka',
'sd': 'Sudan',
'sr': 'Suriname',
'sj': 'Svalbard and Jan Mayen',
'sz': 'Swaziland',
'se': 'Sweden',
'ch': 'Switzerland',
'sy': 'Syrian Arab Republic',
'tw': 'Taiwan',
'tj': 'Tajikistan',
'tz': 'Tanzania, United Republic of',
'th': 'Thailand',
'tl': 'Timor-Leste',
'tg': 'Togo',
'tk': 'Tokelau',
'to': 'Tonga',
'tt': 'Trinidad and Tobago',
'tn': 'Tunisia',
'tr': 'Turkey',
'tm': 'Turkmenistan',
'tc': 'Turks and Caicos Islands',
'tv': 'Tuvalu',
'vi': 'U.S. Virgin Islands',
'ug': 'Uganda',
'ua': 'Ukraine',
'ae': 'United Arab Emirates',
'gb': 'United Kingdom',
'us': 'United States',
'um': 'United States Minor Outlying Islands',
'uy': 'Uruguay',
'uz': 'Uzbekistan',
'vu': 'Vanuatu',
've': 'Venezuela, Bolivarian Republic of',
'vn': 'Viet Nam',
'vg': 'Virgin Islands, British',
'wf': 'Wallis and Futuna',
'eh': 'Western Sahara',
'ye': 'Yemen',
'zm': 'Zambia',
'zw': 'Zimbabwe',
}
SFDC_COUNTRIES_LIST = list(SFDC_COUNTRIES.keys())
COUNTRY_CODES_MAP = {
'afg': 'af',
'ala': 'ax',
'alb': 'al',
'dza': 'dz',
'asm': 'as',
'and': 'ad',
'ago': 'ao',
'aia': 'ai',
'ata': 'aq',
'atg': 'ag',
'arg': 'ar',
'arm': 'am',
'abw': 'aw',
'aus': 'au',
'aut': 'at',
'aze': 'az',
'bhs': 'bs',
'bhr': 'bh',
'bgd': 'bd',
'brb': 'bb',
'blr': 'by',
'bel': 'be',
'blz': 'bz',
'ben': 'bj',
'bmu': 'bm',
'btn': 'bt',
'bol': 'bo',
'bih': 'ba',
'bwa': 'bw',
'bvt': 'bv',
'bra': 'br',
'vgb': 'vg',
'iot': 'io',
'brn': 'bn',
'bgr': 'bg',
'bfa': 'bf',
'bdi': 'bi',
'khm': 'kh',
'cmr': 'cm',
'can': 'ca',
'cpv': 'cv',
'cym': 'ky',
'caf': 'cf',
'tcd': 'td',
'chl': 'cl',
'chn': 'cn',
'hkg': 'hk',
'mac': 'mo',
'cxr': 'cx',
'cck': 'cc',
'col': 'co',
'com': 'km',
'cog': 'cg',
'cod': 'cd',
'cok': 'ck',
'cri': 'cr',
'civ': 'ci',
'hrv': 'hr',
'cub': 'cu',
'cyp': 'cy',
'cze': 'cz',
'dnk': 'dk',
'dji': 'dj',
'dma': 'dm',
'dom': 'do',
'ecu': 'ec',
'egy': 'eg',
'slv': 'sv',
'gnq': 'gq',
'eri': 'er',
'est': 'ee',
'eth': 'et',
'flk': 'fk',
'fro': 'fo',
'fji': 'fj',
'fin': 'fi',
'fra': 'fr',
'guf': 'gf',
'pyf': 'pf',
'atf': 'tf',
'gab': 'ga',
'gmb': 'gm',
'geo': 'ge',
'deu': 'de',
'gha': 'gh',
'gib': 'gi',
'grc': 'gr',
'grl': 'gl',
'grd': 'gd',
'glp': 'gp',
'gum': 'gu',
'gtm': 'gt',
'ggy': 'gg',
'gin': 'gn',
'gnb': 'gw',
'guy': 'gy',
'hti': 'ht',
'hmd': 'hm',
'vat': 'va',
'hnd': 'hn',
'hun': 'hu',
'isl': 'is',
'ind': 'in',
'idn': 'id',
'irn': 'ir',
'irq': 'iq',
'irl': 'ie',
'imn': 'im',
'isr': 'il',
'ita': 'it',
'jam': 'jm',
'jpn': 'jp',
'jey': 'je',
'jor': 'jo',
'kaz': 'kz',
'ken': 'ke',
'kir': 'ki',
'prk': 'kp',
'kor': 'kr',
'kwt': 'kw',
'kgz': 'kg',
'lao': 'la',
'lva': 'lv',
'lbn': 'lb',
'lso': 'ls',
'lbr': 'lr',
'lby': 'ly',
'lie': 'li',
'ltu': 'lt',
'lux': 'lu',
'mkd': 'mk',
'mdg': 'mg',
'mwi': 'mw',
'mys': 'my',
'mdv': 'mv',
'mli': 'ml',
'mlt': 'mt',
'mhl': 'mh',
'mtq': 'mq',
'mrt': 'mr',
'mus': 'mu',
'myt': 'yt',
'mex': 'mx',
'fsm': 'fm',
'mda': 'md',
'mco': 'mc',
'mng': 'mn',
'mne': 'me',
'msr': 'ms',
'mar': 'ma',
'moz': 'mz',
'mmr': 'mm',
'nam': 'na',
'nru': 'nr',
'npl': 'np',
'nld': 'nl',
'ant': 'an',
'ncl': 'nc',
'nzl': 'nz',
'nic': 'ni',
'ner': 'ne',
'nga': 'ng',
'niu': 'nu',
'nfk': 'nf',
'mnp': 'mp',
'nor': 'no',
'omn': 'om',
'pak': 'pk',
'plw': 'pw',
'pse': 'ps',
'pan': 'pa',
'png': 'pg',
'pry': 'py',
'per': 'pe',
'phl': 'ph',
'pcn': 'pn',
'pol': 'pl',
'prt': 'pt',
'pri': 'pr',
'qat': 'qa',
'reu': 're',
'rou': 'ro',
'rus': 'ru',
'rwa': 'rw',
'blm': 'bl',
'shn': 'sh',
'kna': 'kn',
'lca': 'lc',
'maf': 'mf',
'spm': 'pm',
'vct': 'vc',
'wsm': 'ws',
'smr': 'sm',
'stp': 'st',
'sau': 'sa',
'sen': 'sn',
'srb': 'rs',
'syc': 'sc',
'sle': 'sl',
'sgp': 'sg',
'svk': 'sk',
'svn': 'si',
'slb': 'sb',
'som': 'so',
'zaf': 'za',
'sgs': 'gs',
'ssd': 'ss',
'esp': 'es',
'lka': 'lk',
'sdn': 'sd',
'sur': 'sr',
'sjm': 'sj',
'swz': 'sz',
'swe': 'se',
'che': 'ch',
'syr': 'sy',
'twn': 'tw',
'tjk': 'tj',
'tza': 'tz',
'tha': 'th',
'tls': 'tl',
'tgo': 'tg',
'tkl': 'tk',
'ton': 'to',
'tto': 'tt',
'tun': 'tn',
'tur': 'tr',
'tkm': 'tm',
'tca': 'tc',
'tuv': 'tv',
'uga': 'ug',
'ukr': 'ua',
'are': 'ae',
'gbr': 'gb',
'usa': 'us',
'umi': 'um',
'ury': 'uy',
'uzb': 'uz',
'vut': 'vu',
'ven': 've',
'vnm': 'vn',
'vir': 'vi',
'wlf': 'wf',
'esh': 'eh',
'yem': 'ye',
'zmb': 'zm',
'zwe': 'zw',
}
def convert_country_3_to_2(ccode):
ccode = ccode.lower()
return COUNTRY_CODES_MAP.get(ccode, None)
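# Illustrative usage sketch (not part of the original module), assuming the
# mappings above; unknown alpha-3 codes simply fall back to None:
#
#     convert_country_3_to_2('USA')  # -> 'us' (lookup is case-insensitive)
#     convert_country_3_to_2('deu')  # -> 'de'
#     convert_country_3_to_2('xyz')  # -> None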
|
glogiotatidis/basket
|
basket/news/country_codes.py
|
Python
|
mpl-2.0
| 10,229
|
import functools
from . import (
constants,
utils,
)
class Card():
def __init__(self, kind=None, strength=None, value=None, verbose=None, **kwargs):
if kind is None:
raise(TypeError("Missing required 'kind' argument."))
self.kind = kind
self.strength = strength
self.value = value
self.verbose = verbose if verbose is not None else kind
super().__init__(**kwargs)
def __valid_comparision(self, arg):
return hasattr(arg, "kind") and hasattr(arg, "strength")
_valid_comparision = __valid_comparision
def __lt__(self, value):
if not self.__valid_comparision(value):
return NotImplemented
if self.strength is not None:
if value.strength is not None:
return self.strength < value.strength
else:
return False
elif value.strength is not None:
return True
return self.kind < value.kind
def __str__(self):
return self.kind
class SimpleCard(Card):
def __init__(self, colour=None, kind=None, strength=None, **kwargs):
if colour is None:
raise(TypeError("Missing required 'colour' argument."))
self.colour = colour
if kind is None:
if strength is not None:
kind = str(strength)
super().__init__(kind=kind, strength=strength, **kwargs)
def __valid_comparision(self, arg):
if super()._valid_comparision(arg):
if hasattr(arg, "colour") and (arg.colour is not None):
if arg.strength is not None:
return True
return False
_valid_comparision = __valid_comparision
def __lt__(self, value):
if not self.__valid_comparision(value):
return super().__lt__(value)
if self.strength < value.strength:
return True
if self.strength == value.strength:
return self.colour < value.colour
return False
def __eq__(self, value):
if not self._valid_comparision(value):
return False
        if (self.strength == value.strength) and (self.colour == value.colour):
            return True
        return False
def __str__(self):
return self.kind + self.colour[0]
class MahJongg(Card):
def __init__(self):
super().__init__(kind='1', strength=1)
class Dragon(Card):
def __init__(self):
super().__init__(kind='R', value=25, verbose="Dragon")
class Pheonix(Card):
def __init__(self):
super().__init__(kind='P', value=-25, verbose="Pheonix")
class Dog(Card):
def __init__(self):
super().__init__(kind="D", verbose="Dog")
|
julka2010/games
|
games/tichu/cards.py
|
Python
|
mpl-2.0
| 2,702
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
config = {
"suite_definitions": {
"gaiatest_desktop": {
"options": [
"--restart",
"--timeout=%(timeout)s",
"--type=%(type)s",
"--testvars=%(testvars)s",
"--profile=%(profile)s",
"--symbols-path=%(symbols_path)s",
"--gecko-log=%(gecko_log)s",
"--xml-output=%(xml_output)s",
"--html-output=%(html_output)s",
"--log-raw=%(raw_log_file)s",
"--binary=%(binary)s",
"--address=%(address)s",
"--total-chunks=%(total_chunks)s",
"--this-chunk=%(this_chunk)s"
],
"run_filename": "",
"testsdir": ""
},
"gaiatest_emulator": {
"options": [
"--restart",
"--timeout=%(timeout)s",
"--type=%(type)s",
"--testvars=%(testvars)s",
"--profile=%(profile)s",
"--symbols-path=%(symbols_path)s",
"--xml-output=%(xml_output)s",
"--html-output=%(html_output)s",
"--log-raw=%(raw_log_file)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s"
],
"run_filename": "",
"testsdir": ""
},
"marionette_desktop": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--binary=%(binary)s",
"--address=%(address)s",
"--symbols-path=%(symbols_path)s"
],
"run_filename": "",
"testsdir": ""
},
"marionette_emulator": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s",
"--symbols-path=%(symbols_path)s"
],
"run_filename": "",
"testsdir": ""
},
"webapi_desktop": {
"options": [],
"run_filename": "",
"testsdir": ""
},
"webapi_emulator": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--symbols-path=%(symbols_path)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s"
],
"run_filename": "",
"testsdir": ""
}
}
}
|
vladikoff/fxa-mochitest
|
tests/config/mozharness/marionette.py
|
Python
|
mpl-2.0
| 2,925
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from firefox_ui_harness.arguments import UpdateArguments
from firefox_ui_harness.runners import UpdateTestRunner
from firefox_ui_harness.runtests import cli
def cli_update():
cli(runner_class=UpdateTestRunner, parser_class=UpdateArguments)
if __name__ == '__main__':
cli_update()
|
galgeek/firefox-ui-tests
|
firefox_ui_harness/cli_update.py
|
Python
|
mpl-2.0
| 492
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Digi International Inc. All Rights Reserved.
import json
from wva.test.test_utilities import WVATestBase
class TestWVASubscriptions(WVATestBase):
def test_get_subscriptions(self):
self.prepare_json_response("GET", "/ws/subscriptions", {
"subscriptions": [
"subscriptions/a",
"subscriptions/b",
]
})
subs = self.wva.get_subscriptions()
self.assertEqual(len(subs), 2)
self.assertEqual(subs[0].short_name, "a")
self.assertEqual(subs[1].short_name, "b")
def test_get_metadata(self):
self.prepare_json_response("GET", "/ws/subscriptions/speedy", {
'subscription': {'buffer': 'queue',
'interval': 1,
'uri': 'vehicle/data/VehicleSpeed'}
})
sub = self.wva.get_subscription("speedy")
self.assertEqual(sub.get_metadata(), {
'buffer': 'queue',
'interval': 1,
'uri': 'vehicle/data/VehicleSpeed',
})
def test_delete(self):
self.prepare_response("DELETE", "/ws/subscriptions/short-name", "")
sub = self.wva.get_subscription("short-name")
sub.delete()
self.assertEqual(self._get_last_request().method, "DELETE")
self.assertEqual(self._get_last_request().path, "/ws/subscriptions/short-name")
def test_create(self):
self.prepare_response("PUT", "/ws/subscriptions/new-short-name", "")
sub = self.wva.get_subscription("new-short-name")
sub.create("vehicle/data/EngineSpeed", buffer="drop", interval=5)
req = self._get_last_request()
self.assertDictEqual(json.loads(req.body.decode('utf-8')), {
'subscription': {'buffer': 'drop',
'interval': 5,
'uri': 'vehicle/data/EngineSpeed'},
})
|
digidotcom/python-wvalib
|
wva/test/test_subscriptions.py
|
Python
|
mpl-2.0
| 2,119
|
"""
rebuildbot/travis.py
Wrapper around travispy
The latest version of this package is available at:
<https://github.com/jantman/rebuildbot>
################################################################################
Copyright 2015 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of rebuildbot.
rebuildbot is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
rebuildbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with rebuildbot. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/rebuildbot> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import time
import logging
from dateutil import parser
from datetime import timedelta, datetime
import pytz
from rebuildbot.exceptions import (PollTimeoutException, TravisTriggerError)
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from travispy import TravisPy
from travispy.travispy import PUBLIC
logger = logging.getLogger(__name__)
CHECK_WAIT_TIME = 10 # seconds to wait before polling for builds
POLL_NUM_TIMES = 6 # how many times to poll before raising exception
class Travis(object):
"""
ReBuildBot wrapper around TravisPy.
"""
def __init__(self, github_token):
"""
Connect to TravisCI. Return a connected TravisPy instance.
:param github_token: GitHub access token to auth to Travis with
:type github_token: str
:rtype: :py:class:`TravisPy`
"""
self.travis = TravisPy.github_auth(github_token)
self.user = self.travis.user()
logger.debug("Authenticated to TravisCI as %s <%s> (user ID %s)",
self.user.login, self.user.email, self.user.id)
def get_repos(self, date_check=True):
"""
Return a list of all repo names for the current authenticated user. If
``date_check`` is True, only return repos with a last build more
than 24 hours ago.
This only returns repos with a slug (<user_or_org>/<repo_name>) that
begins with the user login; it ignores organization repos or repos
that the user is a collaborator on.
:param date_check: whether or not to only return repos with a last
build more than 24 hours ago.
:type date_check: bool
:returns: list of the user's repository slugs
:rtype: list of strings
"""
repos = []
for r in self.travis.repos(member=self.user.login):
if not r.slug.startswith(self.user.login + '/'):
logger.debug("Ignoring repo owned by another user: %s", r.slug)
continue
build_in_last_day = False
try:
build_in_last_day = self.repo_build_in_last_day(r)
except KeyError:
logger.debug('Skipping repo with no builds: %s', r.slug)
continue
if date_check and build_in_last_day:
logger.debug("Skipping repo with build in last day: %s", r.slug)
continue
repos.append(r.slug)
logger.debug('Found %d repos: %s', len(repos), repos)
return sorted(repos)
def repo_build_in_last_day(self, repo):
"""
Return True if the repo has had a build in the last day, False otherwise
:param repo: Travis repository object
:rtype: bool
"""
now = datetime.now(pytz.utc)
dt = parser.parse(repo.last_build.started_at)
if now - dt > timedelta(hours=24):
return False
return True
def run_build(self, repo_slug, branch='master'):
"""
Trigger a Travis build of the specified repository on the specified
branch. Wait for the build repository's latest build ID to change,
and then return a 2-tuple of the old build id and the new one.
If the new build has not started within the timeout interval, the
new build ID will be None.
:param repo_slug: repository slug (<username>/<repo_name>)
:type repo_slug: string
:param branch: name of the branch to build
:type branch: string
:raises: PollTimeoutException, TravisTriggerError
:returns: (last build ID, new build ID)
:rtype: tuple
"""
repo = self.travis.repo(repo_slug)
logger.info("Travis Repo %s (%s): pending=%s queued=%s running=%s "
"state=%s", repo_slug, repo.id, repo.pending, repo.queued,
repo.running, repo.state)
last_build = repo.last_build
logger.debug("Found last build as #%s (%s), state=%s (%s), "
"started_at=%s (<%s>)",
last_build.number, last_build.id,
last_build.state, last_build.color, last_build.started_at,
self.url_for_build(repo_slug, last_build.id))
self.trigger_travis(repo_slug, branch=branch)
try:
new_id = self.wait_for_new_build(repo_slug, last_build.id)
except PollTimeoutException:
logger.warning("Could not find new build ID for %s within timeout;"
" will poll later." % repo_slug)
new_id = None
return (last_build.id, new_id)
def wait_for_new_build(self, repo_slug, last_build_id):
"""
Wait for a repository to show a new last build ID, indicating that the
triggered build has started or is queued.
This polls for the last_build ID every :py:const:`~.CHECK_WAIT_TIME`
        seconds, up to :py:const:`~.POLL_NUM_TIMES` times. If the ID has not
changed at the end, raise a :py:class:`~.PollTimeoutException`.
:param repo_slug: the slug for the repo to check
:type repo_slug: string
:param last_build_id: the ID of the last build
:type last_build_id: int
:raises: PollTimeoutException, TravisTriggerError
:returns: ID of the new build
:rtype: int
"""
logger.info("Waiting up to %s seconds for build of %s to start",
(POLL_NUM_TIMES * CHECK_WAIT_TIME), repo_slug)
for c in range(0, POLL_NUM_TIMES):
build_id = self.get_last_build(repo_slug).id
if build_id != last_build_id:
logger.debug("Found new build ID: %s", build_id)
return build_id
logger.debug("Build has not started; waiting %ss", CHECK_WAIT_TIME)
time.sleep(CHECK_WAIT_TIME)
else:
raise PollTimeoutException('last_build.id', repo_slug,
CHECK_WAIT_TIME, POLL_NUM_TIMES)
def get_last_build(self, repo_slug):
"""
Return the TravisPy.Build object for the last build of the repo.
"""
return self.travis.repo(repo_slug).last_build
def trigger_travis(self, repo_slug, branch='master'):
"""
Trigger a TravisCI build of a specific branch of a specific repo.
The `README.rst for TravisPy <https://github.com/menegazzo/travispy>`_
clearly says that it will only support official, non-experimental,
non-Beta API methods. As a result, the API functionality to
`trigger builds <http://docs.travis-ci.com/user/triggering-builds/>`_
is not supported. This method adds that.
:raises TravisTriggerError
:param repo_slug: repository slug (<username>/<repo_name>)
:type repo_slug: string
:param branch: name of the branch to build
:type branch: string
"""
body = {
'request': {
'branch': branch,
'message': 'triggered by https://github.com/jantman/rebuildbot'
}
}
url = PUBLIC + '/repo/' + quote(repo_slug, safe='') + '/requests'
logger.debug("Triggering build of %s %s via %s", repo_slug, branch, url)
headers = self.travis._HEADERS
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['Travis-API-Version'] = '3'
res = self.travis._session.post(url, json=body, headers=headers)
if res.status_code >= 200 and res.status_code < 300:
logger.info("Successfully triggered build on %s", repo_slug)
return
raise TravisTriggerError(repo_slug, branch, url, res.status_code,
res.headers, res.text)
@staticmethod
def url_for_build(repo_slug, build_num):
"""
Given a repository name and build number, return the HTML URL for the
build.
"""
s = 'https://travis-ci.org/%s/builds/%s' % (repo_slug, build_num)
return s
def get_build(self, build_id):
"""
Return the Build object for the specified build ID.
:param build_id: the build ID of the build to get
:type build_id: int
:rtype: :py:class:`travispy.entities.Build`
"""
b = self.travis.build(build_id)
b.check_state()
return b
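# A hypothetical usage sketch (not part of the original module); the token and
# repository handling below are placeholders:
#
#     travis = Travis(github_token='<github-token>')
#     for slug in travis.get_repos(date_check=True):
#         old_id, new_id = travis.run_build(slug, branch='master')
#         print(Travis.url_for_build(slug, new_id or old_id))
#
# run_build() returns (old_build_id, None) when the triggered build has not
# shown up within CHECK_WAIT_TIME * POLL_NUM_TIMES seconds, in which case the
# caller is expected to poll get_last_build()/get_build() again later.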
|
jantman/rebuildbot
|
rebuildbot/travis.py
|
Python
|
agpl-3.0
| 10,259
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20141128_0831'),
]
operations = [
migrations.AlterField(
model_name='acl',
name='user',
field=models.ForeignKey(to='api.UserData', related_name='acls'),
),
]
|
Virako/authapi
|
authapi/api/migrations/0004_auto_20141128_0914.py
|
Python
|
agpl-3.0
| 419
|
from django.conf.urls.defaults import *
from twistranet.core.views import AsView
from views import *
urlpatterns = patterns('sharing',
url(r'^like_toggle_by_id/(\d+)$', AsView(LikeToggleView, lookup = 'id'), name=LikeToggleView.name),
url(r'^like_toggle_by_slug/(\d+)$', AsView(LikeToggleView, lookup = 'slug'), name=LikeToggleView.name),
)
|
numericube/twistranet
|
twistranet/sharing/urls.py
|
Python
|
agpl-3.0
| 351
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Item Info')
class TestItemInfo(unittest.TestCase):
pass
|
reddymeghraj/showroom
|
erpnext/buying/doctype/item_info/test_item_info.py
|
Python
|
agpl-3.0
| 300
|
# -*- encoding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from osv import fields, orm, osv
from tools.translate import _
import netsvc
import tools
class stock_location(orm.Model):
_inherit = "stock.location"
_columns = {
'retention_mode': fields.selection(
[('retention', 'Retention Mode'), ('thru', 'Thru mode')],
'Retention Mode',
required=True,
help="In 'Retention mode' the system wait for the\
whole quantity before the stuff is processed.\n"
"In 'Thru mode' the shipped quantity is processed regardless\
of the ordered quantity."
),
}
_defaults = {
'retention_mode': 'retention',
}
class stock_picking(orm.Model):
_inherit = "stock.picking"
def get_move_chain(self, cr, uid, move_id, context=None, move_obj=False):
'''Recursively get the chained moves
@return list of the chained moves
'''
if not move_obj:
move_obj = self.pool.get('stock.move')
        move_tbc = move_obj.browse(cr, uid, move_id, context)
if move_tbc.move_dest_id: # If there is move_dest_id in the chain
move_chain = self.get_move_chain(cr, uid, move_tbc.move_dest_id.id, context)
else:
move_chain = []
move_chain.append(move_tbc)
return move_chain
def copy_pick_chain(self, cr, uid, all_moves, context=None):
'''Copy all the picking related to this order
@return the dictionary of couple: old_pick_id => new_pick_id
'''
new_picks = {}
all_chained_moves = []
sequence_obj = self.pool.get('ir.sequence')
for move in all_moves:
all_chained_moves.extend(self.get_move_chain(cr, uid, move.id, context))
for move in all_chained_moves:
if move.picking_id.id and not new_picks.has_key(move.picking_id.id):
pick_tbc = self.browse(cr, uid, move.picking_id.id, context)
new_note = ((pick_tbc.note if pick_tbc.note else '') + ' Copy of stock.pick[%d].') % move.picking_id.id
new_pick_id = self.copy(cr, uid, move.picking_id.id, {
'state': 'draft',
'note': new_note,
'name': sequence_obj.get(cr, uid, 'stock.picking.%s'%(pick_tbc.type)),
'move_lines' : [],
})
new_picks[move.picking_id.id] = new_pick_id
return new_picks
def copy_move_chain(self, cr, uid, move_id, product_qty, new_picks, context=None, move_obj=False):
'''Recursively copy the chained move until a location in retention mode or the end.
@return id of the new first move.
'''
if not move_obj:
move_obj = self.pool.get('stock.move')
move_tbc = move_obj.browse(cr, uid, move_id, context)
move_dest_id = False
if move_tbc.move_dest_id and move_tbc.location_dest_id.retention_mode == 'thru': # If there is move_dest_id in the chain and the current location is in thru mode, we need to make a copy of that, then use it as new move_dest_id.
move_dest_id = self.copy_move_chain(cr, uid, move_tbc.move_dest_id.id, product_qty, new_picks, context, move_obj)
my_picking_id = (new_picks[move_tbc.picking_id.id] if new_picks.has_key(move_tbc.picking_id.id) else False)
new_note = ((move_tbc.note if move_tbc.note else '') + ' Copy of stock.move[%d].') % move_id
new_move_id = move_obj.copy(cr, uid, move_id, {
'move_dest_id': move_dest_id,
'state': 'waiting',
'note': new_note,
'move_history_ids': False, # Don't inherit child, populate it in next step. The same to next line.
'move_history_ids2': False,
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : my_picking_id,
'price_unit': move_tbc.price_unit,
})
if move_dest_id: # Create the move_history_ids (child) if there is.
move_obj.write(cr, uid, [new_move_id], {'move_history_ids': [(4, move_dest_id)]})
return new_move_id
def update_move_chain_pick(self, cr, uid, move_id, vals, new_picks, context=None):
'''Recursively update the new chained move with the new related picking by the first move id until a location in retention mode or the end.
@return True if ok.
'''
move_obj = self.pool.get('stock.move')
move_tbu = move_obj.browse(cr, uid, move_id, context)
while True:
vals.update(picking_id=new_picks[move_tbu.picking_id.id])
move_obj.write(cr, uid, [move_tbu.id], vals, context)
if not move_tbu.move_dest_id or move_tbu.location_dest_id.retention_mode != 'thru':
break
move_tbu = move_tbu.move_dest_id
return True
def update_move_chain(self, cr, uid, move_id, vals, context=None):
'''Recursively update the old chained move by the first move id until a location in retention mode or the end.
@return True if ok.
'''
ids = [move_id]
move_obj = self.pool.get('stock.move')
move_tbu = move_obj.browse(cr, uid, move_id, context)
while move_tbu.move_dest_id and move_tbu.location_dest_id.retention_mode == 'thru':
ids.append(move_tbu.move_dest_id.id)
move_tbu = move_tbu.move_dest_id
move_obj.write(cr, uid, ids, vals, context)
return True
def isPickNotEmpty(self, cr, uid, pick_id, move_obj, context=None):
cpt = move_obj.search(
cr, uid,
[('picking_id', '=', pick_id)],
context=context, count=True)
return cpt > 0
def check_production_node_move_chain(
self, cr, uid, move_tbc, context=None):
if move_tbc.location_id.usage == 'production' or \
move_tbc.location_dest_id.usage == 'production':
return True
return False
def has_production_mode(self, cr, uid, all_moves, context=None):
for move in all_moves:
if self.check_production_node_move_chain(cr, uid, move, context):
return True
return False
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial picking and moves done.
@param partial_datas : Dictionary containing details of partial picking
like partner_id, address_id, delivery_date,
delivery moves with product_id, product_qty, uom
@return: Dictionary of values
"""
if context is None:
context = {}
else:
context = dict(context)
res = {}
move_obj = self.pool.get('stock.move')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids, context=context):
new_picks = False
complete, too_many, too_few, all_moves = [], [], [], []
move_product_qty = {}
prodlot_ids = {}
product_avail = {}
for move in pick.move_lines:
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s' % (move.id), {})
                # Commented in order to allow processing fewer stock moves from the partial picking wizard
#assert partial_data, _('Missing partial picking data for move #%s') % (move.id)
product_qty = partial_data.get('product_qty') or 0.0
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom') or False
product_price = partial_data.get('product_price') or 0.0
product_currency = partial_data.get('product_currency') or False
prodlot_id = partial_data.get('prodlot_id') or False
prodlot_ids[move.id] = prodlot_id
all_moves.append(move)
if move.product_qty == product_qty:
complete.append(move)
elif move.product_qty > product_qty:
too_few.append(move)
else:
too_many.append(move)
# Average price computation
if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if product.id in product_avail:
product_avail[product.id] += qty
else:
product_avail[product.id] = product.qty_available
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product.qty_available <= 0:
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context)[product.id]
new_std_price = ((amount_unit * product_avail[product.id]) \
+ (new_price * qty)) / (product_avail[product.id] + qty)
# Write the field according to price type field
product_obj.write(cr, uid, [product.id], {'standard_price': new_std_price})
# Record the values that were chosen in the wizard, so they can be
# used for inventory valuation if real-time valuation is enabled.
move_obj.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency})
if not too_few:
res = super(stock_picking, self).do_partial(cr, uid, [pick.id], partial_datas, context=context)
else:
if self.has_production_mode(cr, uid, all_moves, context=context):# check if there is a production location in the chain
res[pick.id] = super(stock_picking, self).do_partial(cr, uid, [pick.id], partial_datas, context=context)
#res[pick.id]['warning'] = {'title': _('Warning'), 'message': _('One of your location destinations type is Production. Only the first pick has been split.')}
else:
new_picks = self.copy_pick_chain(cr, uid, all_moves, context)
for move in too_few:
product_qty = move_product_qty[move.id] #actual received quantity
if product_qty != 0:
"""Copy not only one move, but all the moves where the destination location is in THRU MODE """
new_move_id = self.copy_move_chain(cr, uid, move.id, product_qty, new_picks, context)
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
self.update_move_chain(cr, uid, new_move_id, {
'prodlot_id': prodlot_id,
}, context)
"""Update the old moves with the remaining quantity"""
self.update_move_chain(cr, uid, move.id, {
'product_qty': move.product_qty - product_qty,
'product_uos_qty': move.product_qty - product_qty,#TODO: put correct uos_qty
}, context)
else:
#EC self.write(cr, uid, [move.id],
move_obj.write(cr, uid, [move.id],#EC
{
                                    'state': 'waiting',
})
for move in complete:
defaults = {}
if prodlot_ids.get(move.id):
defaults.update(prodlot_id=prodlot_id)
if move.location_id.retention_mode == 'thru':
self.update_move_chain_pick(cr, uid, move.id, defaults, new_picks, context)
else:
move_obj.write(cr, uid, [move.id], {'picking_id' : new_picks[move.picking_id.id]}, context)
for move in too_many:
product_qty = move_product_qty[move.id]
defaults = {}
defaults_1st_move = {
'picking_id' : new_picks[move.picking_id.id],
'product_qty' : product_qty,
'product_uos_qty': product_qty, #TODO: put correct uos_qty
}
prodlot_id = prodlot_ids.get(move.id)
if prodlot_ids.get(move.id):
defaults.update(prodlot_id=prodlot_id)
defaults_1st_move.update(prodlot_id=prodlot_id)
move_obj.write(cr, uid, [move.id], defaults_1st_move, context)
if move.location_id.retention_mode == 'thru':
self.update_move_chain_pick(cr, uid, move.id, defaults, new_picks, context)
else:
move_obj.write(cr, uid, [move.id], {'picking_id' : new_picks[move.picking_id.id]}, context)
# At first we confirm the new pickings (if necessary)
for old_pick, new_pick in new_picks.iteritems():
# check if the old pick is empty
if not self.isPickNotEmpty(cr, uid, old_pick, move_obj, context):
self.unlink(cr, uid, [old_pick])
# check if the new pick is not empty
if self.isPickNotEmpty(cr, uid, new_pick, move_obj, context):
if self.isPickNotEmpty(cr, uid, old_pick, move_obj, context):
self.write(cr, uid, [old_pick], {'backorder_id': new_pick})
wf_service.trg_validate(uid, 'stock.picking', new_pick, 'button_confirm', cr)
# Alex commented this, fix task:4547
# self.action_move(cr, uid, [new_pick])
else:
self.unlink(cr, uid, [new_pick])
#pick.refresh() <= Works on 6.1
# Here we set the moves as "assigned"
pick_hack = self.browse(cr, uid, pick.id, context=context)
for move in pick_hack.backorder_id.move_lines:
move_obj.action_assign(cr, uid, [move.id])
# The pick is set as "confirmed" then "done"
wf_service.trg_validate(uid, 'stock.picking', new_picks[pick.id], 'button_done', cr)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
#pick.refresh() <= Works on 6.1
# Finally we set the next pick as "assigned"
pick_hack = self.browse(cr, uid, pick.id, context=context)
for move in pick_hack.backorder_id.move_lines:
if move.move_dest_id.picking_id and self.test_assigned(cr, uid, [move.move_dest_id.picking_id.id]):
self.action_assign_wkf(cr, uid, [move.move_dest_id.picking_id.id], context=context)
res[pick.id] = {'delivered_picking': new_picks[pick.id] or False}
return res
class stock_move(orm.Model):
_name = "stock.move"
_inherit = "stock.move"
def copy_move_chain(self, cr, uid, move_id, product_qty, context=None):
'''Recursively copy the chained move until a location in retention mode or the end.
@return id of the new first move.
'''
move_tbc = self.browse(cr, uid, move_id, context)
move_dest_id = False
if move_tbc.move_dest_id and move_tbc.location_dest_id.retention_mode == 'thru': # If there is move_dest_id in the chain and the current location is in thru mode, we need to make a copy of that, then use it as new move_dest_id.
move_dest_id = self.copy_move_chain(cr, uid, move_tbc.move_dest_id.id, product_qty, context)
new_note = ((move_tbc.note if move_tbc.note else '') + ' Copy of stock.move[%d].') % move_id
new_move_id = self.copy(cr, uid, move_id, {
'move_dest_id': move_dest_id,
'state': 'waiting',
'note': new_note,
'move_history_ids': False, # Don't inherit child, populate it in next step. The same to next line.
'move_history_ids2': False,
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : move_tbc.picking_id.id,
'price_unit': move_tbc.price_unit,
'auto_validate': False
})
if move_dest_id: # Create the move_history_ids (child) if there is.
self.write(cr, uid, [new_move_id], {'move_history_ids': [(4, move_dest_id)]})
return new_move_id
def update_move_chain(self, cr, uid, move_id, vals, context=None):
'''Recursively update the chained move by the first move id until a location in retention mode or the end.
@return True if ok.
'''
if isinstance(move_id, list):
move_id = move_id[0]
ids = [move_id]
move_tbu = self.browse(cr, uid, move_id, context)
while move_tbu.move_dest_id and move_tbu.location_dest_id.retention_mode == 'thru':
ids.append(move_tbu.move_dest_id.id)
move_tbu = move_tbu.move_dest_id
self.write(cr, uid, ids, vals, context)
return True
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial pickings and moves done.
@param partial_datas: Dictionary containing details of partial picking
like partner_id, address_id, delivery_date, delivery
moves with product_id, product_qty, uom
"""
res = {}
picking_obj = self.pool.get('stock.picking')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
complete, too_many, too_few = [], [], []
move_product_qty = {}
prodlot_ids = {}
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), False)
assert partial_data, _('Missing partial picking data for move #%s') % (move.id)
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom',False)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_ids[move.id] = partial_data.get('prodlot_id')
if move.product_qty == product_qty:
complete.append(move)
elif move.product_qty > product_qty:
too_few.append(move)
else:
too_many.append(move)
# Average price computation
if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product.qty_available <= 0:
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context)[product.id]
new_std_price = ((amount_unit * product.qty_available)\
+ (new_price * qty))/(product.qty_available + qty)
product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})
# Record the values that were chosen in the wizard, so they can be
# used for inventory valuation if real-time valuation is enabled.
self.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency,
})
for move in too_few:
product_qty = move_product_qty[move.id]
if product_qty != 0:
                # Copy not only one move, but all the moves where the
                # destination location is in THRU MODE.
                new_move_id = self.copy_move_chain(cr, uid, move.id, product_qty, context)
                prodlot_id = prodlot_ids[move.id]
                if prodlot_id:
                    # Propagate the production lot to the whole new chain.
                    self.update_move_chain(cr, uid, new_move_id, {
                        'prodlot_id': prodlot_id,
                    }, context)
                complete.append(self.browse(cr, uid, new_move_id))
                # Update not only one move, but all the moves where the
                # destination location is in THRU MODE.
                self.update_move_chain(cr, uid, [move.id], {
                    'product_qty': move.product_qty - product_qty,
                    'product_uos_qty': move.product_qty - product_qty,
                }, context)
else:
self.write(cr, uid, [move.id],
{
                        'state': 'waiting',
})
for move in too_many:
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty,
'product_uos_qty': move.product_qty,
})
complete.append(move)
for move in complete:
if prodlot_ids.get(move.id):
self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
self.action_done(cr, uid, [move.id], context=context)
if move.picking_id.id :
# TOCHECK : Done picking if all moves are done
cr.execute("""
SELECT move.id FROM stock_picking pick
RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
WHERE pick.id = %s""",
('done', move.picking_id.id))
res = cr.fetchall()
if len(res) == len(move.picking_id.move_lines):
picking_obj.action_move(cr, uid, [move.picking_id.id])
wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)
return [move.id for move in complete]
|
Elico-Corp/openerp-7.0
|
stock_back2back_order_proc/stock.py
|
Python
|
agpl-3.0
| 26,042
|
"""
Comprehensive Theming support for Django's collectstatic functionality.
See https://docs.djangoproject.com/en/1.8/ref/contrib/staticfiles/
"""
from __future__ import absolute_import
import os.path
import posixpath
import re
from django.conf import settings
from django.contrib.staticfiles.finders import find
from django.contrib.staticfiles.storage import CachedFilesMixin, StaticFilesStorage
from django.utils._os import safe_join
from django.utils.six.moves.urllib.parse import ( # pylint: disable=no-name-in-module, import-error
unquote,
urldefrag,
urlsplit
)
from pipeline.storage import PipelineMixin
from openedx.core.djangoapps.theming.helpers import (
get_current_theme,
get_project_root_name,
get_theme_base_dir,
get_themes,
is_comprehensive_theming_enabled
)
class ThemeStorage(StaticFilesStorage):
"""
Comprehensive theme aware Static files storage.
"""
# prefix for file path, this prefix is added at the beginning of file path before saving static files during
# collectstatic command.
# e.g. having "edx.org" as prefix will cause files to be saved as "edx.org/images/logo.png"
# instead of "images/logo.png"
prefix = None
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None, prefix=None):
self.prefix = prefix
super(ThemeStorage, self).__init__(
location=location,
base_url=base_url,
file_permissions_mode=file_permissions_mode,
directory_permissions_mode=directory_permissions_mode,
)
def url(self, name):
"""
        Returns the url of the asset; the themed url is returned if the asset is themed, otherwise the default
        asset url is returned.
Args:
name: name of the asset, e.g. 'images/logo.png'
Returns:
url of the asset, e.g. '/static/red-theme/images/logo.png' if current theme is red-theme and logo
is provided by red-theme otherwise '/static/images/logo.png'
"""
prefix = ''
theme = get_current_theme()
        # get theme prefix from the site address if the asset is accessed via a url
if theme:
prefix = theme.theme_dir_name
# get theme prefix from storage class, if asset is accessed during collectstatic run
elif self.prefix:
prefix = self.prefix
# join theme prefix with asset name if theme is applied and themed asset exists
if prefix and self.themed(name, prefix):
name = os.path.join(prefix, name)
return super(ThemeStorage, self).url(name)
def themed(self, name, theme):
"""
Returns True if given asset override is provided by the given theme otherwise returns False.
Args:
name: asset name e.g. 'images/logo.png'
theme: theme name e.g. 'red-theme', 'edx.org'
Returns:
True if given asset override is provided by the given theme otherwise returns False
"""
if not is_comprehensive_theming_enabled():
return False
# in debug mode check static asset from within the project directory
if settings.DEBUG:
themes_location = get_theme_base_dir(theme, suppress_error=True)
# Nothing can be themed if we don't have a theme location or required params.
if not all((themes_location, theme, name)):
return False
themed_path = "/".join([
themes_location,
theme,
get_project_root_name(),
"static/"
])
name = name[1:] if name.startswith("/") else name
path = safe_join(themed_path, name)
return os.path.exists(path)
# in live mode check static asset in the static files dir defined by "STATIC_ROOT" setting
else:
return self.exists(os.path.join(theme, name))
class ThemeCachedFilesMixin(CachedFilesMixin):
"""
Comprehensive theme aware CachedFilesMixin.
Main purpose of subclassing CachedFilesMixin is to override the following methods.
1 - _url
2 - url_converter
_url:
        This method takes an asset name as argument and is responsible for adding a hash to the name to support caching.
        This method is called during both the collectstatic command and live server runs.
        When called during the collectstatic command, the name argument will be the asset name inside STATIC_ROOT;
        for non-themed assets it will be the usual path (e.g. 'images/logo.png'), but for themed assets it will
        also contain the theme dir prefix (e.g. 'red-theme/images/logo.png'). So, here we check whether the themed asset
        exists or not: if it exists we pass the same name up the MRO chain for further processing, and if it does not
        exist we strip the theme name and pass the new asset name up the MRO chain for further processing.
        When called during a server run, we get the theme dir for the current site using `get_current_theme` and
        make sure to prefix the theme dir to the asset name. This ensures that the correct hash is used in the file name.
        e.g. if our red-theme overrides 'images/logo.png' and we do not prefix the theme dir to the asset name, the hash
        for '{platform-dir}/lms/static/images/logo.png' would be used instead of
        '{themes_base_dir}/red-theme/images/logo.png'
    url_converter:
        This function returns another function that is responsible for hashing urls that appear inside assets
        (e.g. url("images/logo.png") inside css). The method defined in the superclass adds a hash to the file name
        and returns the relative url of the file.
        e.g. for url("../images/logo.png") it would return url("../images/logo.790c9a5340cb.png"). However, we
        want it to return an absolute url (e.g. url("/static/images/logo.790c9a5340cb.png")) so that it works properly
        with themes.
        The overridden method here simply comments out the line that converts the absolute url to a relative url,
        hence absolute urls are used instead of relative urls.
"""
def _processed_asset_name(self, name):
"""
Returns either a themed or unthemed version of the given asset name,
depending on several factors.
See the class docstring for more info.
"""
theme = get_current_theme()
if theme and theme.theme_dir_name not in name:
# during server run, append theme name to the asset name if it is not already there
            # this is to ensure that the correct hash is created and the default asset is
            # not always used to create the hash of themed assets.
name = os.path.join(theme.theme_dir_name, name)
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
asset_name = name
if not self.exists(clean_name):
            # if the themed asset does not exist then use the default asset
theme = name.split("/", 1)[0]
# verify that themed asset was accessed
if theme in [theme.theme_dir_name for theme in get_themes()]:
asset_name = "/".join(name.split("/")[1:])
return asset_name
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
This override method swaps out `name` with a processed version.
See the class docstring for more info.
"""
processed_asset_name = self._processed_asset_name(name)
return super(ThemeCachedFilesMixin, self)._url(hashed_name_func, processed_asset_name, force, hashed_files)
def url_converter(self, name, hashed_files, template=None):
"""
This is an override of url_converter from CachedFilesMixin.
It changes one line near the end of the method (see the NOTE) in order
to return absolute urls instead of relative urls. This behavior is
necessary for theme overrides, as we get 404 on assets with relative
urls on a themed site.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self._url(
self._stored_name, unquote(target_name),
force=True, hashed_files=hashed_files,
)
# NOTE:
# The line below was commented out so that absolute urls are used instead of relative urls to make themed
# assets work correctly.
#
# The line is commented and not removed to make future django upgrade easier and show exactly what is
# changed in this method override
#
#transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
transformed_url = hashed_url # This line was added.
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
# Return the hashed version to the file
return template % unquote(transformed_url)
return converter
class ThemePipelineMixin(PipelineMixin):
"""
Mixin to make sure themed assets are also packaged and used along with non themed assets.
if a source asset for a particular package is not present then the default asset is used.
e.g. in the following package and for 'red-theme'
'style-vendor': {
'source_filenames': [
'js/vendor/afontgarde/afontgarde.css',
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
'css/vendor/responsive-carousel/responsive-carousel.css',
'css/vendor/responsive-carousel/responsive-carousel.slide.css',
],
'output_filename': 'css/lms-style-vendor.css'
}
'red-theme/css/vendor/responsive-carousel/responsive-carousel.css' will be used of it exists otherwise
'css/vendor/responsive-carousel/responsive-carousel.css' will be used to create 'red-theme/css/lms-style-vendor.css'
"""
packing = True
def post_process(self, paths, dry_run=False, **options):
"""
This post_process hook is used to package all themed assets.
"""
if dry_run:
return
themes = get_themes()
for theme in themes:
css_packages = self.get_themed_packages(theme.theme_dir_name, settings.PIPELINE['STYLESHEETS'])
from pipeline.packager import Packager
packager = Packager(storage=self, css_packages=css_packages)
for package_name in packager.packages['css']:
package = packager.package_for('css', package_name)
output_file = package.output_filename
if self.packing:
packager.pack_stylesheets(package)
paths[output_file] = (self, output_file)
yield output_file, output_file, True
super_class = super(ThemePipelineMixin, self)
if hasattr(super_class, 'post_process'):
for name, hashed_name, processed in super_class.post_process(paths.copy(), dry_run, **options):
yield name, hashed_name, processed
@staticmethod
def get_themed_packages(prefix, packages):
"""
        Update paths with the themed assets.
Args:
prefix: theme prefix for which to update asset paths e.g. 'red-theme', 'edx.org' etc.
packages: packages to update
        Returns: dict of packages with source and output paths updated to use themed assets where they exist
"""
themed_packages = {}
for name in packages:
# collect source file names for the package
source_files = []
for path in packages[name].get('source_filenames', []):
# if themed asset exists use that, otherwise use default asset.
if find(os.path.join(prefix, path)):
source_files.append(os.path.join(prefix, path))
else:
source_files.append(path)
themed_packages[name] = {
'output_filename': os.path.join(prefix, packages[name].get('output_filename', '')),
'source_filenames': source_files,
}
return themed_packages
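    # Illustrative sketch (not part of the original module): with the
    # 'style-vendor' package from the class docstring and a 'red-theme' that
    # only overrides responsive-carousel.css, get_themed_packages('red-theme',
    # packages) would return roughly:
    #
    #     {
    #         'style-vendor': {
    #             'output_filename': 'red-theme/css/lms-style-vendor.css',
    #             'source_filenames': [
    #                 'js/vendor/afontgarde/afontgarde.css',
    #                 'css/vendor/font-awesome.css',
    #                 'css/vendor/jquery.qtip.min.css',
    #                 'red-theme/css/vendor/responsive-carousel/responsive-carousel.css',
    #                 'css/vendor/responsive-carousel/responsive-carousel.slide.css',
    #             ],
    #         },
    #     }
    #
    # i.e. each source file is swapped for its themed counterpart only when the
    # staticfiles finders can locate one under the theme prefix.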
|
ESOedX/edx-platform
|
openedx/core/djangoapps/theming/storage.py
|
Python
|
agpl-3.0
| 13,828
|
"""
Discounts application configuration
"""
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.apps import AppConfig
class DiscountsConfig(AppConfig):
name = 'openedx.features.discounts'
|
ESOedX/edx-platform
|
openedx/features/discounts/apps.py
|
Python
|
agpl-3.0
| 220
|
# -*- coding: utf-8 -*-
from .base_premierecinemas import BasePremierecinemasCinemaSpider
class Spider(BasePremierecinemasCinemaSpider):
name = 'praha-premierecinemas'
calendar_url = 'http://www.premierecinemas.cz/'
|
zitkino/backend
|
zitkino/spiders/praha_premierecinemas.py
|
Python
|
agpl-3.0
| 229
|
# Part of Patient Flow.
# See LICENSE file for full copyright and licensing details.
from openerp.osv import orm, fields, osv
from openerp import SUPERUSER_ID
import logging
_logger = logging.getLogger(__name__)
class nh_etake_list_review(orm.Model):
_name = "nh.etake_list.review"
_inherits = {'nh.activity': 'activity_id'}
_description = "Review View"
_rec_name = 'patient_id'
_auto = False
_table = "nh_etake_list_review"
_state_selection = [['To be Reviewed', 'To be Reviewed'],
['PTWR', 'PTWR'],
['Discharged', 'Discharged'],
['To be Discharged', 'To be Discharged'],
['Other', 'Other']]
_columns = {
'activity_id': fields.many2one('nh.activity', 'Activity', required=1, ondelete='restrict'),
'location_id': fields.many2one('nh.clinical.location', 'Ward'),
'patient_id': fields.many2one('nh.clinical.patient', 'Patient'),
'hospital_number': fields.text('Hospital Number'),
'nhs_number': fields.text('NHS Number'),
'state': fields.selection(_state_selection, 'State'),
'date_started': fields.datetime('Started'),
'date_terminated': fields.datetime('Completed'),
        'user_id': fields.many2one('res.users', 'Assignee'),
'doctor_task_ids': fields.one2many('nh.activity', 'parent_id', string='Doctor Tasks', domain="[['data_model','=','nh.clinical.doctor.task']]"),
'ptwr_id': fields.many2one('nh.activity', 'PTWR Activity'),
'diagnosis': fields.text('Diagnosis'),
'plan': fields.text('Plan')
}
def _get_review_groups(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
res = [['To be Reviewed', 'To be Reviewed'], ['To be Discharged', 'To be Discharged'], ['PTWR', 'PTWR']]
fold = {r[0]: False for r in res}
return res, fold
_group_by_full = {
'state': _get_review_groups,
}
def create_task(self, cr, uid, ids, context=None):
data = self.browse(cr, uid, ids[0], context=context)
context.update({'default_patient_id': data.patient_id.id, 'default_spell_id': data.id})
return {
'name': 'Add Task',
'type': 'ir.actions.act_window',
'res_model': 'nh.clinical.doctor_task_wizard',
'view_mode': 'form',
'view_type': 'form',
'target': 'new',
'context': context
}
def init(self, cr):
cr.execute("""
drop view if exists %s;
create or replace view %s as (
select
spell_activity.id as id,
review_activity.id as activity_id,
case
when discharge_activity.state is not null and discharge_activity.state = 'completed' then 'Discharged'
when discharge_activity.state is not null and discharge_activity.state != 'completed' then 'To be Discharged'
when ptwr_activity.state is not null then 'PTWR'
when review_activity.state = 'scheduled' then 'To be Reviewed'
else 'Other'
end as state,
review_activity.date_started as date_started,
review_activity.date_terminated as date_terminated,
review_activity.user_id as user_id,
ptwr_activity.id as ptwr_id,
spell.patient_id as patient_id,
spell.diagnosis as diagnosis,
spell.doctor_plan as plan,
location.id as location_id,
patient.other_identifier as hospital_number,
patient.patient_identifier as nhs_number
from nh_clinical_spell spell
inner join nh_activity spell_activity on spell_activity.id = spell.activity_id
inner join nh_clinical_patient patient on spell.patient_id = patient.id
inner join nh_activity review_activity on review_activity.parent_id = spell_activity.id and review_activity.data_model = 'nh.clinical.patient.review'
left join nh_activity discharge_activity on discharge_activity.parent_id = spell_activity.id and discharge_activity.data_model = 'nh.clinical.adt.patient.discharge'
left join nh_activity ptwr_activity on ptwr_activity.parent_id = spell_activity.id and ptwr_activity.data_model = 'nh.clinical.ptwr' and ptwr_activity.state != 'completed'
left join nh_clinical_location location on location.id = spell.location_id
)
""" % (self._table, self._table))
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
if 'doctor_task_ids' in fields:
activity_pool = self.pool['nh.activity']
fields.remove('doctor_task_ids')
read_values = super(nh_etake_list_review, self).read(cr, uid, ids, fields, context, load)
for rv in read_values:
rv['doctor_task_ids'] = activity_pool.search(cr, uid, [['parent_id', '=', rv['id']], ['data_model', '=', 'nh.clinical.doctor.task']], context=context)
return read_values
return super(nh_etake_list_review, self).read(cr, uid, ids, fields, context, load)
def write(self, cr, uid, ids, vals, context=None):
activity_pool = self.pool['nh.activity']
for review in self.browse(cr, uid, ids, context=context):
if 'diagnosis' in vals:
activity_pool.submit(cr, uid, review.activity_id.id, {'diagnosis': vals['diagnosis']}, context=context)
activity_pool.submit(cr, uid, review.id, {'diagnosis': vals['diagnosis']}, context=context)
if 'plan' in vals:
activity_pool.submit(cr, uid, review.activity_id.id, {'plan': vals['plan']}, context=context)
activity_pool.submit(cr, uid, review.id, {'doctor_plan': vals['plan']}, context=context)
if 'doctor_task_ids' in vals:
for dt in vals['doctor_task_ids']:
if dt[2]:
activity_pool.write(cr, uid, dt[1], dt[2], context=context)
return True
def transfer(self, cr, uid, ids, context=None):
ptwr_pool = self.pool['nh.clinical.ptwr']
api_pool = self.pool['nh.clinical.api']
activity_pool = self.pool['nh.activity']
for review in self.browse(cr, uid, ids, context=context):
spell_activity_id = api_pool.get_patient_spell_activity_id(cr, SUPERUSER_ID, review.patient_id.id, context=context)
if not spell_activity_id:
raise osv.except_osv("Error!", "Spell not found!")
ptwr_id = ptwr_pool.create_activity(cr, uid, {
'parent_id': spell_activity_id,
'creator_id': review.id
}, {}, context=context)
activity_pool.submit(cr, uid, review.activity_id.id, {'location_id': review.location_id.id}, context=context)
activity_pool.complete(cr, uid, review.activity_id.id, context=context)
return True
def discharge(self, cr, uid, ids, context=None):
discharge_pool = self.pool['nh.clinical.adt.patient.discharge']
api_pool = self.pool['nh.clinical.api']
activity_pool = self.pool['nh.activity']
for review in self.browse(cr, uid, ids, context=context):
spell_activity_id = api_pool.get_patient_spell_activity_id(cr, SUPERUSER_ID, review.patient_id.id, context=context)
if not spell_activity_id:
raise osv.except_osv("Error!", "Spell not found!")
discharge_id = discharge_pool.create_activity(cr, uid, {
'parent_id': spell_activity_id,
'creator_id': review.id
}, {'other_identifier': review.patient_id.other_identifier}, context=context)
activity_pool.submit(cr, uid, review.activity_id.id, {'location_id': review.location_id.id}, context=context)
activity_pool.complete(cr, uid, review.activity_id.id, context=context)
return True
def ptwr_complete(self, cr, uid, ids, context=None):
activity_pool = self.pool['nh.activity']
for review in self.browse(cr, uid, ids, context=context):
activity_pool.submit(cr, uid, review.ptwr_id.id, {}, context=context)
activity_pool.complete(cr, uid, review.ptwr_id.id, context=context)
return True
|
NeovaHealth/patientflow
|
nh_etake_list/review.py
|
Python
|
agpl-3.0
| 8,568
|
from . import test_invoice_mode_weekly
from . import test_invoice_mode_weekly_is_it_today
|
OCA/account-invoicing
|
account_invoice_mode_weekly/tests/__init__.py
|
Python
|
agpl-3.0
| 90
|
# -*- coding: utf-8 -*-
# Author: Alexandre Fayolle
# Copyright 2013 Camptocamp SA
# Author: Damien Crier
# Copyright 2015 Camptocamp SA
# © 2015 Eficent Business and IT Consulting Services S.L. -
# Jordi Ballester Alomar
# © 2015 Serpent Consulting Services Pvt. Ltd. - Sudhir Arya
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from . import purchase
from . import invoice
|
SerpentCS/purchase-workflow
|
purchase_order_line_sequence/models/__init__.py
|
Python
|
agpl-3.0
| 413
|
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
###
# Copyright (c) Rice University 2012-13
# This software is subject to
# the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
###
"""
This exists solely to provide less typing for a "leaf node"
in a simple relational schema (1:M and 1:M-N:1) when used with SQLAlchemy.
SA does not support class-based inheritance in the normal Python way for objects inheriting from Base. Thus we have those objects perform multiple inheritance...
"""
import json
import sqlalchemy.types
import datetime
class CNXBase():
def from_dict(self, userprofile_dict):
"""
        Should test for schema validity etc.
"""
d = userprofile_dict
for k in d:
setattr(self, k, d[k])
def to_dict(self):
"""Return self as a dict, suitable for jsonifying """
d = {}
for col in self.__table__.columns:
d[col.name] = self.safe_type_out(col)
return d
def jsonify(self):
"""Helper function that returns simple json repr """
selfd = self.to_dict()
        jsonstr = json.dumps(selfd)  # TODO: consider using a custom JSONEncoder here
return jsonstr
def safe_type_out(self, col):
"""return the value of a coulmn field safely as something that
json can use This is essentially a JSONEncoder sublclass
inside this object.
"""
        if isinstance(col.type, sqlalchemy.types.DateTime):
outstr = getattr(self, col.name).isoformat()
else:
outstr = getattr(self, col.name)
return outstr
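# Illustrative usage sketch: the UserProfile model below is hypothetical and not part
# of this module; it assumes a declarative Base and `import sqlalchemy` are available.
#
#     class UserProfile(Base, CNXBase):
#         __tablename__ = 'userprofile'
#         id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
#         name = sqlalchemy.Column(sqlalchemy.String)
#
#     profile = UserProfile()
#     profile.from_dict({'name': 'Ada'})
#     profile.jsonify()   # -> '{"id": null, "name": "Ada"}'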
|
jbarmash/rhaptos2.user
|
rhaptos2/user/cnxbase.py
|
Python
|
agpl-3.0
| 1,673
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import sys
import shutil
import openerp
from openerp import api
from openerp.osv import orm, fields
from openerp.addons.runbot.runbot import mkdirs
_logger = logging.getLogger(__name__)
MAGIC_PID_RUN_NEXT_JOB = -2
def custom_build(func):
"""Decorator for functions which should be overwritten only if
is_custom_build is enabled in repo.
"""
def custom_func(self, cr, uid, ids, context=None):
args = [
('id', 'in', ids),
('branch_id.repo_id.is_custom_build', '=', True)
]
custom_ids = self.search(cr, uid, args, context=context)
regular_ids = list(set(ids) - set(custom_ids))
ret = None
if regular_ids:
regular_func = getattr(super(runbot_build, self), func.func_name)
ret = regular_func(cr, uid, regular_ids, context=context)
if custom_ids:
assert ret is None
ret = func(self, cr, uid, custom_ids, context=context)
return ret
return custom_func
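# Descriptive note on @custom_build: a decorated method splits the incoming ids by
# repo.is_custom_build; regular builds are dispatched to the parent runbot.build
# implementation, custom builds to the decorated body. The `assert ret is None`
# guards against mixing return values from both paths in a single call.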
class runbot_build(orm.Model):
_inherit = "runbot.build"
_columns = {
'prebuilt': fields.boolean("Prebuilt"),
}
def job_00_init(self, cr, uid, build, lock_path, log_path):
res = super(runbot_build, self).job_00_init(
cr, uid, build, lock_path, log_path
)
if build.branch_id.repo_id.is_custom_build:
build.pre_build(lock_path, log_path)
build.prebuilt = True
return res
def job_10_test_base(self, cr, uid, build, lock_path, log_path):
if build.branch_id.repo_id.skip_test_jobs:
_logger.info('skipping job_10_test_base')
return MAGIC_PID_RUN_NEXT_JOB
else:
return super(runbot_build, self).job_10_test_base(
cr, uid, build, lock_path, log_path
)
def job_20_test_all(self, cr, uid, build, lock_path, log_path):
if build.branch_id.repo_id.skip_test_jobs:
_logger.info('skipping job_20_test_all')
with open(log_path, 'w') as f:
f.write('consider tests as passed: '
'.modules.loading: Modules loaded.')
return MAGIC_PID_RUN_NEXT_JOB
else:
return super(runbot_build, self).job_20_test_all(
cr, uid, build, lock_path, log_path
)
def sub_cmd(self, build, cmd):
if not cmd:
return []
if isinstance(cmd, basestring):
cmd = cmd.split()
internal_vals = {
'custom_build_dir': build.repo_id.custom_build_dir or '',
'custom_server_path': build.repo_id.custom_server_path,
'other_repo_path': build.repo_id.other_repo_id.path or '',
'build_dest': build.dest,
}
return [i % internal_vals for i in cmd]
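    # Illustrative example (the command string below is hypothetical): a repo configured
    # with custom_pre_build_cmd = "cp -r %(other_repo_path)s/addons %(custom_build_dir)s"
    # is split into tokens and each token has the internal_vals placeholders substituted,
    # e.g. yielding ['cp', '-r', '/path/to/other_repo/addons', 'my_custom_dir'].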
def pre_build(self, cr, uid, ids, lock_path, log_path, context=None):
"""Run pre-build command if there is one
Substitute path variables after splitting command to avoid problems
with spaces in internal variables.
Run command in build path to avoid relative path issues.
"""
pushd = os.getcwd()
try:
for build in self.browse(cr, uid, ids, context=context):
if build.prebuilt:
continue
cmd = self.sub_cmd(build, build.repo_id.custom_pre_build_cmd)
if not cmd:
continue
os.chdir(build.path())
self.spawn(cmd, lock_path, log_path)
finally:
os.chdir(pushd)
@custom_build
def checkout(self, cr, uid, ids, context=None):
"""Checkout in custom build directories if they are specified
Do same as superclass except for git_export path.
"""
for build in self.browse(cr, uid, ids, context=context):
if build.prebuilt:
continue
# starts from scratch
if os.path.isdir(build.path()):
shutil.rmtree(build.path())
# runbot log path
mkdirs([build.path("logs")])
# checkout branch
build_path = build.path()
custom_build_dir = build.repo_id.custom_build_dir
if custom_build_dir:
mkdirs([build.path(custom_build_dir)])
build_path = os.path.join(build_path, custom_build_dir)
build.repo_id.git_export(build.name, build_path)
@custom_build
def cmd(self, cr, uid, ids, context=None):
"""Get server start script from build config
Overwrite superclass completely
Specify database user in the case of custom config, to allow viewing
after db has been created by Odoo (using current user).
Disable multiworker
"""
build = self.browse(cr, uid, ids[0], context=context)
server_path = build.path(build.repo_id.custom_server_path)
mods = build.repo_id.modules or "base"
params = self.sub_cmd(build, build.repo_id.custom_server_params)
# commandline
cmd = [
sys.executable,
server_path,
"--no-xmlrpcs",
"--xmlrpc-port=%d" % build.port,
"--db_user=%s" % openerp.tools.config['db_user'],
"--workers=0",
] + params
return cmd, mods
@api.cr_uid_ids_context
def server(self, cr, uid, ids, *l, **kw):
for build in self.browse(cr, uid, ids, context=None):
if build.repo_id.is_custom_build:
custom_odoo_path = build.repo_id.custom_odoo_path
if custom_odoo_path and\
os.path.exists(build.path(custom_odoo_path)):
return build.path(custom_odoo_path, *l)
return super(runbot_build, self).server(cr, uid, ids, *l, **kw)
|
open-synergy/runbot-addons
|
runbot_build_instructions/runbot_build.py
|
Python
|
agpl-3.0
| 6,892
|
from .api import API, ApiMethods
from .config import Config
__all__ = ["API", "ApiMethods", "Config"]
|
Aurorastation/BOREALISbot2
|
core/subsystems/__init__.py
|
Python
|
agpl-3.0
| 102
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ChangeRequest(Document):
pass
|
gangadharkadam/office_erp
|
erpnext/projects/doctype/change_request/change_request.py
|
Python
|
agpl-3.0
| 275
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This class is a plugin for the Shinken Broker. It is in charge
# of forwarding log broks to syslog.
import syslog
from shinken.basemodule import BaseModule
from shinken.log import logger
properties = {
'daemons': ['broker'],
'type': 'syslog',
'external': False,
'phases': ['running'],
}
# called by the plugin manager to get a broker
def get_instance(plugin):
logger.info("Get a Syslog broker for plugin %s" % plugin.get_name())
#Catch errors
#path = plugin.path
instance = Syslog_broker(plugin)
return instance
# Class for the Syslog broker
# Gets log broks and writes them to the local syslog
class Syslog_broker(BaseModule):
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
    # A log brok has just arrived; write its message to syslog
def manage_log_brok(self, b):
data = b.data
syslog.syslog(data['log'].encode('UTF-8'))
|
wbsavage/shinken
|
shinken/modules/syslog_broker.py
|
Python
|
agpl-3.0
| 1,841
|
# -*- coding: utf-8 -*-
from odoo import api, models, fields, tools, _
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT, float_repr
from odoo.tests.common import Form
from odoo.exceptions import UserError
from datetime import datetime
from lxml import etree
from PyPDF2 import PdfFileReader
import io
import logging
_logger = logging.getLogger(__name__)
DEFAULT_FACTURX_DATE_FORMAT = '%Y%m%d'
class AccountEdiFormat(models.Model):
_inherit = 'account.edi.format'
def _export_invoice_to_attachment(self, invoice):
self.ensure_one()
if self.code != 'facturx_1_0_05':
return super()._export_invoice_to_attachment(invoice)
def format_date(dt):
# Format the date in the Factur-x standard.
dt = dt or datetime.now()
return dt.strftime(DEFAULT_FACTURX_DATE_FORMAT)
def format_monetary(number, currency):
# Format the monetary values to avoid trailing decimals (e.g. 90.85000000000001).
return float_repr(number, currency.decimal_places)
# Create file content.
template_values = {
'record': invoice,
'format_date': format_date,
'format_monetary': format_monetary,
}
xml_content = b"<?xml version='1.0' encoding='UTF-8'?>"
xml_content += self.env.ref('account_edi_facturx.account_invoice_facturx_export')._render(template_values)
xml_name = '%s_facturx.xml' % (invoice.name.replace('/', '_'))
return {'name': xml_name,
'datas': xml_content,
'res_model': 'account.move',
'res_id': invoice._origin.id,
'mimetype': 'application/xml'
}
def _is_facturx(self, filename, tree):
return self.code == 'facturx_1_0_05' and tree.tag == '{urn:un:unece:uncefact:data:standard:CrossIndustryInvoice:100}CrossIndustryInvoice'
def _create_invoice_from_xml_tree(self, filename, tree):
self.ensure_one()
if self._is_facturx(filename, tree):
            return self._import_facturx(tree, self.env['account.move'])
return super()._create_invoice_from_xml_tree(filename, tree)
def _update_invoice_from_xml_tree(self, filename, tree, invoice):
self.ensure_one()
if self._is_facturx(filename, tree):
return self._import_facturx(tree, invoice)
return super()._update_invoice_from_xml_tree(filename, tree, invoice)
def _import_facturx(self, tree, invoice):
""" Decodes a factur-x invoice into an invoice.
:param tree: the factur-x tree to decode.
:param invoice: the invoice to update or an empty recordset.
:returns: the invoice where the factur-x data was imported.
"""
amount_total_import = None
default_move_type = False
if invoice._context.get('default_journal_id'):
journal = self.env['account.journal'].browse(self.env.context['default_journal_id'])
default_move_type = 'out_invoice' if journal.type == 'sale' else 'in_invoice'
elif invoice._context.get('default_move_type'):
default_move_type = self._context['default_move_type']
elif invoice.move_type in self.env['account.move'].get_invoice_types(include_receipts=True):
# in case an attachment is saved on a draft invoice previously created, we might
# have lost the default value in context but the type was already set
default_move_type = invoice.move_type
if not default_move_type:
raise UserError(_("No information about the journal or the type of invoice is passed"))
if default_move_type == 'entry':
return
# Total amount.
elements = tree.xpath('//ram:GrandTotalAmount', namespaces=tree.nsmap)
total_amount = elements and float(elements[0].text) or 0.0
# Refund type.
# There is two modes to handle refund in Factur-X:
# a) type_code == 380 for invoice, type_code == 381 for refund, all positive amounts.
# b) type_code == 380, negative amounts in case of refund.
# To handle both, we consider the 'a' mode and switch to 'b' if a negative amount is encountered.
elements = tree.xpath('//rsm:ExchangedDocument/ram:TypeCode', namespaces=tree.nsmap)
type_code = elements[0].text
        default_move_type = default_move_type.replace('_refund', '_invoice')
if type_code == '381':
default_move_type = 'out_refund' if default_move_type == 'out_invoice' else 'in_refund'
refund_sign = -1
else:
# Handle 'b' refund mode.
if total_amount < 0:
default_move_type = 'out_refund' if default_move_type == 'out_invoice' else 'in_refund'
refund_sign = -1 if 'refund' in default_move_type else 1
# Write the type as the journal entry is already created.
invoice.move_type = default_move_type
# self could be a single record (editing) or be empty (new).
with Form(invoice.with_context(default_move_type=default_move_type)) as invoice_form:
# Partner (first step to avoid warning 'Warning! You must first select a partner.').
partner_type = invoice_form.journal_id.type == 'purchase' and 'SellerTradeParty' or 'BuyerTradeParty'
elements = tree.xpath('//ram:' + partner_type + '/ram:SpecifiedTaxRegistration/ram:ID', namespaces=tree.nsmap)
partner = elements and self.env['res.partner'].search([('vat', '=', elements[0].text)], limit=1)
if not partner:
elements = tree.xpath('//ram:' + partner_type + '/ram:Name', namespaces=tree.nsmap)
partner_name = elements and elements[0].text
partner = elements and self.env['res.partner'].search([('name', 'ilike', partner_name)], limit=1)
if not partner:
elements = tree.xpath('//ram:' + partner_type + '//ram:URIID[@schemeID=\'SMTP\']', namespaces=tree.nsmap)
partner = elements and self.env['res.partner'].search([('email', '=', elements[0].text)], limit=1)
if partner:
invoice_form.partner_id = partner
# Reference.
elements = tree.xpath('//rsm:ExchangedDocument/ram:ID', namespaces=tree.nsmap)
if elements:
invoice_form.ref = elements[0].text
# Name.
elements = tree.xpath('//ram:BuyerOrderReferencedDocument/ram:IssuerAssignedID', namespaces=tree.nsmap)
if elements:
invoice_form.payment_reference = elements[0].text
# Comment.
elements = tree.xpath('//ram:IncludedNote/ram:Content', namespaces=tree.nsmap)
if elements:
invoice_form.narration = elements[0].text
# Total amount.
elements = tree.xpath('//ram:GrandTotalAmount', namespaces=tree.nsmap)
if elements:
# Currency.
if elements[0].attrib.get('currencyID'):
currency_str = elements[0].attrib['currencyID']
currency = self.env.ref('base.%s' % currency_str.upper(), raise_if_not_found=False)
if currency != self.env.company.currency_id and currency.active:
invoice_form.currency_id = currency
# Store xml total amount.
amount_total_import = total_amount * refund_sign
# Date.
elements = tree.xpath('//rsm:ExchangedDocument/ram:IssueDateTime/udt:DateTimeString', namespaces=tree.nsmap)
if elements:
date_str = elements[0].text
date_obj = datetime.strptime(date_str, DEFAULT_FACTURX_DATE_FORMAT)
invoice_form.invoice_date = date_obj.strftime(DEFAULT_SERVER_DATE_FORMAT)
# Due date.
elements = tree.xpath('//ram:SpecifiedTradePaymentTerms/ram:DueDateDateTime/udt:DateTimeString', namespaces=tree.nsmap)
if elements:
date_str = elements[0].text
date_obj = datetime.strptime(date_str, DEFAULT_FACTURX_DATE_FORMAT)
invoice_form.invoice_date_due = date_obj.strftime(DEFAULT_SERVER_DATE_FORMAT)
# Invoice lines.
elements = tree.xpath('//ram:IncludedSupplyChainTradeLineItem', namespaces=tree.nsmap)
if elements:
for element in elements:
with invoice_form.invoice_line_ids.new() as invoice_line_form:
# Sequence.
line_elements = element.xpath('.//ram:AssociatedDocumentLineDocument/ram:LineID', namespaces=tree.nsmap)
if line_elements:
invoice_line_form.sequence = int(line_elements[0].text)
# Product.
line_elements = element.xpath('.//ram:SpecifiedTradeProduct/ram:Name', namespaces=tree.nsmap)
if line_elements:
invoice_line_form.name = line_elements[0].text
line_elements = element.xpath('.//ram:SpecifiedTradeProduct/ram:SellerAssignedID', namespaces=tree.nsmap)
if line_elements and line_elements[0].text:
product = self.env['product.product'].search([('default_code', '=', line_elements[0].text)])
if product:
invoice_line_form.product_id = product
if not invoice_line_form.product_id:
line_elements = element.xpath('.//ram:SpecifiedTradeProduct/ram:GlobalID', namespaces=tree.nsmap)
if line_elements and line_elements[0].text:
product = self.env['product.product'].search([('barcode', '=', line_elements[0].text)])
if product:
invoice_line_form.product_id = product
# Quantity.
line_elements = element.xpath('.//ram:SpecifiedLineTradeDelivery/ram:BilledQuantity', namespaces=tree.nsmap)
if line_elements:
invoice_line_form.quantity = float(line_elements[0].text)
# Price Unit.
line_elements = element.xpath('.//ram:GrossPriceProductTradePrice/ram:ChargeAmount', namespaces=tree.nsmap)
if line_elements:
invoice_line_form.price_unit = float(line_elements[0].text) / invoice_line_form.quantity
else:
line_elements = element.xpath('.//ram:NetPriceProductTradePrice/ram:ChargeAmount', namespaces=tree.nsmap)
if line_elements:
invoice_line_form.price_unit = float(line_elements[0].text) / invoice_line_form.quantity
# Discount.
line_elements = element.xpath('.//ram:AppliedTradeAllowanceCharge/ram:CalculationPercent', namespaces=tree.nsmap)
if line_elements:
invoice_line_form.discount = float(line_elements[0].text)
# Taxes
line_elements = element.xpath('.//ram:SpecifiedLineTradeSettlement/ram:ApplicableTradeTax/ram:RateApplicablePercent', namespaces=tree.nsmap)
invoice_line_form.tax_ids.clear()
for tax_element in line_elements:
percentage = float(tax_element.text)
tax = self.env['account.tax'].search([
('company_id', '=', invoice_form.company_id.id),
('amount_type', '=', 'percent'),
('type_tax_use', '=', invoice_form.journal_id.type),
('amount', '=', percentage),
], limit=1)
if tax:
invoice_line_form.tax_ids.add(tax)
elif amount_total_import:
# No lines in BASICWL.
with invoice_form.invoice_line_ids.new() as invoice_line_form:
invoice_line_form.name = invoice_form.comment or '/'
invoice_line_form.quantity = 1
invoice_line_form.price_unit = amount_total_import
return invoice_form.save()
|
ddico/odoo
|
addons/account_edi_facturx/models/account_edi_format.py
|
Python
|
agpl-3.0
| 12,584
|
# TmLibrary - TissueMAPS library for distibuted image analysis routines.
# Copyright (C) 2016 Markus D. Herrmann, University of Zurich and Robin Hafen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Base classes for data analysis tools.'''
import re
import logging
import inspect
import importlib
import simplejson
import numpy as np
import pandas as pd
import collections
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import FLOAT
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.cluster import KMeans
from tmlib import cfg
import tmlib.models as tm
from tmlib.config import DEFAULT_LIB, IMPLEMENTED_LIBS
from tmlib.utils import (
same_docstring_as, autocreate_directory_property, assert_type,
create_partitions
)
logger = logging.getLogger(__name__)
_register = {}
class _ToolMeta(ABCMeta):
'''Meta class for :class:`Tool <tmlib.tools.base.Tool>`.'''
def __init__(cls, cls_name, cls_bases, cls_args):
def is_abstract(cls):
is_abstract = False
if '__abstract__' in vars(cls):
if getattr(cls, '__abstract__'):
is_abstract = True
return is_abstract
if not is_abstract(cls):
required_attrs = {'__icon__', '__description__'}
for attr in required_attrs:
if not hasattr(cls, attr):
raise AttributeError(
'Tool class "%s" must implement attribute "%s".' % (
cls_name, attr
)
)
logger.debug('registering tool %s', cls.__name__)
_register[cls_name] = cls
return super(_ToolMeta, cls).__init__(cls_name, cls_bases, cls_args)
def __call__(cls, *args, **kwargs):
return super(_ToolMeta, cls).__call__(*args, **kwargs)
class Tool(object):
'''Abstract base class for data analysis tools.
Tools use the
`Pandas DataFrame <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html>`_ data container.
    This is compatible with standard machine learning libraries,
    such as `Scikit-Learn <http://scikit-learn.org/stable/>`_,
    `Caffe <http://caffe.berkeleyvision.org/>`_ or `Keras <https://keras.io/>`_.
'''
__metaclass__ = _ToolMeta
__abstract__ = True
def __init__(self, experiment_id):
'''
Parameters
----------
experiment_id: int
ID of the experiment for which the tool request is made
'''
self.experiment_id = experiment_id
def load_feature_values(self, mapobject_type_name, feature_names,
mapobject_ids=None):
'''Loads values for each given feature of the given mapobject type.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
feature_names: List[str]
name of each selected
:class:`Feature <tmlib.models.feature.Feature>`
mapobject_ids: List[int], optional
ID of each :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
for which values should be selected; if ``None`` values for
all objects will be loaded (default: ``None``)
Returns
-------
pandas.DataFrame
dataframe where columns are features and rows are mapobjects
indexable by their ID
'''
logger.info(
'load feature values for objects of type "%s"', mapobject_type_name
)
logger.debug(
'load values for features: "%s"', '", "'.join(feature_names)
)
if mapobject_ids is not None:
logger.debug('load values for %d objects', len(mapobject_ids))
else:
logger.debug('load values for all objects')
# FIXME: Use ExperimentSession
with tm.utils.ExperimentConnection(self.experiment_id) as conn:
conn.execute('''
SELECT t.id AS mapobject_type_id, f.id AS feature_id, f.name
FROM features AS f
JOIN mapobject_types AS t ON t.id = f.mapobject_type_id
WHERE f.name = ANY(%(feature_names)s)
AND t.name = %(mapobject_type_name)s;
''', {
'feature_names': feature_names,
'mapobject_type_name': mapobject_type_name
})
records = conn.fetchall()
mapobject_type_id = records[0].mapobject_type_id
feature_map = {str(r.feature_id): r.name for r in records}
sql = '''
SELECT
v.mapobject_id, v.tpoint,
slice(v.values, %(feature_ids)s) AS values
FROM feature_values AS v
JOIN mapobjects AS m
ON m.id = v.mapobject_id AND m.partition_key = v.partition_key
WHERE m.mapobject_type_id = %(mapobject_type_id)s
'''
if mapobject_ids is not None:
sql += '''
AND m.id = ANY(%(mapobject_ids)s)
'''
conn.execute(sql, {
'feature_ids': feature_map.keys(),
'mapobject_type_id': mapobject_type_id,
'mapobject_ids': mapobject_ids
})
records = conn.fetchall()
values = list()
index = list()
for r in records:
values.append(r.values)
index.append((r.mapobject_id, r.tpoint))
index = pd.MultiIndex.from_tuples(
index, names=['mapobject_id', 'tpoint']
)
# TODO: This probably creates a copy in memory. Can we avoid this?
df = pd.DataFrame(values, index=index).astype(float)
column_map = {i: name for i, name in feature_map.iteritems()}
df.rename(columns=column_map, inplace=True)
# TODO: How shall we deal with NaN values? Ideally we would expose
# the option to users to either filter rows (mapobjects) or columns
        # (features).
null_indices = self.identify_features_with_null_values(df)
for name, count in null_indices:
if count > 0:
logger.warn('feature "%s" contains %d null values', name, count)
return df
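        # Illustrative layout of the returned frame (feature names and values are
        # made up):
        #
        #                           Area   Intensity
        #     mapobject_id tpoint
        #     101          0       345.0       0.872
        #     102          0       298.0       0.631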
def calculate_extrema(self, mapobject_type_name, feature_name):
'''Calculates minimum and maximum values of a given feature and
mapobject type.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
        feature_name: str
            name of the selected
            :class:`Feature <tmlib.models.feature.Feature>`
Returns
-------
Tuple[float]
min and max
'''
logger.info(
'calculate min/max for objects of type "%s" and feature "%s"',
mapobject_type_name, feature_name
)
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
feature = session.query(tm.Feature.id).\
filter_by(
name=feature_name, mapobject_type_id=mapobject_type.id
).\
one()
lower, upper = session.query(
func.min(
tm.FeatureValues.values[str(feature.id)].cast(FLOAT)
),
func.max(
tm.FeatureValues.values[str(feature.id)].cast(FLOAT)
)
).\
join(tm.Mapobject).\
filter(
tm.Mapobject.mapobject_type_id == mapobject_type.id,
tm.FeatureValues.values[str(feature.id)] != 'nan'
).\
one()
return (lower, upper)
def get_random_mapobject_subset(self, mapobject_type_name, n):
'''Selects a random subset of mapobjects.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
n: int
number of mapobjects that should be selected at random
Returns
-------
Tuple[int]
IDs of selected mapobject
'''
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
mapobjects = session.query(tm.Mapobject.id).\
filter_by(mapobject_type_id=mapobject_type.id).\
order_by(func.random()).\
limit(n).\
all()
return [m.id for m in mapobjects]
def partition_mapobjects(self, mapobject_type_name, n):
'''Splits mapobjects into partitions of size `n`.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
n: int
number of mapobjects per partition
Returns
-------
List[List[int]]
mapobject IDs
Note
----
Mapobjects are ordered by ID.
'''
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
mapobjects = session.query(tm.Mapobject.id).\
filter_by(mapobject_type_id=mapobject_type.id).\
order_by(tm.Mapobject.id).\
all()
return create_partitions([m.id for m in mapobjects], n)
def identify_features_with_null_values(self, feature_data):
'''Identifies features with NULL values (including NaNs).
Parameters
----------
feature_data: pandas.DataFrame
            data frame where columns are feature names and rows are objects
Returns
-------
        List[Tuple[str, int]]
            name of each feature and the number of objects with NULL values
'''
null_indices = list()
for name, values in feature_data.isnull().iteritems():
null_indices.append((name, np.sum(values)))
return null_indices
def save_result_values(self, mapobject_type_name, result_id, data):
'''Saves generated label values.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
result_id: int
ID of a registerd
:class:`ToolResult <tmlib.models.result.ToolResult>`
data: pandas.Series
series with multi-level index for "mapobject_id" and "tpoint"
See also
--------
:class:`tmlib.models.result.LabelValues`
'''
logger.info('save label values for result %d', result_id)
mapobject_ids = data.index.levels[0].tolist()
tpoints = data.index.levels[1]
with tm.utils.ExperimentConnection(self.experiment_id) as connection:
connection.execute('''
SELECT id FROM mapobject_types
WHERE name = %(mapobject_type_name)s
''', {
'mapobject_type_name': mapobject_type_name
})
results = connection.fetchall()
mapobject_type_id = results[0][0]
connection.execute('''
SELECT partition_key, array_agg(id) AS mapobject_ids
FROM mapobjects AS m
WHERE m.mapobject_type_id = %(mapobject_type_id)s
AND m.id = ANY(%(mapobject_ids)s)
GROUP BY partition_key
''', {
'mapobject_type_id': mapobject_type_id,
'mapobject_ids': mapobject_ids
})
records = connection.fetchall()
# Grouping mapobject IDs per partition_key allows us
# to target individual shards of the label_values table directly
# on the worker nodes with full SQL support, including multi-row
# insert/update statements.
for tpoint in tpoints:
for partition_key, mapobject_ids in records:
with tm.utils.ExperimentConnection(self.experiment_id) as conn:
host, port, shard_id = conn.locate_partition(
tm.LabelValues, partition_key
)
worker_connection = tm.utils.ExperimentWorkerConnection(
self.experiment_id, host, port
)
with worker_connection as connection:
logger.debug(
'upsert label values for partition %d', partition_key
)
sql = '''
INSERT INTO label_values_{shard} AS v (
partition_key, mapobject_id, values, tpoint
)
VALUES %s
ON CONFLICT ON CONSTRAINT label_values_pkey_{shard}
DO UPDATE
SET values = v.values || EXCLUDED.values
'''.format(shard=shard_id)
template = '''
(
%(partition_key)s, %(mapobject_id)s,
%(values)s, %(tpoint)s
)
'''
args = [
{
'values': {
str(result_id):
str(np.round(data.ix[(mid, tpoint)], 6))
},
'mapobject_id': mid,
'partition_key': partition_key,
'tpoint': tpoint
}
for mid in mapobject_ids
]
execute_values(
connection, sql, args, template=template, page_size=500
)
def register_result(self, submission_id, mapobject_type_name,
result_type, **result_attributes):
'''Registers a result for the given tool request.
Parameters
----------
submission_id: int
ID of the corresponding job submission
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
result_type: str
name of a class derived from
:class:`ToolResult <tmlib.models.result.ToolResult>`
**result_attributes: dict, optional
result-specific attributes as key-value value pairs
that get parsed to the constructor of the implemented `result_type`
Returns
-------
int
ID of the tool result
'''
logger.info('register result')
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
try:
module_name = 'tmlib.models.result'
module = importlib.import_module(module_name)
cls = getattr(module, result_type)
except ImportError:
raise ImportError(
                    'Oops, this module should exist: %s' % module_name
)
except AttributeError:
raise ValueError(
'"%s" is not a valid result type.' % result_type
)
required_args = inspect.getargspec(cls.__init__).args[1:]
provided_args = {
'submission_id', 'tool_name', 'mapobject_type_id', 'type'
}
for arg in required_args:
if arg not in result_attributes and arg not in provided_args:
raise ValueError(
'Argument "%s" is required for result of type "%s".'
% (arg, result_type)
)
# A result might already exist, for example when debugging
# or when the job got canceled.
result = session.query(tm.ToolResult).\
filter_by(submission_id=submission_id).\
one_or_none()
if result is None:
result = tm.ToolResult(
submission_id, self.__class__.__name__, mapobject_type.id,
type=result_type, **result_attributes
)
session.add(result)
session.flush()
return result.id
@abstractmethod
def process_request(self, submission_id, payload):
'''Processes a tool request sent by the client.
Parameters
----------
submission_id: int
ID of the corresponding job submission
payload: dict
an arbitrary mapping provided by the client that describes the job
'''
pass
class Classifier(Tool):
'''Abstract base class for classification tools.'''
__abstract__ = True
@same_docstring_as(Tool.__init__)
def __init__(self, experiment_id):
super(Classifier, self).__init__(experiment_id)
def train_supervised(self, feature_data, labels, method, n_fold_cv):
'''Trains a classifier for mapobjects based on `feature_data` and
known labels.
Parameters
----------
feature_data: pandas.DataFrame
feature values that should be used to train the classifier
labels: Dict[int, int]
mapping of :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
ID to assigned label
method: str
method to use for classification
n_fold_cv: int
number of crossvalidation iterations (*n*-fold)
Returns
-------
Tuple[sklearn.base.BaseEstimator]
trained supervised classifier and scaler
'''
classifiers = {
'randomforest': {
# NOTE: RF could be parallelized.
'cls': RandomForestClassifier(n_jobs=1),
# No scaling required for decision trees.
'scaler': None,
'search_space': {
# Number of trees.
'n_estimators': [3, 6, 12, 24],
# Number of leafs in the tree.
'max_depth': [3, 6, 12, None],
'min_samples_split': [2, 4, 8],
# TODO: this should rather be a user defined parameter
'class_weight': ['balanced', None]
},
},
'svm': {
'cls': SVC(cache_size=500, decision_function_shape='ovr'),
# Scale to zero mean and unit variance
'scaler': RobustScaler(quantile_range=(1.0, 99.0), copy=False),
# Search optimal regularization parameters to control
# model complexity.
'search_space': {
'kernel': ['linear', 'rbf'],
'C': np.logspace(-5, 15, 10, base=2),
'gamma': np.logspace(-15, -3, 10, base=2)
}
},
'logisticregression': {
'cls': SGDClassifier(
loss='log', fit_intercept=False,
n_jobs=1, penalty='elasticnet'
),
# Scale to zero mean and unit variance
'scaler': RobustScaler(quantile_range=(1.0, 99.0), copy=False),
# Search optimal regularization parameters to control
# model complexity.
'search_space': {
'alpha': np.logspace(-6, -1, 10),
'l1_ratio': np.linspace(0, 1, 10)
}
}
}
logger.info('train "%s" classifier', method)
# TODO: We may want to include tpoint into labels mapping.
y = list()
for i in feature_data.index.get_level_values('mapobject_id'):
y.append(labels[i])
scaler = classifiers[method]['scaler']
# TODO: identify NaN and infinite values
X = feature_data
if scaler:
scaler.fit(X)
X = scaler.transform(X)
clf = classifiers[method]['cls']
folds = KFold(n_splits=n_fold_cv)
# TODO: Second, finer grid search
model = GridSearchCV(clf, classifiers[method]['search_space'], cv=folds)
model.fit(X, y)
return (model, scaler)
def train_unsupervised(self, feature_data, k, method):
'''Trains a classifier that groups mapobjects into `k` classes based
on `feature_data`.
Parameters
----------
feature_data: pandas.DataFrame
feature values that should be used to train the classifier
k: int
number of classes
method: str
model to use for clustering
Returns
-------
Tuple[sklearn.base.BaseEstimator]
trained unsupervised classifier and scaler
'''
classifiers = {
'kmeans': {
'cls': KMeans,
'scaler': RobustScaler(quantile_range=(1.0, 99.0), copy=False)
}
}
logger.info('train "%s" classifier for %d classes', method)
scaler = classifiers[method]['scaler']
X = feature_data
if scaler:
scaler.fit(X)
X = scaler.transform(X)
clf = classifiers[method]['cls']
model = clf(n_clusters=k)
model.fit(X)
return (model, scaler)
def predict(self, feature_data, model, scaler=None):
'''Predicts class labels for mapobjects based on `feature_values` using
pre-trained `model`.
Parameters
----------
feature_data: pandas.DataFrame
feature values based on which labels should be predicted
model: sklearn.base.BaseEstimator
model fitted on training data
scaler: sklearn.preprocessing.data.RobustScaler, optional
scaler fitted on training data to rescale `feature_data` the same
way
Returns
-------
pandas.Series
predicted labels for each mapobject
'''
logger.info('predict labels')
X = feature_data
if scaler is not None:
X = scaler.transform(X)
predictions = model.predict(X)
return pd.Series(predictions, index=feature_data.index)
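    # Rough usage sketch for a concrete subclass (the class name, feature names and
    # label mapping below are hypothetical; request plumbing is omitted):
    #
    #     tool = MySupervisedTool(experiment_id=1)
    #     data = tool.load_feature_values('Cells', ['Area', 'Intensity'])
    #     model, scaler = tool.train_supervised(
    #         data, labels={101: 0, 102: 1}, method='randomforest', n_fold_cv=5)
    #     predictions = tool.predict(data, model, scaler)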
|
TissueMAPS/TmLibrary
|
tmlib/tools/base.py
|
Python
|
agpl-3.0
| 24,061
|
# -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.one
@api.depends('price_unit',
'price_subtotal',
'order_id.pricelist_id.currency_id',
'order_id.requisition_id.date_exchange_rate',
'order_id.requisition_id.currency_id')
def _compute_prices_in_company_currency(self):
""" """
requisition = self.order_id.requisition_id
date = requisition.date_exchange_rate or fields.Date.today()
from_curr = self.order_id.currency_id.with_context(date=date)
if requisition and requisition.currency_id:
to_curr = requisition.currency_id
else:
to_curr = self.order_id.company_id.currency_id
self.price_unit_co = from_curr.compute(self.price_unit,
to_curr, round=False)
self.price_subtotal_co = from_curr.compute(self.price_subtotal,
to_curr, round=False)
@api.multi
def _requisition_currency(self):
for rec in self:
requisition = rec.order_id.requisition_id
if requisition:
rec.requisition_currency = requisition.currency_id
price_unit_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Unit Price",
digits=dp.get_precision('Account'),
store=True,
help="Unit Price in company currency."
)
price_subtotal_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Subtotal",
digits=dp.get_precision('Account'),
store=True,
help="Subtotal in company currency."
)
order_currency = fields.Many2one(string="Currency", readonly=True,
related="order_id.currency_id")
requisition_currency = fields.Many2one(
"res.currency", string="Requisition Currency", readonly=True,
compute="_requisition_currency")
|
xpansa/purchase-workflow
|
purchase_requisition_multicurrency/model/purchase_order.py
|
Python
|
agpl-3.0
| 2,905
|
from cStringIO import StringIO
from captcha.models import CaptchaStore
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
import Image,ImageDraw,ImageFont,ImageFilter
import random
from captcha.conf import settings
def captcha_image(request,key):
store = get_object_or_404(CaptchaStore,hashkey=key)
text=store.challenge
if settings.CAPTCHA_FONT_PATH.lower().strip().endswith('ttf'):
font = ImageFont.truetype(settings.CAPTCHA_FONT_PATH,settings.CAPTCHA_FONT_SIZE)
else:
font = ImageFont.load(settings.CAPTCHA_FONT_PATH)
size = font.getsize(text)
size = (size[0]*2,size[1])
image = Image.new('RGB', size , settings.CAPTCHA_BACKGROUND_COLOR)
xpos = 2
for char in text:
fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR)
charimage = Image.new('L', font.getsize(' %s '%char), '#000000')
chardraw = ImageDraw.Draw(charimage)
chardraw.text((0,0), ' %s '%char, font=font, fill='#ffffff')
charimage = charimage.rotate(random.randrange( *settings.CAPTCHA_LETTER_ROTATION ), expand=0, resample=Image.BICUBIC)
charimage = charimage.crop(charimage.getbbox())
maskimage = Image.new('L', size)
maskimage.paste(charimage, (xpos, 4, xpos+charimage.size[0], 4+charimage.size[1] ))
size = maskimage.size
image = Image.composite(fgimage, image, maskimage)
xpos = xpos + 2 + charimage.size[0]
image = image.crop((0,0,xpos+1,size[1]))
draw = ImageDraw.Draw(image)
for f in settings.noise_functions():
draw = f(draw,image)
for f in settings.filter_functions():
image = f(image)
out = StringIO()
image.save(out,"PNG")
out.seek(0)
response = HttpResponse()
response['Content-Type'] = 'image/png'
response.write(out.read())
return response
def captcha_audio(request,key):
if settings.CAPTCHA_FLITE_PATH:
store = get_object_or_404(CaptchaStore,hashkey=key)
text=store.challenge
if 'captcha.helpers.math_challenge' == settings.CAPTCHA_CHALLENGE_FUNCT:
text = text.replace('*','times').replace('-','minus')
elif 'captcha.helpers.random_char_challenge' == settings.CAPTCHA_CHALLENGE_FUNCT:
text = '.'.join(list(text))
import tempfile, os
path = str(os.path.join(tempfile.gettempdir(),'%s.wav' %key))
cline = '%s -t "%s" -o "%s"' %(settings.CAPTCHA_FLITE_PATH, text, path)
os.popen(cline).read()
if os.path.isfile(path):
response = HttpResponse()
f = open(path,'rb')
response['Content-Type'] = 'audio/x-wav'
response.write(f.read())
f.close()
os.unlink(path)
return response
raise Http404
|
DraXus/andaluciapeople
|
captcha/views.py
|
Python
|
agpl-3.0
| 2,889
|
# Copyright 2019 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.addons.website_sale.controllers.main import WebsiteSale
from odoo import http
class WebsiteSaleProductDetailAttributeImage(WebsiteSale):
@http.route()
def product(self, product, category='', search='', **kwargs):
res = super().product(
product, category=category, search=search, **kwargs)
attributes_detail = product.attribute_line_ids.filtered(
lambda x: x.attribute_id.website_product_detail_image_published)
res.qcontext['attributes_detail'] = attributes_detail
return res
|
Vauxoo/e-commerce
|
website_sale_product_detail_attribute_image/controllers/main.py
|
Python
|
agpl-3.0
| 661
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='''ckanext-api_basic_functions''',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.0.1',
    description='''API with functions to execute SQL queries or reindex Solr.''',
long_description=long_description,
# The project's main homepage.
url='https://github.com/odevsp/ckanext-api_basic_functions',
# Author details
author='''odevsp''',
author_email='''jroigfer@everis.com''',
# Choose your license
license='AGPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='''CKAN extension solr reindex query database''',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
include_package_data=True,
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points='''
[ckan.plugins]
api_basic_functions=ckanext.api_basic_functions.plugin:Api_Basic_FunctionsPlugin
''',
)
|
odevsp/ckanext-api_basic_functions
|
setup.py
|
Python
|
agpl-3.0
| 3,241
|
import logging
from datetime import datetime
import os
import json
from flask import request, g, Response
#from openspending.core import cache
from openspending.auth import require
from openspending.lib.jsonexport import jsonify
from openspending.views.api_v2.common import blueprint
from openspending.views.error import api_json_errors
#imports prepare_cell_cubes_ext
from openspending.lib.cubes_util import *
from openspending.lib.cache import cache_key
from openspending.core import cache
from cubes.server.utils import *
from cubes.formatters import JSONLinesGenerator, csv_generator, xls_generator
from cubes.browser import SPLIT_DIMENSION_NAME
from cubes.server.decorators import prepare_cell
log = logging.getLogger(__name__)
@blueprint.route("/api/slicer/cube/<star_name>/cubes_model", methods=["JSON", "GET"])
@requires_complex_browser
@api_json_errors
@cache.cached(timeout=60, key_prefix=cache_key)
#@log_request("aggregate", "aggregates")
def cubes_model(star_name):
cubes_arg = request.args.get("cubes", None)
try:
cubes = cubes_arg.split("|")
except:
raise RequestError("Parameter cubes with value '%s'should be a valid cube names separated by a '|'"
% (cubes_arg) )
if len (cubes) > 5:
raise RequestError("You can only join 5 cubes together at one time")
g.cube = get_complex_cube(star_name, cubes)
hier_limits = None
# Copy from the application context
#g.json_record_limit = current_app.slicer.json_record_limit
g.json_record_limit = 10000
if "prettyprint" in request.args:
g.prettyprint = str_to_bool(request.args.get("prettyprint"))
else:
g.prettyprint = current_app.slicer.prettyprint
response = g.cube.to_dict(expand_dimensions=True,
with_mappings=False,
full_attribute_names=True,
create_label=True,
hierarchy_limits=hier_limits)
response["features"] = workspace.cube_features(g.cube)
return jsonify(response)
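# Illustrative request (star and cube names are hypothetical):
#   GET /api/slicer/cube/mystar/cubes_model?cubes=budget|population
# joins the listed cubes into one model; at most 5 pipe-separated cube names are
# accepted per call.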
def xlschecker(*args, **kwargs):
if "format" in request.args:
if request.args.get("format") in ['excel', 'csv']:
return True
return False
@blueprint.route("/api/slicer/cube/<star_name>/cubes_aggregate", methods=["JSON", "GET"])
@requires_complex_browser
@api_json_errors
@cache.cached(timeout=60, key_prefix=cache_key, unless=xlschecker)
def aggregate_cubes(star_name):
cubes_arg = request.args.get("cubes", None)
try:
cubes = cubes_arg.split("|")
except:
raise RequestError("Parameter cubes with value '%s'should be a valid cube names separated by a '|'"
% (cubes_arg) )
if len (cubes) > 5:
raise RequestError("You can only join 5 cubes together at one time")
g.cube = get_complex_cube(star_name, cubes)
g.browser = current_app.cubes_workspace.browser(g.cube)
cube = g.cube
output_format = validated_parameter(request.args, "format",
values=["json", "csv", "excel"],
default="json")
header_type = validated_parameter(request.args, "header",
values=["names", "labels", "none"],
default="labels")
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.lower().split(',')
else:
fields = None
# Aggregates
# ----------
aggregates = []
for agg in request.args.getlist("aggregates") or []:
aggregates += agg.split("|")
drilldown = []
ddlist = request.args.getlist("drilldown")
if ddlist:
for ddstring in ddlist:
drilldown += ddstring.split("|")
#this handles cuts with geometry__time
prepare_cell_cubes_ext(restrict=False)
prepare_cell("split", "split")
result = g.browser.aggregate(g.cell,
aggregates=aggregates,
drilldown=drilldown,
split=g.split,
page=g.page,
page_size=g.page_size,
order=g.order)
# Hide cuts that were generated internally (default: don't)
if current_app.slicer.hide_private_cuts:
result.cell = result.cell.public_cell()
# Copy from the application context
#g.json_record_limit = current_app.slicer.json_record_limit
g.json_record_limit = 10000
if "prettyprint" in request.args:
g.prettyprint = str_to_bool(request.args.get("prettyprint"))
else:
g.prettyprint = current_app.slicer.prettyprint
if output_format == "json":
resultdict= result.to_dict()
tempcells = list(result._cells)
resultdict['cells'] = tempcells
resultdict['cell'] = list(resultdict['cell'])
if "cluster" in request.args:
clusteragg = request.args.get('clusteragg', 'avg')
if len(cubes) > 1 or len(cubes) < 1:
log.warn("cluster must have one and only one cube. This call had %s"%str(cubes))
if clusteragg in ['avg', 'min', 'max', 'sum']:
clusterfield = "%s__amount_%s"%(cubes[0], clusteragg,)
numclusters = request.args.get('numclusters',5)
tempresult = get_cubes_breaks(resultdict['cells'], clusterfield, method=request.args.get('cluster'), k=numclusters)
tempresult['data'] = list(tempresult['data'])
                resultdict['cluster'] = tempresult
resp = Response(response=json.dumps(resultdict),
status=200, \
mimetype="application/json")
return(resp)
elif output_format not in ["csv","excel"]:
raise RequestError("unknown response format '%s'" % output_format)
# csv
if header_type == "names":
header = result.labels
elif header_type == "labels":
header = []
for l in result.labels:
# TODO: add a little bit of polish to this
if l == SPLIT_DIMENSION_NAME:
header.append('Matches Filters')
else:
header += [ attr.label or attr.name for attr in cube.get_attributes([l], aggregated=True) ]
else:
header = None
fields = result.labels
try:
filename_output = cubes[0] + "_" + datetime.now().strftime("%Y-%m-%d")
except:
filename_output = "aggregate_" + datetime
if output_format == "excel":
output_string = xls_generator(result,
fields,
include_header=bool(header),
header=header)
headers = {"Content-Disposition": 'attachment; filename="' + filename_output + '.xlsx"'}
return Response(output_string,
mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
headers=headers)
else:
generator = csv_generator(result,
fields,
include_header=bool(header),
header=header)
headers = {"Content-Disposition": 'attachment; filename="' + filename_output + '.csv"'}
return Response(generator,
mimetype='text/csv',
headers=headers)
@blueprint.route("/api/slicer/cube/<star_name>/cubes_facts", methods=["JSON", "GET"])
@requires_complex_browser
@api_json_errors
@cache.cached(timeout=60, key_prefix=cache_key)
#@log_request("facts", "fields")
def cubes_facts(star_name):
cubes_arg = request.args.get("cubes", None)
try:
cubes = cubes_arg.split("|")
except:
raise RequestError("Parameter cubes with value '%s'should be a valid cube names separated by a '|'"
% (cubes_arg) )
if len (cubes) > 5:
raise RequestError("You can only join 5 cubes together at one time")
g.cube = get_complex_cube(star_name, cubes)
g.browser = current_app.cubes_workspace.browser(g.cube)
# Copy from the application context
g.json_record_limit = current_app.slicer.json_record_limit
if "prettyprint" in request.args:
g.prettyprint = str_to_bool(request.args.get("prettyprint"))
else:
g.prettyprint = current_app.slicer.prettyprint
# Request parameters
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.split(',')
else:
fields = None
# fields contain attribute names
if fields:
attributes = g.cube.get_attributes(fields)
else:
attributes = g.cube.all_attributes
# Construct the field list
fields = [attr.ref() for attr in attributes]
# Get the result
facts = g.browser.facts(g.cell,
fields=fields,
order=g.order,
page=g.page,
page_size=g.page_size)
# Add cube key to the fields (it is returned in the result)
fields.insert(0, g.cube.key or "id")
# Construct the header
labels = [attr.label or attr.name for attr in attributes]
labels.insert(0, g.cube.key or "id")
return formated_response(facts, fields, labels)
|
nathanhilbert/FPA_Core
|
openspending/views/api_v2/cubes_ext.py
|
Python
|
agpl-3.0
| 9,418
|
# -*- coding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from dateutil.relativedelta import *
from datetime import datetime, date
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class doctor_attentions_diseases(osv.osv):
_name = "doctor.attentions.diseases"
_inherit = 'doctor.attentions.diseases'
_columns = {
}
def _check_main_disease(self, cr, uid, ids, context=None):
'''
verify there's only one main disease
'''
for r in self.browse(cr, uid, ids, context=context):
diseases_ids = self.search(cr,uid,[('attentiont_id','=',r.attentiont_id.id),('diseases_type','=','main')])
if len(diseases_ids) > 1:
return False
return True
def _check_duplicated_disease(self, cr, uid, ids, context=None):
'''
verify duplicated disease
'''
for r in self.browse(cr, uid, ids, context=context):
diseases_ids = self.search(cr,uid,[('attentiont_id','=',r.attentiont_id.id),('diseases_id','=',r.diseases_id.id)])
if len(diseases_ids) > 1:
return False
return True
_constraints = [
#(_check_main_disease, u'Hay más de un diagnóstico seleccionado como Principal. Por favor seleccione uno como Principal y los demás como Relacionados.', [u'\n\nTipo de Diagnóstico\n\n']),
#(_check_duplicated_disease, u'Hay uno o más diagnósticos duplicados.', [u'\n\nDiagnósticos\n\n'])
]
doctor_attentions_diseases()
|
hivam/l10n_co_doctor
|
doctor_attentions_diseases_inherit.py
|
Python
|
agpl-3.0
| 2,526
|
import ConfigParser
import os
import re
from itertools import product
import LineRegress
import ResultScraper
def readconfig(filename):
## SETS DEFaULTS
#all defaults must be in a list even if only one value
speciesFile = ""
outFolder = ""
lineRegressConfig = ""
lambdas = [1.0]
startPopulations = []#TODO need default
N0s = []#TODO need default
microsats = []#TODO need default
alleleCount = []#TODO needs default
SNPs = []#TODO need default
mutationRate = [0]
lociSampling = [1.0]
populationSampling = [1.0]
simReps = [100]
##SET FILE DELIMITERS
delimiters = ',|\||\n|;'
#open files
config = ConfigParser.ConfigParser()
config.readfp(open(filename))
#read in output filename
if config.has_section("outFolder"):
if config.has_option("outFolder", "name"):
outFolder = config.get("outFolder", "name")
##read species input file
if config.has_section("species"):
if config.has_option("species", "name"):
speciesFile = config.get("species", "name")
##read lineRegress input file
if config.has_section("lineRegress"):
if config.has_option("lineRegress", "name"):
lineRegressConfig = config.get("lineRegress", "name")
##read Lambda
if config.has_section("lambda"):
if config.has_option("lambda", "values"):
paramTemp = config.get("lambda", "values")
            paramList = re.split(delimiters, paramTemp)
lambdas = [float(value) for value in paramList]
##read starting population
if config.has_section("startPop"):
if config.has_option("startPop", "values"):
paramTemp = config.get("startPop", "values")
            paramList = re.split(delimiters, paramTemp)
startPopulations = [int(value) for value in paramList]
##read starting newborns (N0)
if config.has_section("startNewborns"):
if config.has_option("startNewborns", "values"):
paramTemp = config.get("startNewborns", "values")
            paramList = re.split(delimiters, paramTemp)
N0s = [int(value) for value in paramList]
##read starting newborns (N0)
if config.has_section("N0"):
if config.has_option("N0", "values"):
paramTemp = config.get("N0", "values")
            paramList = re.split(delimiters, paramTemp)
N0s = [int(value) for value in paramList]
##read Number of Microsats
if config.has_section("Microsats"):
if config.has_option("Microsats", "values"):
paramTemp = config.get("Microsats", "values")
            paramList = re.split(delimiters, paramTemp)
microsats = [int(value) for value in paramList]
## read number of alleles per microsat
if config.has_section("alleleCount"):
if config.has_option("alleleCount", "values"):
paramTemp = config.get("alleleCount", "values")
            paramList = re.split(delimiters, paramTemp)
alleleCount = [int(value) for value in paramList]
##read in number of SNPs
if config.has_section("SNPs"):
if config.has_option("SNPs", "values"):
paramTemp = config.get("SNPs", "values")
            paramList = re.split(delimiters, paramTemp)
SNPs = [int(value) for value in paramList]
##read in mutation Rate
if config.has_section("mutationRate"):
if config.has_option("mutationRate", "values"):
paramTemp = config.get("mutationRate", "values")
            paramList = re.split(delimiters, paramTemp)
mutationRate = [float(value) for value in paramList]
if config.has_section("lociSampleRate"):
if config.has_option("lociSampleRate", "values"):
paramTemp = config.get("lociSampleRate", "values")
            paramList = re.split(delimiters, paramTemp)
            lociSampling = [float(value) for value in paramList]
if config.has_section("individualSamplRate"):
if config.has_option("individualSamplRate", "values"):
paramTemp = config.get("individualSamplRate", "values")
            paramList = re.split(delimiters, paramTemp)
            populationSampling = [float(value) for value in paramList]
if config.has_section("simReps"):
if config.has_option("simReps", "values"):
paramTemp = config.get("simReps", "values")
            paramList = re.split(delimiters, paramTemp)
simReps = [int(value) for value in paramList]
##create parameter dictionary for return
paramDict = {"species":speciesFile,
"outputFolder":outFolder,
"regressConfig":lineRegressConfig,
"lambdas":lambdas,
"startPops":startPopulations,
"N0":N0s,
"microsats":microsats,
"alleleCount":alleleCount,
"SNPs":SNPs,
"mutationRate":mutationRate,
"lociSampling":lociSampling,
"popSampling":populationSampling,
"simReps":simReps}
return paramDict
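# --- Editor-added usage sketch (hedged; not part of the original pipeline) ---
# readconfig() expects an INI-style file in which every parameter section has a
# "values" option whose entries are split on ',', '|', ';' or newlines. The
# section names below mirror the ones parsed above; the file names and numbers
# are illustrative assumptions only.
def _example_readconfig_usage():
    import tempfile
    sample = ("[species]\nname = species.txt\n"
              "[outFolder]\nname = results\n"
              "[lineRegress]\nname = regress.cfg\n"
              "[lambda]\nvalues = 0.95,1.0,1.05\n"
              "[startPop]\nvalues = 100;500\n"
              "[simReps]\nvalues = 10\n")
    handle = tempfile.NamedTemporaryFile(suffix=".cfg", delete=False)
    handle.write(sample)
    handle.close()
    params = readconfig(handle.name)
    print params["lambdas"]    # expected: [0.95, 1.0, 1.05]
    print params["startPops"]  # expected: [100, 500]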
def runSimulation(species,outFolder,simReps,lambdaVal,startPop,N0,microSats,alleleCount,SNPs,mutationRate):
outputFiles = []
#create folder for simupop run
#run simupop
return outputFiles
def runNeEst(files,runFolder,locisampling,popsampling,regressConfig):
statsFile = ""
#create output folder
#run neEstimator
neFile = ""
#run lineregress
configVals = LineRegress.neConfigRead(regressConfig)
statsFile = LineRegress._neStatsHelper(neFile, configVals["alpha"], outFileName=statsFile,significantValue=configVals["sigSlope"],firstVal=configVals["startData"])
    return neFile, statsFile
def gatherNe(fileName,firstVal):
results, temp = ResultScraper.scrapeNE(fileName,firstVal)
return results
def gatherPower(filename):
powerData = ResultScraper.scrapePower(filename)
return powerData
def gatherSlopes(filename):
instanceArray, arrayDict = ResultScraper.scrapeSlopes(filename)
return instanceArray
def createIdentifier(species, outFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate, locisampling, popsampling, regressConfig):
identifier = "l"+str(lambdaVal)
+"p" + str(startPop)\
+ "N0" + str(N0) \
+ "m" + str(microSats)\
+ "ac" + str(alleleCount)\
+ "SNPs" + str(SNPs)\
+ "mr" + str(mutationRate)\
+ "ls" + str(locisampling)\
+ "ps" + str(popsampling)
return identifier
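# Editor-added sketch (hedged): the argument values below are arbitrary
# assumptions, chosen only to show the identifier layout that
# createIdentifier() produces and that parseIdentifier() is meant to reverse.
def _example_identifier_layout():
    ident = createIdentifier("species.txt", "out", 10, 1.0, 100, 50, 10, 8, 0,
                             0.0, 1.0, 1.0, "regress.cfg")
    print ident  # expected: l1.0p100N050m10ac8SNPs0mr0.0ls1.0ps1.0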
def parseIdentifier(identifier):
    pattern = re.compile('l(?P<lambda>[\d\.]*)p(?P<startPop>[\d]*)N0(?P<N0>[\d]*)m(?P<microsats>[\d]*)ac(?P<allelecount>[\d]*)SNPs(?P<SNPs>[\d]*)mr(?P<mutations>[\d\.]*)ls(?P<locisampling>[\d\.]*)ps(?P<popsampling>[\d\.]*)')
    match = pattern.match(identifier)
    return match.groupdict() if match else None
def nameRunFolder(species,outFolder,simReps,lambdaVal,startPop,N0,microSats,alleleCount,SNPs,mutationRate,locisampling,popsampling,regressConfig):
runFolder = createIdentifier(species,outFolder,simReps,lambdaVal,startPop,N0,microSats,alleleCount,SNPs,mutationRate,locisampling,popsampling,regressConfig)
print runFolder
    runFolder = os.path.join(outFolder, runFolder)
if os.path.isdir(runFolder):
return None
return runFolder
def run(species,outFolder,simReps,lambdaVal,startPop,N0,microSats,alleleCount,SNPs,mutationRate,locisampling,popsampling,regressConfig):
runFolder = nameRunFolder(species,outFolder,simReps,lambdaVal,startPop,N0,microSats,alleleCount,SNPs,mutationRate,locisampling,popsampling,regressConfig)
if not runFolder:
return
os.makedirs(runFolder)
simFiles = runSimulation(species,runFolder,simReps,lambdaVal,startPop,N0,microSats,alleleCount,SNPs,mutationRate)
neFile, statsFile = runNeEst(simFiles,runFolder,locisampling,popsampling,regressConfig)
return neFile, statsFile
def runSamplingOnly(files,runFolder,locisampling,popsampling,regressConfig):
neFile, statsFile = runNeEst(files,runFolder,locisampling,popsampling,regressConfig)
return neFile,statsFile
def collectStatsData(neDict, statsDict, outFolder,firstVal):
slopesName = "slopes.csv"
powerName = "power.csv"
neName = "Ne.csv"
nePath = os.path.join(outFolder, neName)
neOut = open(nePath, "w")
neOut.write("parameters,replicate,Reproductive Cycle,Ne\n")
for identifier in neDict:
neFile = neDict[identifier]
neData = gatherNe(neFile, firstVal)
for datapoint in neData:
print datapoint
data = neData[datapoint]
print data
for point in data:
neOut.write(str(identifier) + "," + str(datapoint) + "," + str(point[0]) + "," + str(point[1]) + "\n")
neOut.close()
#compile stats file
slopePath = os.path.join(outFolder, slopesName)
powerPath = os.path.join(outFolder, powerName)
powerOut = open(powerPath, "w")
powerOut.write("parameters,Positive Slopes,Neutral Slopes, Negative Slopes, Total\n")
slopeOut = open(slopePath, "w")
slopeOut.write("parameters,Slope,Intercept,CI Slope Min,CI Slope Max\n")
for identifier in statsDict:
statsFile = statsDict[identifier]
power = gatherPower(statsFile)
slopes = gatherSlopes(statsFile)
sumPower = sum(power.values())
powerOut.write(str(identifier)+ "," +str(power["positive"])+ "," +str(power["neutral"])+ "," +str(power["negative"])+ "," +str(sumPower)+"\n")
for dataPoint in slopes:
            slopeOut.write(str(identifier) + "," + str(dataPoint["slope"]) + "," + str(dataPoint["intercept"]) + "," + str(dataPoint["lowerCI"]) + "," + str(dataPoint["upperCI"]) + "\n")
powerOut.close()
slopeOut.close()
def batch(configFile,threads = 1):
configs = readconfig(configFile)
speciesFile = configs["species"]
outFolder = configs["outputFolder"]
    increment = 1
    while os.path.isdir(outFolder):
        outFolder = outFolder + "(" + str(increment) + ")"
        increment += 1
    os.makedirs(outFolder)
    runParams = product([configs["species"]], [outFolder], configs["simReps"], configs["lambdas"], configs["startPops"], configs["N0"], configs["microsats"], configs["alleleCount"], configs["SNPs"], configs["mutationRate"], configs["lociSampling"], configs["popSampling"], [configs["regressConfig"]])
if len(configs["simReps"])==1 and len(configs["startPops"])==1 and len(configs["N0"])==1 and len(configs["microsats"])==1 and len(configs["alleleCount"])==1 and len(configs["SNPs"])==1 and len(configs["mutationRate"])==1:
        if threads == 1:
            neFiles = []
            paramList = list(runParams)
            firstParams = paramList[0]
            simFiles = runSimulation(firstParams[0], firstParams[1], firstParams[2], firstParams[3], firstParams[4],
                                     firstParams[5], firstParams[6], firstParams[7], firstParams[8], firstParams[9])
            neDict = {}
            statsDict = {}
            for paramset in paramList:
                runFolder = nameRunFolder(*paramset)
                if not runFolder:
                    continue
                ident = createIdentifier(*paramset)
                neFile, statsFile = run(*paramset)
                neDict[ident] = neFile
                statsDict[ident] = statsFile
else:
if threads ==1:
neDict = {}
statsDict ={}
for paramset in runParams:
                ident = createIdentifier(*paramset)
                neFile, statsFile = run(*paramset)
neDict[ident] = neFile
statsDict[ident] = statsFile
|
BrianTrethewey/negui
|
neLineRegress/SimBatchRun.py
|
Python
|
agpl-3.0
| 11,399
|
# -*- coding: utf-8 -*-
"""
Views to support exchange of authentication credentials.
The following are currently implemented:
1. AccessTokenExchangeView:
3rd party (social-auth) OAuth 2.0 access token -> 1st party (open-edx) OAuth 2.0 access token
2. LoginWithAccessTokenView:
1st party (open-edx) OAuth 2.0 access token -> session cookie
"""
# pylint: disable=abstract-method
import django.contrib.auth as auth
import social_django.utils as social_utils
from django.conf import settings
from django.contrib.auth import login
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from edx_oauth2_provider.constants import SCOPE_VALUE_DICT
from oauth2_provider import models as dot_models
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.base import TokenView as DOTAccessTokenView
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from provider import constants
from provider.oauth2.views import AccessTokenView as DOPAccessTokenView
from rest_framework import permissions
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.auth_exchange.forms import AccessTokenExchangeForm
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
class AccessTokenExchangeBase(APIView):
"""
View for token exchange from 3rd party OAuth access token to 1st party
OAuth access token.
"""
@method_decorator(csrf_exempt)
@method_decorator(social_utils.psa("social:complete"))
def dispatch(self, *args, **kwargs):
return super(AccessTokenExchangeBase, self).dispatch(*args, **kwargs)
def get(self, request, _backend): # pylint: disable=arguments-differ
"""
Pass through GET requests without the _backend
"""
return super(AccessTokenExchangeBase, self).get(request)
def post(self, request, _backend): # pylint: disable=arguments-differ
"""
Handle POST requests to get a first-party access token.
"""
form = AccessTokenExchangeForm(request=request, oauth2_adapter=self.oauth2_adapter, data=request.POST) # pylint: disable=no-member
if not form.is_valid():
return self.error_response(form.errors) # pylint: disable=no-member
user = form.cleaned_data["user"]
scope = form.cleaned_data["scope"]
client = form.cleaned_data["client"]
return self.exchange_access_token(request, user, scope, client)
def exchange_access_token(self, request, user, scope, client):
"""
Exchange third party credentials for an edx access token, and return a
serialized access token response.
"""
if constants.SINGLE_ACCESS_TOKEN:
edx_access_token = self.get_access_token(request, user, scope, client) # pylint: disable=no-member
else:
edx_access_token = self.create_access_token(request, user, scope, client)
return self.access_token_response(edx_access_token) # pylint: disable=no-member
class DOPAccessTokenExchangeView(AccessTokenExchangeBase, DOPAccessTokenView):
"""
View for token exchange from 3rd party OAuth access token to 1st party
OAuth access token. Uses django-oauth2-provider (DOP) to manage access
tokens.
"""
oauth2_adapter = adapters.DOPAdapter()
class DOTAccessTokenExchangeView(AccessTokenExchangeBase, DOTAccessTokenView):
"""
View for token exchange from 3rd party OAuth access token to 1st party
OAuth access token. Uses django-oauth-toolkit (DOT) to manage access
tokens.
"""
oauth2_adapter = adapters.DOTAdapter()
def get(self, request, _backend):
return Response(status=400, data={
'error': 'invalid_request',
'error_description': 'Only POST requests allowed.',
})
def get_access_token(self, request, user, scope, client):
"""
TODO: MA-2122: Reusing access tokens is not yet supported for DOT.
Just return a new access token.
"""
return self.create_access_token(request, user, scope, client)
def create_access_token(self, request, user, scope, client):
"""
Create and return a new access token.
"""
_days = 24 * 60 * 60
token_generator = BearerToken(
expires_in=settings.OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS * _days,
request_validator=oauth2_settings.OAUTH2_VALIDATOR_CLASS(),
)
self._populate_create_access_token_request(request, user, scope, client)
return token_generator.create_token(request, refresh_token=True)
def access_token_response(self, token):
"""
Wrap an access token in an appropriate response
"""
return Response(data=token)
def _populate_create_access_token_request(self, request, user, scope, client):
"""
django-oauth-toolkit expects certain non-standard attributes to
be present on the request object. This function modifies the
request object to match these expectations
"""
request.user = user
request.scopes = [SCOPE_VALUE_DICT[scope]]
request.client = client
request.state = None
request.refresh_token = None
request.extra_credentials = None
request.grant_type = client.authorization_grant_type
def error_response(self, form_errors, **kwargs):
"""
Return an error response consisting of the errors in the form
"""
return Response(status=400, data=form_errors, **kwargs)
class LoginWithAccessTokenView(APIView):
"""
View for exchanging an access token for session cookies
"""
authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
permission_classes = (permissions.IsAuthenticated,)
@staticmethod
def _get_path_of_arbitrary_backend_for_user(user):
"""
Return the path to the first found authentication backend that recognizes the given user.
"""
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = auth.load_backend(backend_path)
if backend.get_user(user.id):
return backend_path
@staticmethod
def _is_grant_password(access_token):
"""
Check if the access token provided is DOT based and has password type grant.
"""
token_query = dot_models.AccessToken.objects.select_related('user')
dot_token = token_query.filter(token=access_token).first()
if dot_token and dot_token.application.authorization_grant_type == dot_models.Application.GRANT_PASSWORD:
return True
return False
@method_decorator(csrf_exempt)
def post(self, request):
"""
Handler for the POST method to this view.
"""
# The django login method stores the user's id in request.session[SESSION_KEY] and the
# path to the user's authentication backend in request.session[BACKEND_SESSION_KEY].
# The login method assumes the backend path had been previously stored in request.user.backend
# in the 'authenticate' call. However, not all authentication providers do so.
# So we explicitly populate the request.user.backend field here.
if not hasattr(request.user, 'backend'):
request.user.backend = self._get_path_of_arbitrary_backend_for_user(request.user)
#if not self._is_grant_password(request.auth):
# raise AuthenticationFailed({
# u'error_code': u'non_supported_token',
# u'developer_message': u'Only support DOT type access token with grant type password. '
# })
login(request, request.user) # login generates and stores the user's cookies in the session
return HttpResponse(status=204) # cookies stored in the session are returned with the response
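# --- Editor-added client-side sketch (hedged; not part of this module) ---
# Illustrates the two exchanges these views implement, as seen from an API
# client. The URL paths, backend name, client id and token values are
# illustrative assumptions only; the real routes are defined by the
# deployment's URLconf.
def _example_client_exchange():
    import requests  # assumed to be available in the client environment
    base = "https://lms.example.com"
    # 1) third-party (social-auth) access token -> first-party OAuth2 token
    resp = requests.post(
        base + "/oauth2/exchange_access_token/google-oauth2/",  # hypothetical route
        data={"access_token": "THIRD_PARTY_TOKEN", "client_id": "EDX_CLIENT_ID"},
    )
    edx_token = resp.json().get("access_token")
    # 2) first-party access token -> session cookie (LoginWithAccessTokenView)
    session_resp = requests.post(
        base + "/oauth2/login/",  # hypothetical route
        headers={"Authorization": "Bearer %s" % edx_token},
    )
    return session_resp.cookies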
|
kmoocdev2/edx-platform
|
openedx/core/djangoapps/auth_exchange/views.py
|
Python
|
agpl-3.0
| 8,123
|
# -*- encoding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from datetime import datetime
import time
from osv import fields, osv
from tools.translate import _
from tools import ustr
#import tools
class gap_analysis_effort(osv.Model):
_name = "gap_analysis.effort"
_description = "Gap Analysis Efforts"
_columns = {
'name': fields.char('Effort', size=4, required=True,),
'unknown': fields.boolean('Undefined duration ?', help='If checked, when this effort is used, the user would have to specify the duration manually.'),
'duration': fields.float('Duration (hour)', help='Duration in hour for this effort.', required=True,),
}
def onchange_unknown(self, cr, uid, ids, unknown):
val = {}
val['unknown'] = unknown
if not unknown:
val['duration'] = 0.0
return {'value': val}
_order = 'name'
class gap_analysis_workload_type(osv.Model):
_name = "gap_analysis.workload.type"
_description = "Gap Analysis Workload Type"
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
'category': fields.selection([('Functional Analysis','Functional'), ('Technical Analysis','Technical')], 'Analysis', required=True,),
'code': fields.char('Code for Report', size=8, required=True, translate=True, help="Set the code if name is too long (eg: in reports)."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of workload type."),
'duration': fields.float('Duration (hour)', help='Default duration in hour for this type of workload.', required=True,),
}
_defaults = {
'sequence': 10,
'category': 'Functional Analysis',
'duration': 4,
}
_order = 'sequence'
class gap_analysis_workload(osv.Model):
_name = "gap_analysis.workload"
_description = "Gap Analysis Workload"
_columns = {
'gap_line_id': fields.many2one('gap_analysis.line', 'Gap-analysis Line', ondelete='cascade', select=True, readonly=True),
'fct_id': fields.many2one('gap_analysis.functionality', 'Gap-analysis Functionality Template', ondelete='cascade', select=True, readonly=True),
'type': fields.many2one('gap_analysis.workload.type', 'Type', required=True, select=True),
'duration': fields.float('Duration (hour)', help='Duration in hour for this task.', required=True,),
}
def onchange_type_id(self, cr, uid, ids, type_id):
val = {}
my_type = self.pool.get('gap_analysis.workload.type').browse(cr, uid, type_id)
val['duration'] = my_type.duration
return {'value': val}
class gap_analysis_functionality_category(osv.Model):
_inherit = "product.category"
_name = "gap_analysis.functionality.category"
_description = "Gap Analysis Functionality Categories"
def _category_to_update(self, cr, uid, ids, fields=None, arg=None, context=None):
if type(ids) != type([]):
ids = [ids]
return self.pool.get('gap_analysis.functionality.category').search(cr, uid, [], order='parent_left') or []
def _name_get_full_path(self, cursor, uid, ids, fields, arg, context=None):
result = {}
for category in self.browse(cursor, uid, ids):
full_path = ''
current_category = category
while current_category:
if full_path=='':
full_path = ustr(current_category.name)
else:
full_path = ustr(current_category.name) + ' / ' + full_path
current_category = current_category.parent_id or False
result[category.id] = full_path
return result
_columns = {
'parent_id': fields.many2one('gap_analysis.functionality.category','Parent Category', select=True, ondelete='cascade'),
'child_id': fields.one2many('gap_analysis.functionality.category', 'parent_id', string='Child Categories'),
'code': fields.char('Code', size=8, required=True, help="Use for functionality sequencing."),
'full_path': fields.function(_name_get_full_path, type="char", method=True, size=2048, store={'gap_analysis.functionality.category': (_category_to_update, ['name','parent_id'], 10)}, string='Name'),
}
def _check_recursion(self, cr, uid, ids, context=None):
level = 100
while len(ids):
cr.execute('select distinct parent_id from gap_analysis_functionality_category where id IN %s',(tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'sequence, name'
_order = 'parent_left'
class gap_analysis_functionality(osv.Model):
_name = "gap_analysis.functionality"
_description = "Gap Analysis Functionalities"
_columns = {
'name': fields.char('Functionality', size=256, required=True, translate=True),
'description': fields.text('Description'),
'category': fields.many2one('gap_analysis.functionality.category', 'Category', required=True, select=True),
'is_tmpl': fields.boolean('Template ?', help='This Functionality is a Template ?'),
'proposed': fields.boolean('Propose as template ?'),
#### Default values (Templating) ####
'workloads': fields.one2many('gap_analysis.workload', 'fct_id', 'Default Workloads'),
'openerp_fct': fields.many2one('gap_analysis.openerp', 'Default OpenERP feature', select=True),
'critical': fields.integer('Default Critical Level', help='Indicator to specify the importance of this functionality in the project.'),
'testing': fields.float('Test (hour)'),
'effort': fields.many2one('gap_analysis.effort', 'Default Effort', help="Development Effort for this functionality."),
'duration_wk': fields.float('Default Duration (hour)', help='Since this effort has no pre-defined duration, you must set one.'),
'unknown_wk': fields.boolean('Must set the duration manually ? (Default)',),
}
def onchange_effort_id(self, cr, uid, ids, effort_id, unknown_wk):
val = {}
my_effort = self.pool.get('gap_analysis.effort').browse(cr, uid, effort_id)
val['unknown_wk'] = my_effort.unknown
return {'value': val}
def write(self, cr, uid, ids, vals, context=None):
if 'is_tmpl' in vals and vals['is_tmpl'] == True:
vals['proposed'] = False
return super(gap_analysis_functionality, self).write(cr, uid, ids, vals, context=context)
class gap_analysis_openerp(osv.Model):
_name = "gap_analysis.openerp"
_description = "Gap Analysis OpenERP features"
_columns = {
'name': fields.char('OpenERP feature', size=256, required=True, translate=True),
}
class gap_analysis(osv.Model):
_name = "gap_analysis"
_description = "Gap Analysis"
def _estimated_time_cost(self, cursor, uid, ids, fields, arg, context=None):
result = {}
for gap in self.browse(cursor, uid, ids):
res = {}
res['estimated_time'] = 0.0
res['estimated_cost'] = 0.0
for gap_line in gap.gap_lines:
if gap_line.keep:
res['estimated_time'] += gap_line.total_time
res['estimated_cost'] += gap_line.total_cost
result[gap.id] = res
return result
def _sorted_distinct_workloads(self, cursor, uid, ids, arg, context=None):
result = {}
for gap in self.browse(cursor, uid, ids):
types = []
line_ids = [l.id for l in gap.gap_lines]
if line_ids:
cursor.execute("SELECT id, code FROM gap_analysis_workload_type T WHERE id in (SELECT DISTINCT(W.type) FROM gap_analysis_workload W WHERE W.gap_line_id IN %s) ORDER BY T.sequence ASC",(tuple(line_ids),))
types = cursor.fetchall()
return types
def button_dummy(self, cr, uid, ids, context=None):
gapline_pool = self.pool.get('gap_analysis.line')
gap_cat_pool = self.pool.get('gap_analysis.functionality.category')
if type(ids) != type([]):
ids = [ids]
for gap_id in ids:
cr.execute("SELECT DISTINCT c.code FROM gap_analysis_line l, gap_analysis_functionality_category c WHERE l.category=c.id AND l.gap_id = %s",(gap_id,))
categ_codes = map(lambda x: x[0], cr.fetchall()) or []
for code in categ_codes:
idx = 1
seq = 999
cr.execute("SELECT id FROM gap_analysis_functionality_category WHERE id IN (SELECT DISTINCT c.id FROM gap_analysis_line l, gap_analysis_functionality_category c WHERE l.category=c.id AND c.code = %s AND l.gap_id = %s) ORDER BY parent_left",(code, gap_id,))
categ_ids = map(lambda x: x[0], cr.fetchall()) or []
for categ in gap_cat_pool.browse(cr, uid, categ_ids):
current_categ = categ
seq = ''
while current_categ:
seq = str(current_categ.sequence) + seq
current_categ = current_categ.parent_id or False
line_ids = gapline_pool.search(cr, uid, [('category','=',categ.id),('gap_id','=',gap_id)], order='critical desc, effort asc') or []
for line_id in line_ids:
code_line = code
code_line += str(idx).rjust(3, '0')
gapline_pool.write(cr, uid, [line_id], {'code':code_line,'seq':seq})
idx += 1
return True
def import_from_tmpl(self, cr, uid, ids, context=None):
return {
'name': _('Import from Template'),
'view_type': 'form',
'view_mode': 'form',
'view_id': False,
'res_model': 'gap_analysis.import_from_tmpl',
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': False,
}
def _get_lines(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('gap_analysis.line').browse(cr, uid, ids, context=context):
result[line.gap_id.id] = True
return result.keys()
def action_change(self, cr, uid, ids, context=None):
for o in self.browse(cr, uid, ids):
self.write(cr, uid, [o.id], {'state':'draft', 'date_confirm': False})
return True
def action_done(self, cr, uid, ids, context=None):
for o in self.browse(cr, uid, ids):
self.write(cr, uid, [o.id], {'state': 'done', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
return True
def action_cancel(self, cr, uid, ids, context=None):
for o in self.browse(cr, uid, ids):
self.write(cr, uid, [o.id], {'state': 'cancel'})
return True
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_('Warning'), _("Copying a Gap Analysis is currently not allowed."))
return False
def onchange_project_id(self, cr, uid, ids, project_id):
val = {}
my_project = self.pool.get('project.project').browse(cr, uid, project_id)
if my_project.partner_id:
val['partner_id'] = my_project.partner_id.id
return {'value': val}
_columns = {
'reference': fields.char('Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}, select=True),
'name': fields.char('Name', size=256, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([('draft', 'Draft'), ('done', 'Done'), ('cancel', 'Cancelled')], 'State', readonly=True, help="Gives the state of the gap-analysis.", select=True),
'note': fields.text('Note'),
'date_create': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which the gap-analysis is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which the gap-analysis is confirmed."),
'user_id': fields.many2one('res.users', 'Analyst', readonly=True, states={'draft': [('readonly', False)]}, select=True),
'partner_id': fields.many2one('res.partner', 'Customer', select=True, readonly=True, states={'draft': [('readonly', False)]}, ),
'gap_lines': fields.one2many('gap_analysis.line', 'gap_id', 'Functionalities', readonly=True, states={'draft': [('readonly', False)]}),
'estimated_time': fields.function(_estimated_time_cost, type='float', multi="gapsums", string='Estimated Time', store = False),
'estimated_cost': fields.function(_estimated_time_cost, type='float', multi="gapsums", string='Estimated Selling Price', store = False),
'project_id': fields.many2one('project.project', 'Project'),
'is_tmpl': fields.boolean('Template ?', help='This Gap Analysis is a Template ?'),
'tech_cost': fields.float('Technical Analysis Price', help='Default Price per hour for Technical Analysis.'),
'func_cost': fields.float('Functional Analysis Price', help='Default Price per hour for Functional Analysis.'),
'dev_cost': fields.float('Effort Price', help='Price per hour for Effort.'),
'user_functional': fields.many2one('res.users', 'Default Functional Analyst'),
'user_technical': fields.many2one('res.users', 'Default Technical Analyst'),
'user_dev': fields.many2one('res.users', 'Default Developer'),
'user_test': fields.many2one('res.users', 'Default Tester'),
}
_defaults = {
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'user_functional': lambda obj, cr, uid, context: uid,
'reference': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'gap_analysis'),
'date_create': fields.date.context_today,
'tech_cost': 500.0,
'func_cost': 500.0,
'dev_cost': 250.0,
}
_sql_constraints = [
('reference_uniq', 'unique(reference)', 'Reference must be unique !'),
]
_order = 'name desc'
class gap_analysis_line(osv.Model):
_name = "gap_analysis.line"
_description = "Gap-analysis Lines"
def _estimated_line_time_cost(self, cursor, uid, ids, fields, arg, context=None):
result = {}
gap = False
for gap_line in self.browse(cursor, uid, ids):
res = {}
res['total_time'] = 0
res['total_cost'] = 0
if not gap:
gap = self.pool.get("gap_analysis").browse(cursor, uid, gap_line.gap_id.id)
if gap_line.effort:
if gap_line.effort.unknown:
thistime = gap_line.duration_wk
else:
thistime = gap_line.effort.duration
res['total_time'] = thistime
res['total_cost'] = (gap.dev_cost * thistime)
for workload in gap_line.workloads:
if workload.type.category == "Technical Analysis":
workload_cost = gap.tech_cost
else:
workload_cost = gap.func_cost
res['total_time'] += workload.duration
res['total_cost'] += (workload.duration * workload_cost)
if gap_line.testing:
res['total_time'] += gap_line.testing
res['total_cost'] += (gap_line.testing * gap.tech_cost)
result[gap_line.id] = res
return result
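    # Editor-added worked illustration (hypothetical figures): with dev_cost=250,
    # tech_cost=func_cost=500, an effort of 8h, one functional-analysis workload
    # of 4h and 2h of testing, the computation above yields
    # total_time = 8 + 4 + 2 = 14 hours and
    # total_cost = 8*250 + 4*500 + 2*500 = 5000 per gap line.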
def _get_lines_from_workload(self, cr, uid, ids, context=None):
result = {}
for workload in self.pool.get('gap_analysis.workload').browse(cr, uid, ids, context=context):
result[workload.gap_line_id.id] = True
return result.keys()
    def _total_workloads(self, cursor, uid, ids, arg, context=None):
        result = {}
        for line in self.browse(cursor, uid, ids):
            amount = 0
            for w in line.workloads:
                if w.type.id == arg:
                    amount += w.duration
            result[line.id] = amount
        return result
def onchange_functionality_id(self, cr, uid, ids, functionality_id, gap_line_id):
val = {}
functionality_tmpl = self.pool.get('gap_analysis.functionality').browse(cr, uid, functionality_id)
if functionality_tmpl.effort:
val['effort'] = functionality_tmpl.effort.id
if functionality_tmpl.category:
val['category'] = functionality_tmpl.category.id
if functionality_tmpl.testing:
val['testing'] = functionality_tmpl.testing
if functionality_tmpl.unknown_wk:
val['unknown_wk'] = functionality_tmpl.unknown_wk
if functionality_tmpl.duration_wk:
val['duration_wk'] = functionality_tmpl.duration_wk
if functionality_tmpl.critical:
val['critical'] = functionality_tmpl.critical
if functionality_tmpl.openerp_fct:
val['openerp_fct'] = functionality_tmpl.openerp_fct.id
if functionality_tmpl.workloads:
workload_pool = self.pool.get('gap_analysis.workload')
my_workloads = []
for workload in functionality_tmpl.workloads:
workload_vals = {'type':workload.type.id,'duration':workload.duration,}
if gap_line_id:
workload_vals['gap_line_id'] = gap_line_id
workload_id = workload_pool.create(cr, uid, workload_vals)
if workload_id:
my_workloads.append(workload_id)
if my_workloads:
val['workloads'] = my_workloads
return {'value': val}
def onchange_effort_id(self, cr, uid, ids, effort_id, unknown_wk):
val = {}
my_effort = self.pool.get('gap_analysis.effort').browse(cr, uid, effort_id)
val['unknown_wk'] = my_effort.unknown
return {'value': val}
_columns = {
'gap_id': fields.many2one('gap_analysis', 'Gap-analysis', required=True, ondelete='cascade', select=True, readonly=True),
'seq': fields.char('Sequence', size=48),
'code': fields.char('Code', size=6),
'functionality': fields.many2one('gap_analysis.functionality', 'Functionality', required=True, select=True),
'category': fields.many2one('gap_analysis.functionality.category', 'Category', required=True, select=True),
'workloads': fields.one2many('gap_analysis.workload', 'gap_line_id', 'Workloads'),
'total_time': fields.function(_estimated_line_time_cost, method=True, type='float', multi=True, string='Estimated Time', store = {'gap_analysis.line': (lambda self, cr, uid, ids, c={}: ids, ['testing','workloads','duration_wk','effort','unknown_wk'], 10),'gap_analysis.workload': (_get_lines_from_workload, ['workload', 'duration'], 10),}),
'total_cost': fields.function(_estimated_line_time_cost, method=True, type='float', multi=True, string='Estimated Selling Price', store = {'gap_analysis.line': (lambda self, cr, uid, ids, c={}: ids, ['testing','workloads','duration_wk','effort','unknown_wk'], 10),'gap_analysis.workload': (_get_lines_from_workload, ['workload', 'duration'], 10),}),
'openerp_fct': fields.many2one('gap_analysis.openerp', 'OpenERP feature', select=True),
'contributors': fields.char('Contributor', size=256, help='Who is/are your main contact(s) to define this functionality.'),
'keep': fields.boolean('Keep ?', help='Keep the functionality in the Gap Analysis. If unchecked, the functionality will be print in the report but not used for the price calculation.'),
'critical': fields.integer('Critical Level', help='Indicator to specify the importance of this functionality in the project.'),
'testing': fields.float('Test (hour)'),
'effort': fields.many2one('gap_analysis.effort', 'Effort', help="Development Effort for this functionality."),
'duration_wk': fields.float('Duration (hour)', help='Since this effort has no pre-defined duration, you must set one.'),
'unknown_wk': fields.boolean('Must set the duration manually ?',),
}
_defaults = {
'unknown_wk': False,
'keep': True,
'critical': 1,
}
_order = 'seq asc, code asc'
_rec_name = 'code'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Elico-Corp/openerp-7.0
|
gap_analysis/gap_analysis.py
|
Python
|
agpl-3.0
| 21,590
|
# Copyright 2018 Silvio Gregorini (silviogregorini@openforce.it)
# Copyright (c) 2018 Openforce Srls Unipersonale (www.openforce.it)
# Copyright (c) 2019 Matteo Bilotta
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
sp_description = fields.Char(
related="company_id.sp_description",
string="Description for period end statements",
readonly=False,
)
|
OCA/l10n-italy
|
l10n_it_vat_statement_split_payment/models/account_config.py
|
Python
|
agpl-3.0
| 520
|
# Copyright 2021 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
{
"name": "Account Invoice Margin Sale Delivered Sync",
"summary": "Sync invoice margin between invoices and sale orders",
"version": "12.0.1.0.1",
"development_status": "Beta",
"maintainers": ["sergio-teruel"],
"category": "Account",
"website": "https://github.com/OCA/margin-analysis",
"author": "Tecnativa, "
"Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": [
"sale_margin_delivered",
"account_invoice_margin_sale",
],
}
|
OCA/margin-analysis
|
account_invoice_margin_sale_delivered_sync/__manifest__.py
|
Python
|
agpl-3.0
| 676
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import factory.fuzzy
from base.tests.factories.learning_unit_year import LearningUnitYearFactory
from base.tests.factories.tutor import TutorFactory
from learning_unit.tests.factories.learning_class_year import LearningClassYearFactory
class ScoreResponsibleFactory(factory.DjangoModelFactory):
class Meta:
model = 'assessments.ScoreResponsible'
tutor = factory.SubFactory(TutorFactory)
learning_unit_year = factory.SubFactory(LearningUnitYearFactory)
learning_class_year = None
class ScoreResponsibleOfClassFactory(ScoreResponsibleFactory):
learning_class_year = factory.SubFactory(
LearningClassYearFactory,
learning_component_year__lecturing=True,
learning_component_year__learning_unit_year=factory.LazyAttribute(
lambda component: component.factory_parent.factory_parent.learning_unit_year
)
)
|
uclouvain/osis
|
assessments/tests/factories/score_responsible.py
|
Python
|
agpl-3.0
| 2,160
|
# coding: utf-8
# Copyright (C) 1994-2016 Altair Engineering, Inc.
# For more information, contact Altair at www.altair.com.
#
# This file is part of the PBS Professional ("PBS Pro") software.
#
# Open Source License Information:
#
# PBS Pro is free software. You can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# PBS Pro is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Commercial License Information:
#
# The PBS Pro software is licensed under the terms of the GNU Affero General
# Public License agreement ("AGPL"), except where a separate commercial license
# agreement for PBS Pro version 14 or later has been executed in writing with
# Altair.
#
# Altair’s dual-license business model allows companies, individuals, and
# organizations to create proprietary derivative works of PBS Pro and
# distribute them - whether embedded or bundled with other software - under
# a commercial license agreement.
#
# Use of Altair’s trademarks, including but not limited to "PBS™",
# "PBS Professional®", and "PBS Pro™" and Altair’s logos is subject to Altair's
# trademark licensing policies.
import sys
import os
import socket
import pwd
import grp
import logging
import time
import re
import random
import string
import tempfile
import cPickle
import copy
import datetime
import traceback
import threading
from operator import itemgetter
from collections import OrderedDict
from distutils.version import LooseVersion
try:
import psycopg2
PSYCOPG = True
except:
PSYCOPG = False
try:
from ptl.lib.pbs_ifl import *
API_OK = True
except:
try:
from ptl.lib.pbs_ifl_mock import *
except:
sys.stderr.write("failed to import pbs_ifl, run pbs_swigify " +
"to make it\n")
raise ImportError
API_OK = False
from ptl.lib.pbs_api_to_cli import api_to_cli
from ptl.utils.pbs_dshutils import DshUtils
from ptl.utils.pbs_procutils import ProcUtils
from ptl.utils.pbs_cliutils import CliUtils
from ptl.utils.pbs_fileutils import FileUtils, FILE_TAIL
# suppress logging exceptions
logging.raiseExceptions = False
# Various mappings and aliases
MGR_OBJ_VNODE = MGR_OBJ_NODE
VNODE = MGR_OBJ_VNODE
NODE = MGR_OBJ_NODE
HOST = MGR_OBJ_HOST
JOB = MGR_OBJ_JOB
RESV = MGR_OBJ_RESV
SERVER = MGR_OBJ_SERVER
QUEUE = MGR_OBJ_QUEUE
SCHED = MGR_OBJ_SCHED
HOOK = MGR_OBJ_HOOK
RSC = MGR_OBJ_RSC
PBS_HOOK = MGR_OBJ_PBS_HOOK
# the order of these symbols matters, see pbs_ifl.h
(SET, UNSET, INCR, DECR, EQ, NE, GE, GT,
LE, LT, MATCH, MATCH_RE, NOT, DFLT) = range(14)
(PTL_OR, PTL_AND) = [0, 1]
(IFL_SUBMIT, IFL_SELECT, IFL_TERMINATE, IFL_ALTER,
IFL_MSG, IFL_DELETE) = [0, 1, 2, 3, 4, 5]
(PTL_API, PTL_CLI) = ['api', 'cli']
(PTL_COUNTER, PTL_FILTER) = [0, 1]
PTL_STR_TO_OP = {
'<': LT,
'<=': LE,
'=': EQ,
'>=': GE,
'>': GT,
'!=': NE,
' set ': SET,
' unset ': UNSET,
' match ': MATCH,
'~': MATCH_RE,
'!': NOT
}
PTL_OP_TO_STR = {
LT: '<',
LE: '<=',
EQ: '=',
GE: '>=',
GT: '>',
SET: ' set ',
NE: '!=',
UNSET: ' unset ',
MATCH: ' match ',
MATCH_RE: '~',
NOT: 'is not'
}
PTL_ATTROP_TO_STR = {PTL_AND: '&&', PTL_OR: '||'}
(RESOURCES_AVAILABLE, RESOURCES_TOTAL) = [0, 1]
EXPECT_MAP = {
UNSET: 'Unset',
SET: 'Set',
EQ: 'Equal',
NE: 'Not Equal',
LT: 'Less Than',
GT: 'Greater Than',
LE: 'Less Equal Than',
GE: 'Greater Equal Than',
MATCH_RE: 'Matches regexp',
MATCH: 'Matches',
NOT: 'Not'
}
PBS_CMD_MAP = {
MGR_CMD_CREATE: 'create',
MGR_CMD_SET: 'set',
MGR_CMD_DELETE: 'delete',
MGR_CMD_UNSET: 'unset',
MGR_CMD_IMPORT: 'import',
MGR_CMD_EXPORT: 'export',
MGR_CMD_LIST: 'list',
}
PBS_CMD_TO_OP = {
MGR_CMD_SET: SET,
MGR_CMD_UNSET: UNSET,
MGR_CMD_DELETE: UNSET,
MGR_CMD_CREATE: SET,
}
PBS_OBJ_MAP = {
MGR_OBJ_NONE: 'none',
SERVER: 'server',
QUEUE: 'queue',
JOB: 'job',
NODE: 'node',
RESV: 'reservation',
RSC: 'resource',
SCHED: 'sched',
HOST: 'host',
HOOK: 'hook',
VNODE: 'node',
PBS_HOOK: 'pbshook'
}
PTL_TRUE = ('1', 'true', 't', 'yes', 'y', 'enable', 'enabled', 'True', True)
PTL_FALSE = ('0', 'false', 'f', 'no', 'n', 'disable', 'disabled', 'False',
False)
PTL_NONE = ('None', None)
PTL_FORMULA = '__formula__'
PTL_NOARG = '__noarg__'
PTL_ALL = '__ALL__'
CMD_ERROR_MAP = {
'alterjob': 'PbsAlterError',
'holdjob': 'PbsHoldError',
'sigjob': 'PbsSignalError',
'msgjob': 'PbsMessageError',
'rlsjob': 'PbsReleaseError',
'rerunjob': 'PbsRerunError',
'orderjob': 'PbsOrderError',
'runjob': 'PbsRunError',
'movejob': 'PbsMoveError',
'delete': 'PbsDeleteError',
'deljob': 'PbsDeljobError',
'delresv': 'PbsDelresvError',
'status': 'PbsStatusError',
'manager': 'PbsManagerError',
'submit': 'PbsSubmitError',
'terminate': 'PbsQtermError'
}
class PtlConfig(object):
"""
Holds configuration options
The options can be stored in a file as well as in the OS environment
    variables. When set, the environment variables will override
    definitions in the file. By default, on Unix like systems, the file
read is ``/etc/ptl.conf``, the environment variable ``PTL_CONF_FILE``
can be used to set the path to the file to read.
The format of the file is a series of ``<key> = <value>`` properties.
A line that starts with a '#' is ignored and can be used for comments
:param conf: Path to PTL configuration file
:type conf: str or None
"""
logger = logging.getLogger(__name__)
def __init__(self, conf=None):
self.options = {
'PTL_SUDO_CMD': 'sudo -H',
'PTL_RSH_CMD': 'ssh',
'PTL_CP_CMD': 'scp -p',
'PTL_EXPECT_MAX_ATTEMPTS': 60,
'PTL_EXPECT_INTERVAL': 0.5,
'PTL_UPDATE_ATTRIBUTES': True,
}
self.handlers = {
'PTL_SUDO_CMD': DshUtils.set_sudo_cmd,
'PTL_RSH_CMD': DshUtils.set_rsh_cmd,
'PTL_CP_CMD': DshUtils.set_copy_cmd,
'PTL_EXPECT_MAX_ATTEMPTS': Server.set_expect_max_attempts,
'PTL_EXPECT_INTERVAL': Server.set_expect_interval,
'PTL_UPDATE_ATTRIBUTES': Server.set_update_attributes
}
if conf is None:
conf = os.environ.get('PTL_CONF_FILE', '/etc/ptl.conf')
try:
lines = open(conf).readlines()
except IOError:
lines = []
for line in lines:
line = line.strip()
if (line.startswith('#') or (line == '')):
continue
try:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
self.options[k] = v
except:
self.logger.error('Error parsing line ' + line)
for k, v in self.options.items():
if k in os.environ:
v = os.environ[k]
else:
os.environ[k] = str(v)
if k in self.handlers:
self.handlers[k](v)
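# --- Editor-added usage sketch (hedged) ---
# PtlConfig reads plain "<key> = <value>" lines; environment variables with the
# same names take precedence over the file. The option values below are
# illustrative assumptions, shown only to make the recognised keys concrete:
#
#     PTL_SUDO_CMD = sudo -H
#     PTL_RSH_CMD = ssh
#     PTL_EXPECT_MAX_ATTEMPTS = 120
#     PTL_EXPECT_INTERVAL = 1.0
#
# Loading a specific file (passing None falls back to $PTL_CONF_FILE or
# /etc/ptl.conf):
#
#     conf = PtlConfig('/tmp/my_ptl.conf')
#     print conf.options['PTL_EXPECT_MAX_ATTEMPTS']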
class PtlException(Exception):
"""
Generic errors raised by PTL operations.
Sets a ``return value``, a ``return code``, and a ``message``
A post function and associated positional and named arguments
are available to perform any necessary cleanup.
    :param rv: Return value set for the error that occurred during PTL
               operation
    :type rv: int or None.
    :param rc: Return code set for the error that occurred during PTL
               operation
    :type rc: int or None.
    :param msg: Message set for the error that occurred during PTL operation
:type msg: str or None.
:param post: Execute necessary cleanup if not None
:raises: PTL exceptions
"""
def __init__(self, rv=None, rc=None, msg=None, post=None, *args, **kwargs):
self.rv = rv
self.rc = rc
self.msg = msg
if post is not None:
post(*args, **kwargs)
def __str__(self):
return ('rc=' + str(self.rc) + ', rv=' + str(self.rv) +
', msg=' + str(self.msg))
def __repr__(self):
return (self.__class__.__name__ + '(rc=' + str(self.rc) + ', rv=' +
str(self.rv) + ', msg=' + str(self.msg) + ')')
class PbsServiceError(PtlException):
pass
class PbsConnectError(PtlException):
pass
class PbsStatusError(PtlException):
pass
class PbsSubmitError(PtlException):
pass
class PbsManagerError(PtlException):
pass
class PbsDeljobError(PtlException):
pass
class PbsDelresvError(PtlException):
pass
class PbsDeleteError(PtlException):
pass
class PbsRunError(PtlException):
pass
class PbsSignalError(PtlException):
pass
class PbsMessageError(PtlException):
pass
class PbsHoldError(PtlException):
pass
class PbsReleaseError(PtlException):
pass
class PbsOrderError(PtlException):
pass
class PbsRerunError(PtlException):
pass
class PbsMoveError(PtlException):
pass
class PbsAlterError(PtlException):
pass
class PbsResourceError(PtlException):
pass
class PbsSelectError(PtlException):
pass
class PbsSchedConfigError(PtlException):
pass
class PbsMomConfigError(PtlException):
pass
class PbsFairshareError(PtlException):
pass
class PbsQdisableError(PtlException):
pass
class PbsQenableError(PtlException):
pass
class PbsQstartError(PtlException):
pass
class PbsQstopError(PtlException):
pass
class PtlExpectError(PtlException):
pass
class PbsInitServicesError(PtlException):
pass
class PbsQtermError(PtlException):
pass
class PbsTypeSize(str):
"""
Descriptor class for memory as a numeric entity.
Units can be one of ``b``, ``kb``, ``mb``, ``gb``, ``tb``, ``pt``
:param unit: The unit type associated to the memory value
:type unit: str
:param value: The numeric value of the memory
:type value: int or None
:raises: ValueError and TypeError
"""
def __init__(self, value=None):
if value is None:
return
if len(value) < 2:
raise ValueError
if value[-1:] in ('b', 'B') and value[:-1].isdigit():
self.unit = 'b'
self.value = int(int(value[:-1]) / 1024)
return
# lower() applied to ignore case
unit = value[-2:].lower()
self.value = value[:-2]
if not self.value.isdigit():
raise ValueError
if unit == 'kb':
self.value = int(self.value)
elif unit == 'mb':
self.value = int(self.value) * 1024
elif unit == 'gb':
self.value = int(self.value) * 1024 * 1024
elif unit == 'tb':
self.value = int(self.value) * 1024 * 1024 * 1024
elif unit == 'pb':
self.value = int(self.value) * 1024 * 1024 * 1024 * 1024
else:
raise TypeError
self.unit = 'kb'
def encode(self, value=None, valtype='kb', precision=1):
"""
Encode numeric memory input in kilobytes to a string, including
unit
:param value: The numeric value of memory to encode
:type value: int or None.
:param valtype: The unit of the input value, defaults to kb
:type valtype: str
:param precision: Precision of the encoded value, defaults to 1
:type precision: int
:returns: Encoded memory in kb to string
"""
if value is None:
value = self.value
if valtype == 'b':
val = value
elif valtype == 'kb':
val = value * 1024
elif valtype == 'mb':
val = value * 1024 * 1024
        elif valtype == 'gb':
            val = value * 1024 * 1024 * 1024
        elif valtype == 'tb':
            val = value * 1024 * 1024 * 1024 * 1024
        elif valtype == 'pt':
            val = value * 1024 * 1024 * 1024 * 1024 * 1024
m = (
(1 << 50, 'pb'),
(1 << 40, 'tb'),
(1 << 30, 'gb'),
(1 << 20, 'mb'),
(1 << 10, 'kb'),
(1, 'b')
)
for factor, suffix in m:
if val >= factor:
break
return '%.*f%s' % (precision, float(val) / factor, suffix)
def __cmp__(self, other):
if self.value < other.value:
return -1
if self.value == other.value:
return 0
return 1
def __lt__(self, other):
if self.value < other.value:
return True
return False
def __le__(self, other):
if self.value <= other.value:
return True
return False
def __gt__(self, other):
if self.value > other.value:
return True
return False
    def __ge__(self, other):
        if self.value >= other.value:
            return True
        return False
def __eq__(self, other):
if self.value == other.value:
return True
return False
def __get__(self):
return self.value
def __add__(self, other):
if isinstance(other, int):
self.value += other
else:
self.value += other.value
return self
def __mul__(self, other):
if isinstance(other, int):
self.value *= other
else:
self.value *= other.value
return self
def __floordiv__(self, other):
self.value /= other.value
return self
def __sub__(self, other):
self.value -= other.value
return self
def __repr__(self):
return self.__str__()
def __str__(self):
return self.encode(valtype=self.unit)
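# Editor-added usage sketch (hedged): the size literals are arbitrary examples.
def _example_pbstypesize_usage():
    small = PbsTypeSize('512mb')
    big = PbsTypeSize('2gb')
    print small.value, small.unit   # -> 524288 kb
    print big > small               # -> True
    print small + small             # -> 1.0gb (re-encoded via __str__)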
class PbsTypeDuration(str):
"""
Descriptor class for a duration represented as ``hours``,
    ``minutes``, and ``seconds``, in the form of ``[HH:][MM:]SS``
:param as_seconds: HH:MM:SS represented in seconds
:type as_seconds: int
:param as_str: duration represented in HH:MM:SS
:type as_str: str
"""
def __init__(self, val):
if isinstance(val, str):
if ':' in val:
s = val.split(':')
l = len(s)
if l > 3:
raise ValueError
hr = mn = sc = 0
if l >= 2:
sc = s[l - 1]
mn = s[l - 2]
if l == 3:
hr = s[0]
self.duration = int(hr) * 3600 + int(mn) * 60 + int(sc)
elif val.isdigit():
self.duration = int(val)
elif isinstance(val, int) or isinstance(val, float):
self.duration = val
def __add__(self, other):
self.duration += other.duration
return self
def __sub__(self, other):
self.duration -= other.duration
return self
def __cmp__(self, other):
if self.duration < other.duration:
return -1
if self.duration == other.duration:
return 0
return 1
def __lt__(self, other):
if self.duration < other.duration:
return True
return False
def __le__(self, other):
if self.duration <= other.duration:
return True
return False
def __gt__(self, other):
if self.duration > other.duration:
return True
return False
    def __ge__(self, other):
        if self.duration >= other.duration:
            return True
        return False
def __eq__(self, other):
if self.duration == other.duration:
return True
return False
def __get__(self):
return self.as_str
def __repr__(self):
return self.__str__()
def __int__(self):
return int(self.duration)
def __str__(self):
return str(datetime.timedelta(seconds=self.duration))
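# Editor-added usage sketch (hedged): the duration literals are arbitrary examples.
def _example_pbstypeduration_usage():
    walltime = PbsTypeDuration('02:30:00')
    print int(walltime)                        # -> 9000 (seconds)
    print str(walltime)                        # -> 2:30:00
    print walltime > PbsTypeDuration('3600')   # -> True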
class PbsTypeArray(list):
"""
Descriptor class for a PBS array list type, e.g. String array
:param value: Array value to be passed
:param sep: Separator for two array elements
:type sep: str
:returns: List
"""
def __init__(self, value=None, sep=','):
self.separator = sep
        list.__init__(self, value.split(sep))
def __str__(self):
return self.separator.join(self)
class PbsTypeList(dict):
"""
Descriptor class for a generic PBS list that are key/value pairs
delimited
:param value: List value to be passed
:param sep: Separator for two key/value pair
:type sep: str
:param kvsep: Separator for key and value
:type kvsep: str
:returns: Dictionary
"""
def __init__(self, value=None, sep=',', kvsep='='):
self.kvsep = kvsep
self.separator = sep
d = {}
as_list = map(lambda v: v.split(kvsep), value.split(sep))
if as_list:
for k, v in as_list:
d[k] = v
del as_list
dict.__init__(self, d)
def __str__(self):
s = []
for k, v in self.items():
s += [str(k) + self.kvsep + str(v)]
return self.separator.join(s)
class PbsTypeLicenseCount(PbsTypeList):
"""
Descriptor class for a PBS license_count attribute.
It is a specialized list where key/values are ':' delimited, separated
by a ' ' (space)
:param value: PBS license_count attribute value
:returns: Specialized list
"""
def __init__(self, value=None):
super(PbsTypeLicenseCount, self).__init__(value, sep=' ', kvsep=':')
class PbsTypeVariableList(PbsTypeList):
"""
Descriptor class for a PBS Variable_List attribute
It is a specialized list where key/values are '=' delimited, separated
    by a ',' (comma)
:param value: PBS Variable_List attribute value
:returns: Specialized list
"""
def __init__(self, value=None):
super(PbsTypeVariableList, self).__init__(value, sep=',', kvsep='=')
class PbsTypeSelect(list):
"""
Descriptor class for PBS select/schedselect specification.
Select is of the form:
``<select> ::= <m>":"<chunk> | <select>"+"<select>``
``<m> ::= <digit> | <digit><m>``
``<chunk> ::= <resc_name>":"<resc_value> | <chunk>":"<chunk>``
``<m>`` is a multiplying factor for each chunk requested
``<chunk>`` are resource key/value pairs
The type populates a list of single chunk of resource
``key/value`` pairs, the list can be walked by iterating over
the type itself.
:param num_chunks: The total number of chunks in the select
:type num_chunk: int
:param resources: A dictionary of all resource counts in the select
:type resources: Dictionary
"""
def __init__(self, s=None):
if s is not None:
self._as_str = s
self.resources = {}
self.num_chunks = 0
nc = s.split('+')
for chunk in nc:
self._parse_chunk(chunk)
def _parse_chunk(self, chunk):
d = chunk.split(':')
# number of chunks
_num_chunks = int(d[0])
self.num_chunks += _num_chunks
r = {}
for e in d[1:]:
k, v = e.split('=')
r[k] = v
if 'mem' in k:
try:
v = PbsTypeSize(v).value
except:
# failed so we guessed wrong on the type
pass
if isinstance(v, int) or v.isdigit():
if k not in self.resources:
self.resources[k] = _num_chunks * int(v)
else:
self.resources[k] += _num_chunks * int(v)
else:
if k not in self.resources:
self.resources[k] = v
else:
self.resources[k] = [self.resources[k], v]
# explicitly expose the multiplying factor
for _ in range(_num_chunks):
self.append(r)
def __add__(self, chunk=None):
if chunk is None:
return self
self._parse_chunk(chunk)
self._as_str = self._as_str + "+" + chunk
return self
def __repr__(self):
return str(self)
def __str__(self):
return self._as_str
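# Editor-added usage sketch (hedged): the select string is an arbitrary example.
def _example_pbstypeselect_usage():
    sel = PbsTypeSelect('2:ncpus=4:mem=2gb+1:ncpus=8')
    print sel.num_chunks           # -> 3
    print sel.resources['ncpus']   # -> 16  (2*4 + 1*8)
    print sel.resources['mem']     # -> 4194304  (2 x 2gb, accumulated in kb)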
class PbsTypeChunk(dict):
"""
Descriptor class for a PBS chunk associated to a
    ``PbsTypeExecVnode``. This type of chunk corresponds to
    a node solution to a resource request, not to the select
specification.
``chunk ::= <subchk> | <chunk>"+"<chunk>``
``subchk ::= <node>":"<resource>``
``resource ::= <key>":"<val> | <resource>":"<resource>``
A chunk expresses a solution to a specific select-chunk
request. If multiple chunks are needed to solve a single
select-chunk, e.g., on a shared memory system, the chunk
    will be extended into a virtual chunk, vchunk.
:param vnode: the vnode name corresponding to the chunk
:type vnode: str or None
:param resources: the key value pair of resources in
dictionary form
:type resources: Dictionary or None
:param vchunk: a list of virtual chunks needed to solve
the select-chunk, vchunk is only set if more
than one vchunk are required to solve the
select-chunk
:type vchunk: list
"""
def __init__(self, vnode=None, resources=None, chunkstr=None):
self.vnode = vnode
if resources is not None:
self.resources = resources
else:
self.resources = {}
self.vchunk = []
self.as_str = chunkstr
self.__parse_chunk(chunkstr)
def __parse_chunk(self, chunkstr=None):
if chunkstr is None:
return
vchunks = chunkstr.split('+')
if len(vchunks) == 1:
entities = chunkstr.split(':')
self.vnode = entities[0]
if len(entities) > 1:
for e in entities[1:]:
(r, v) = e.split('=')
self.resources[r] = v
self[self.vnode] = self.resources
else:
for sc in vchunks:
chk = PbsTypeChunk(chunkstr=sc)
self.vchunk.append(chk)
self[chk.vnode] = chk.resources
def add(self, vnode, resources):
"""
        Add a chunk specification. If a chunk is already
defined, add the chunk as a vchunk.
:param vnode: The vnode to add
:type vnode: str
:param resources: The resources associated to the
vnode
:type resources: str
:returns: Added chunk specification
"""
if self.vnode == vnode:
self.resources = dict(self.resources.items() + resources.items())
return self
elif len(self.vchunk) != 0:
for chk in self.vchunk:
if chk.vnode == vnode:
chk.resources = dict(self.resources.items() +
resources.items())
return self
chk = PbsTypeChunk(vnode, resources)
self.vchunk.append(chk)
return self
def __repr__(self):
return self.__str__()
def __str__(self):
_s = ["("]
_s += [self.vnode, ":"]
for resc_k, resc_v in self.resources.items():
_s += [resc_k, "=", str(resc_v)]
if self.vchunk:
for _v in self.vchunk:
_s += ["+", _v.vnode, ":"]
for resc_k, resc_v in _v.resources.items():
_s += [resc_k, "=", str(resc_v)]
_s += [")"]
return "".join(_s)
class PbsTypeExecVnode(list):
"""
Execvnode representation, expressed as a list of
PbsTypeChunk
:param vchunk: List of virtual chunks, only set when
more than one vnode is allocated to a
host satisfy a chunk requested
:type vchunk: List
:param num_chunks: The number of chunks satisfied by
this execvnode
:type num_chunks: int
:param vnodes: List of vnode names allocated to the execvnode
:type vnodes: List
:param resource: method to return the amount of a named
resource satisfied by this execvnode
"""
def __init__(self, s=None):
if s is None:
return None
self._as_str = s
start = 0
self.num_chunks = 0
for c in range(len(s)):
# must split on '+' between parens because '+' can occur within
# paren for complex specs
if s[c] == '(':
start = c + 1
if s[c] == ')':
self.append(PbsTypeChunk(chunkstr=s[start:c]))
self.num_chunks += 1
def resource(self, name=None):
"""
:param name: Name of the resource
:type name: str or None
"""
if name is None:
return None
_total = 0
for _c in self:
if _c.vchunk:
for _v in _c.vchunk:
if name in _v.resources:
_total += int(_v.resources[name])
            if name in _c.resources:
                _total += int(_c.resources[name])
return _total
@property
def vnodes(self):
vnodes = []
for e in self:
vnodes += [e.vnode]
if e.vchunk:
vnodes += map(lambda n: n.vnode, e.vchunk)
return list(set(vnodes))
    def __str__(self):
return self._as_str
# below would be to verify that the converted type maps back correctly
_s = []
for _c in self:
_s += [str(_c)]
return "+".join(_s)
class PbsTypeExecHost(str):
"""
Descriptor class for exec_host attribute
:param hosts: List of hosts in the exec_host. Each entry is
a host info dictionary that maps the number of
cpus and its task number
:type hosts: List
"""
def __init__(self, s=None):
if s is None:
return None
self._as_str = s
self.hosts = []
hsts = s.split('+')
for h in hsts:
hi = {}
ti = {}
(host, task) = h.split('/',)
d = task.split('*')
if len(d) == 1:
taskslot = d[0]
ncpus = 1
elif len(d) == 2:
(taskslot, ncpus) = d
else:
(taskslot, ncpus) = (0, 1)
ti['task'] = taskslot
ti['ncpus'] = ncpus
hi[host] = ti
self.hosts.append(hi)
def __repr__(self):
return str(self.hosts)
def __str__(self):
return self._as_str
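# Illustrative usage sketch (hypothetical values):
# PbsTypeExecHost('hostA/0*2+hostB/1').hosts is expected to be
# [{'hostA': {'task': '0', 'ncpus': '2'}}, {'hostB': {'task': '1', 'ncpus': 1}}];
# ncpus defaults to the integer 1 when no '*<ncpus>' suffix is given.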
class PbsTypeJobId(str):
"""
Descriptor class for a Job identifier
:param id: The numeric portion of a job identifier
:type id: int
:param server_name: The pbs server name
:type server_name: str
:param server_shortname: The first portion of a FQDN server
name
:type server_shortname: str
"""
def __init__(self, value=None):
if value is None:
return
self.value = value
r = value.split('.', 1)
if len(r) != 2:
return
self.id = int(r[0])
self.server_name = r[1]
self.server_shortname = r[1].split('.', 1)[0]
def __str__(self):
return str(self.value)
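# Illustrative usage sketch (hypothetical value):
# PbsTypeJobId('123.serverA.example.com') is expected to expose id == 123,
# server_name == 'serverA.example.com' and server_shortname == 'serverA'.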
class PbsUser(object):
"""
The PbsUser type augments a PBS username to associate
it to groups to which the user belongs
:param name: The user name referenced
:type name: str
:param uid: uid of user
:type uid: int or None
:param groups: The list of PbsGroup objects the user
belongs to
:type groups: List or None
"""
def __init__(self, name, uid=None, groups=None):
self.name = name
if uid is not None:
self.uid = int(uid)
else:
self.uid = None
self.home = None
self.gid = None
self.shell = None
self.gecos = None
try:
_user = pwd.getpwnam(self.name)
self.uid = _user.pw_uid
self.home = _user.pw_dir
self.gid = _user.pw_gid
self.shell = _user.pw_shell
self.gecos = _user.pw_gecos
except:
pass
if groups is None:
self.groups = []
elif isinstance(groups, list):
self.groups = groups
else:
self.groups = groups.split(",")
        for i, g in enumerate(self.groups):
            if isinstance(g, str):
                self.groups[i] = PbsGroup(g, users=[self])
            elif self not in g.users:
                g.users.append(self)
def __repr__(self):
return str(self.name)
def __str__(self):
return self.__repr__()
def __int__(self):
return int(self.uid)
class PbsGroup(object):
"""
The PbsGroup type augments a PBS groupname to associate it
to users to which the group belongs
:param name: The group name referenced
:type name: str
:param gid: gid of group
:type gid: int or None
:param users: The list of PbsUser objects the group belongs to
:type users: List or None
"""
def __init__(self, name, gid=None, users=None):
self.name = name
if gid is not None:
self.gid = int(gid)
else:
self.gid = None
try:
_group = grp.getgrnam(self.name)
self.gid = _group.gr_gid
except:
pass
if users is None:
self.users = []
elif isinstance(users, list):
self.users = users
else:
self.users = users.split(",")
        for i, u in enumerate(self.users):
            if isinstance(u, str):
                self.users[i] = PbsUser(u, groups=[self])
            elif self not in u.groups:
                u.groups.append(self)
def __repr__(self):
return str(self.name)
def __str__(self):
return self.__repr__()
def __int__(self):
return int(self.gid)
class BatchUtils(object):
"""
Utility class to create/convert/display various PBS
data structures
"""
legal = "\d\w:\+=\[\]~"
chunks_tag = re.compile("(?P<chunk>\([\d\w:\+=\[\]~]\)[\+]?)")
chunk_tag = re.compile("(?P<vnode>[\w\d\[\]]+):" +
"(?P<resources>[\d\w:\+=\[\]~])+\)")
array_tag = re.compile("(?P<jobid>[\d]+)\[(?P<subjobid>[0-9]*)\]*" +
"[.]*[(?P<server>.*)]*")
subjob_tag = re.compile("(?P<jobid>[\d]+)\[(?P<subjobid>[0-9]+)\]*" +
"[.]*[(?P<server>.*)]*")
pbsobjname_re = re.compile("^([\w\d][\d\w\s]*:?[\s]+)" +
"*(?P<name>[\w@\.\d\[\]-]+)$")
pbsobjattrval_re = re.compile(r"""
[\s]*(?P<attribute>[\w\d\.-]+)
[\s]*=[\s]*
(?P<value>.*)
[\s]*""",
re.VERBOSE)
dt_re = '(?P<dt_from>\d\d/\d\d/\d\d\d\d \d\d:\d\d)' + \
'[\s]+' + \
'(?P<dt_to>\d\d/\d\d/\d\d\d\d \d\d:\d\d)'
dt_tag = re.compile(dt_re)
hms_tag = re.compile('(?P<hr>\d\d):(?P<mn>\d\d):(?P<sc>\d\d)')
lim_tag = re.compile("(?P<limtype>[a-z_]+)[\.]*(?P<resource>[\w\d-]*)"
"=[\s]*\[(?P<entity_type>[ugpo]):"
"(?P<entity_name>[\w\d-]+)"
"=(?P<entity_value>[\d\w]+)\][\s]*")
def __init__(self):
self.logger = logging.getLogger(__name__)
self.du = DshUtils()
def list_to_attrl(self, l):
"""
Convert a list to a PBS attribute list
:param l: List to be converted
:type l: List
:returns: PBS attribute list
"""
return self.list_to_attropl(l, None)
def list_to_attropl(self, l, op=SET):
"""
Convert a list to a PBS attribute operation list
:param l: List to be converted
:type l: List
:returns: PBS attribute operation list
"""
head = None
prev = None
for i in l:
a = self.str_to_attropl(i, op)
if prev is None:
head = a
else:
prev.next = a
prev = a
if op is not None:
a.op = op
return head
def str_to_attrl(self, s):
"""
Convert a string to a PBS attribute list
:param s: String to be converted
:type s: str
:returns: PBS attribute list
"""
return self.str_to_attropl(s, None)
def str_to_attropl(self, s, op=SET):
"""
Convert a string to a PBS attribute operation list
:param s: String to be converted
:type s: str
:returns: PBS attribute operation list
"""
if op is not None:
a = attropl()
else:
a = attrl()
if '.' in s:
(attribute, resource) = s.split('.')
a.name = attribute
a.resource = resource.strip()
else:
a.name = s
a.value = ''
a.next = None
if op:
a.op = op
return a
def dict_to_attrl(self, d={}):
"""
Convert a dictionary to a PBS attribute list
:param d: Dictionary to be converted
:type d: Dictionary
:returns: PBS attribute list
"""
return self.dict_to_attropl(d, None)
def dict_to_attropl(self, d={}, op=SET):
"""
Convert a dictionary to a PBS attribute operation list
:param d: Dictionary to be converted
:type d: Dictionary
:returns: PBS attribute operation list
"""
if len(d.keys()) == 0:
return None
prev = None
head = None
for k, v in d.items():
if isinstance(v, tuple):
op = v[0]
v = v[1]
if op is not None:
a = attropl()
else:
a = attrl()
if '.' in k:
(attribute, resource) = k.split('.')
a.name = attribute
a.resource = resource
else:
a.name = k
a.value = str(v)
if op is not None:
a.op = op
a.next = None
if prev is None:
head = a
else:
prev.next = a
prev = a
return head
def convert_to_attrl(self, attrib):
"""
Generic call to convert Python type to PBS attribute list
:param attrib: Attributes to be converted
:type attrib: List or tuple or dictionary or str
:returns: PBS attribute list
"""
return self.convert_to_attropl(attrib, None)
def convert_to_attropl(self, attrib, cmd=MGR_CMD_SET, op=None):
"""
Generic call to convert Python type to PBS attribute
operation list
:param attrib: Attributes to be converted
:type attrib: List or tuple or dictionary or str
:returns: PBS attribute operation list
"""
if op is None:
op = self.command_to_op(cmd)
if isinstance(attrib, (list, tuple)):
a = self.list_to_attropl(attrib, op)
elif isinstance(attrib, (dict, OrderedDict)):
a = self.dict_to_attropl(attrib, op)
elif isinstance(attrib, str):
a = self.str_to_attropl(attrib, op)
else:
a = None
return a
def command_to_op(self, cmd=None):
"""
Map command to a ``SET`` or ``UNSET`` Operation. An unrecognized
command will return SET. No command will return None.
:param cmd: Command to be mapped
:type cmd: str
:returns: ``SET`` or ``UNSET`` operation for the command
"""
if cmd is None:
return None
if cmd in (MGR_CMD_SET, MGR_CMD_EXPORT, MGR_CMD_IMPORT):
return SET
if cmd == MGR_CMD_UNSET:
return UNSET
return SET
def display_attrl(self, a=None, writer=sys.stdout):
"""
Display an attribute list using writer, defaults to sys.stdout
:param a: Attributes
:type a: List
:returns: Displays attribute list
"""
        return self.display_attropl(a, writer)
def display_attropl(self, attropl=None, writer=sys.stdout):
"""
Display an attribute operation list with writer, defaults to
sys.stdout
:param attropl: Attribute operation list
:type attropl: List
:returns: Displays an attribute operation list
"""
attrs = attropl
while attrs is not None:
if attrs.resource:
writer.write('\t' + attrs.name + '.' + attrs.resource + '= ' +
attrs.value + '\n')
else:
writer.write('\t' + attrs.name + '= ' + attrs.value + '\n')
attrs = attrs.next
def display_dict(self, d, writer=sys.stdout):
"""
Display a dictionary using writer, defaults to sys.stdout
:param d: Dictionary
:type d: Dictionary
:returns: Displays a dictionary
"""
if not d:
return
for k, v in d.items():
writer.write(k + ': ' + v + '\n')
def batch_status_to_dictlist(self, bs=None, attr_names=None, id=None):
"""
        Convert a batch status to a list of dictionaries.
        Version 0.1a6 added this conversion as a typemap(out) as
        part of the SWIG wrapping itself, so there are fewer uses
        for this function. Returns a list of dictionary
        representations of the batch status.
:param bs: Batch status
:param attr_names: Attribute names
:returns: List of dictionaries
"""
attr_time = (
'ctime', 'mtime', 'qtime', 'start', 'end', 'reserve_start',
'reserve_end', 'estimated.start_time')
ret = []
while bs:
if id is not None and bs.name != id:
bs = bs.next
continue
d = {}
attrs = bs.attribs
while attrs is not None:
if attrs.resource:
key = attrs.name + '.' + attrs.resource
else:
key = attrs.name
if attr_names is not None:
if key not in attr_names:
attrs = attrs.next
continue
val = attrs.value
if attrs.name in attr_time:
val = self.convert_time(val)
# for attributes that may occur multiple times (e.g., max_run)
# append the value in a comma-separated representation
if key in d:
d[key] = d[key] + ',' + str(val)
else:
d[key] = str(val)
attrs = attrs.next
if len(d.keys()) > 0:
ret.append(d)
d['id'] = bs.name
bs = bs.next
return ret
def display_batch_status(self, bs=None, attr_names=None,
writer=sys.stdout):
"""
Display a batch status using writer, defaults to sys.stdout
:param bs: Batch status
:param attr_name: Attribute name
:type attr_name: str
:returns: Displays batch status
"""
if bs is None:
return
l = self.batch_status_to_dictlist(bs, attr_names)
self.display_batch_status_as_dictlist(l, writer)
def display_dictlist(self, l=[], writer=sys.stdout, fmt=None):
"""
Display a list of dictionaries using writer, defaults to
sys.stdout
:param l: The list to display
:type l: List
:param writer: The stream on which to write
:param fmt: An optional formatting string
:type fmt: str or None
:returns: Displays list of dictionaries
"""
self.display_batch_status_as_dictlist(l, writer, fmt)
def dictlist_to_file(self, l=[], filename=None, mode='w'):
"""
write a dictlist to file
:param l: Dictlist
:type l: List
:param filename: File to which dictlist need to be written
:type filename: str
:param mode: Mode of file
:type mode: str
:raises: Exception writing to file
"""
if filename is None:
self.logger.error('a filename is required')
return
d = os.path.dirname(filename)
if d != '' and not os.path.isdir(d):
os.makedirs(d)
try:
f = open(filename, mode)
self.display_dictlist(l, f)
f.close()
except:
self.logger.error('error writing to file ' + filename)
raise
def batch_status_as_dictlist_to_file(self, l=[], writer=sys.stdout):
"""
Write a dictlist to file
:param l: Dictlist
:type l: List
:raises: Exception writing to file
"""
return self.dictlist_to_file(l, writer)
def file_to_dictlist(self, file=None, attribs=None, id=None):
"""
Convert a file to a batch dictlist format
:param file: File to be converted
:type file: str
:param attribs: Attributes
:returns: File converted to a batch dictlist format
"""
if file is None:
return []
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except Exception, e:
self.logger.error('error converting list of dictionaries to ' +
'file ' + str(e))
return []
return self.convert_to_dictlist(lines, attribs, id=id)
def file_to_vnodedef(self, file=None):
"""
Convert a file output of pbsnodes -av to a vnode
definition format
:param file: File to be converted
        :type file: str
:returns: Vnode definition format
"""
if file is None:
return None
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except:
self.logger.error('error converting nodes to vnode def')
return None
dl = self.convert_to_dictlist(lines)
return self.dictlist_to_vnodedef(dl)
def show(self, l=[], name=None, fmt=None):
"""
Alias to display_dictlist with sys.stdout as writer
:param name: if specified only show the object of
that name
:type name: str
        :param fmt: Optional formatting string, uses %n for
                    object name, %a for attributes, for example
                    a format of '%n\\n\\t%a\\n' will display
                    objects with their name starting on the first
                    column, a new line, and attributes indented by
                    a tab followed by a new line at the end.
        :type fmt: str
"""
if name:
i = 0
for obj in l:
if obj['id'] == name:
l = [l[i]]
break
i += 1
self.display_dictlist(l, fmt=fmt)
def get_objtype(self, d={}):
"""
Get the type of a given object
:param d: Dictionary
:type d: Dictionary
:Returns: Type of the object
"""
if 'Job_Name' in d:
return JOB
elif 'queue_type' in d:
return QUEUE
elif 'Reserve_Name' in d:
return RESV
elif 'server_state' in d:
return SERVER
elif 'Mom' in d:
return NODE
elif 'event' in d:
return HOOK
elif 'type' in d:
return RSC
return None
def display_batch_status_as_dictlist(self, l=[], writer=sys.stdout,
fmt=None):
"""
Display a batch status as a list of dictionaries
using writer, defaults to sys.stdout
:param l: List
:type l: List
        :param fmt: Optional format string
:type fmt: str or None
:returns: Displays batch status as a list of dictionaries
"""
if l is None:
return
for d in l:
self.display_batch_status_as_dict(d, writer, fmt)
def batch_status_as_dict_to_str(self, d={}, fmt=None):
"""
Return a string representation of a batch status dictionary
:param d: Dictionary
:type d: Dictionary
:param fmt: Optional format string
:type fmt: str or None
:returns: String representation of a batch status dictionary
"""
objtype = self.get_objtype(d)
if fmt is not None:
if '%1' in fmt:
_d1 = fmt['%1']
else:
_d1 = '\n'
if '%2' in fmt:
_d2 = fmt['%2']
else:
_d2 = ' '
if '%3' in fmt:
_d3 = fmt['%3']
else:
_d3 = ' = '
if '%4' in fmt:
_d4 = fmt['%4']
else:
_d4 = '\n'
if '%5' in fmt:
_d5 = fmt['%5']
else:
_d5 = '\n'
if '%6' in fmt:
_d6 = fmt['%6']
else:
_d6 = ''
else:
_d1 = '\n'
_d2 = ' '
_d3 = ' = '
_d4 = '\n'
_d5 = '\n'
_d6 = ''
if objtype == JOB:
_n = 'Job Id: ' + d['id'] + _d1
elif objtype == QUEUE:
_n = 'Queue: ' + d['id'] + _d1
elif objtype == RESV:
_n = 'Name: ' + d['id'] + _d1
elif objtype == SERVER:
_n = 'Server: ' + d['id'] + _d1
elif objtype == RSC:
_n = 'Resource: ' + d['id'] + _d1
elif 'id' in d:
_n = d['id'] + _d1
del d['id']
else:
_n = ''
_a = []
for k, v in sorted(d.items()):
if k == 'id':
continue
_a += [_d2 + k + _d3 + str(v)]
return _n + _d4.join(_a) + _d5 + _d6
def display_batch_status_as_dict(self, d={}, writer=sys.stdout, fmt=None):
"""
Display a dictionary representation of a batch status
using writer, defaults to sys.stdout
:param d: Dictionary
:type d: Dictionary
:param fmt: Optional format string
        :type fmt: str
:returns: Displays dictionary representation of a batch
status
"""
writer.write(self.batch_status_as_dict_to_str(d, fmt))
def decode_dictlist(self, l=None, json=True):
"""
decode a list of dictionaries
:param l: List of dictionaries
:type l: List
:param json: The target of the decode is meant for ``JSON``
formatting
:returns: Decoded list of dictionaries
"""
if l is None:
return ''
_js = []
for d in l:
_jdict = {}
for k, v in d.items():
if ',' in v:
_jdict[k] = v.split(',')
else:
_jdict[k] = self.decode_value(v)
_js.append(_jdict)
return _js
def convert_to_dictlist(self, l, attribs=None, mergelines=True, id=None):
"""
Convert a list of records into a dictlist format.
:param l: array of records to convert
:type l: List
:param mergelines: merge qstat broken lines into one
:returns: Record list converted into dictlist format
"""
if mergelines:
lines = []
for i in range(len(l)):
if l[i].startswith('\t'):
_e = len(lines) - 1
lines[_e] = lines[_e].strip('\r\n\t') + \
l[i].strip('\r\n\t')
else:
lines.append(l[i])
else:
lines = l
objlist = []
d = {}
for l in lines:
l = l.strip()
m = self.pbsobjname_re.match(l)
if m:
if len(d.keys()) > 1:
if id is None or (id is not None and d['id'] == id):
objlist.append(d.copy())
d = {}
d['id'] = m.group('name')
else:
m = self.pbsobjattrval_re.match(l)
if m:
attr = m.group('attribute')
if attribs is None or attr in attribs:
if attr in d:
d[attr] = d[attr] + "," + m.group('value')
else:
d[attr] = m.group('value')
# add the last element
if len(d.keys()) > 1:
if id is None or (id is not None and d['id'] == id):
objlist.append(d.copy())
return objlist
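    # Illustrative usage sketch (hypothetical qstat-like lines): calling
    # convert_to_dictlist(['Job Id: 123.svr', '    Job_Name = STDIN',
    #                      '    queue = workq'])
    # is expected to return
    # [{'id': '123.svr', 'Job_Name': 'STDIN', 'queue': 'workq'}].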
def convert_to_batch(self, l, mergelines=True):
"""
Convert a list of records into a batch format.
:param l: array of records to convert
:type l: List
        :param mergelines: qstat breaks long lines over
                           multiple lines, merge them to
                           one by default.
:type mergelines: bool
:returns: A linked list of batch status
"""
if mergelines:
lines = []
for i in range(len(l)):
if l[i].startswith('\t'):
_e = len(lines) - 1
lines[_e] = lines[_e].strip('\r\t') + \
l[i].strip('\r\n')
else:
lines.append(l[i])
else:
lines = l
head_bs = None
prev_bs = None
prev_attr = None
for l in lines:
l = l.strip()
m = self.pbsobjname_re.match(l)
if m:
bs = batch_status()
bs.name = m.group('name')
bs.attribs = None
bs.next = None
if prev_bs:
prev_bs.next = bs
if head_bs is None:
head_bs = bs
prev_bs = bs
prev_attr = None
else:
m = self.pbsobjattrval_re.match(l)
if m:
attr = attrl()
attr.name = m.group('attribute')
attr.value = m.group('value')
attr.next = None
if bs.attribs is None:
bs.attribs = attr
if prev_attr:
prev_attr.next = attr
prev_attr = attr
return head_bs
def file_to_batch(self, file=None):
"""
Convert a file to batch format
:param file: File to be converted
:type file: str or None
:returns: File converted into batch format
"""
if file is None:
return None
try:
f = open(file, 'r')
l = f.readlines()
f.close()
except:
self.logger.error('error converting file ' + file + ' to batch')
return None
return self.convert_to_batch(l)
def batch_to_file(self, bs=None, file=None):
"""
Write a batch object to file
:param bs: Batch status
:param file: File to which batch object is to be written
:type file: str
"""
if bs is None or file is None:
return
try:
f = open(file, 'w')
self.display_batch_status(bs, writer=f)
f.close()
except:
self.logger.error('error converting batch status to file')
def batch_to_vnodedef(self, bs):
"""
:param bs: Batch status
:returns: The vnode definition string representation
of nodes batch_status
"""
out = ["$configversion 2\n"]
while bs is not None:
attr = bs.attribs
while attr is not None:
if attr.name.startswith("resources_available") or \
attr.name.startswith("sharing"):
out += [bs.name + ": "]
out += [attr.name + "=" + attr.value + "\n"]
attr = attr.next
bs = bs.next
return "".join(out)
def dictlist_to_vnodedef(self, dl=None):
"""
:param dl: Dictionary list
:type dl: List
:returns: The vnode definition string representation
of a dictlist
"""
if dl is None:
return ''
out = ["$configversion 2\n"]
for node in dl:
for k, v in node.items():
if (k.startswith("resources_available") or
k.startswith("sharing") or
k.startswith("provision_enable") or
k.startswith("queue")):
out += [node['id'] + ": "]
# MoM dislikes empty values reported in vnode defs so
# we substitute no value for an actual empty string
if not v:
v = '""'
out += [k + "=" + str(v) + "\n"]
return "".join(out)
def objlist_to_dictlist(self, objlist=None):
"""
Convert a list of PBS/PTL objects ``(e.g. Server/Job...)``
into a dictionary list representation of the batch status
:param objlist: List of ``PBS/PTL`` objects
:type objlist: List
:returns: Dictionary list representation of the batch status
"""
if objlist is None:
return None
bsdlist = []
for obj in objlist:
newobj = self.obj_to_dict(obj)
bsdlist.append(newobj)
return bsdlist
def obj_to_dict(self, obj):
"""
Convert a PBS/PTL object (e.g. Server/Job...) into a
dictionary format
:param obj: ``PBS/PTL`` object
:returns: Dictionary of ``PBS/PTL`` objects
"""
newobj = dict(obj.attributes.items())
        newobj['id'] = obj.name
return newobj
def parse_execvnode(self, s=None):
"""
Parse an execvnode string into chunk objects
:param s: Execvnode string
:type s: str or None
:returns: Chunk objects for parsed execvnode string
"""
if s is None:
return None
chunks = []
start = 0
for c in range(len(s)):
if s[c] == '(':
start = c + 1
if s[c] == ')':
chunks.append(PbsTypeChunk(chunkstr=s[start:c]).info)
return chunks
def anupbs_exechost_numhosts(self, s=None):
"""
:param s: Exechost string
:type s: str or None
"""
n = 0
if '[' in s:
eh = re.sub(r'.*\[(.*)\].*', r'\1', s)
hosts = eh.split(',')
for hid in hosts:
elm = hid.split('-')
if len(elm) == 2:
n += int(elm[1]) - int(elm[0]) + 1
else:
n += 1
else:
n += 1
return n
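    # Illustrative usage sketch (hypothetical value): with NAS-style bracket
    # notation, anupbs_exechost_numhosts('vnode[0-3,5]/0') should return 5,
    # since '0-3' expands to four hosts and '5' adds one more.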
def parse_exechost(self, s=None):
"""
Parse an exechost string into a dictionary representation
:param s: String to be parsed
:type s: str or None
:returns: Dictionary format of the exechost string
"""
if s is None:
return None
hosts = []
hsts = s.split('+')
for h in hsts:
hi = {}
ti = {}
(host, task) = h.split('/',)
d = task.split('*')
if len(d) == 1:
taskslot = d[0]
ncpus = 1
elif len(d) == 2:
(taskslot, ncpus) = d
else:
(taskslot, ncpus) = (0, 1)
ti['task'] = taskslot
ti['ncpus'] = ncpus
hi[host] = ti
hosts.append(hi)
return hosts
def parse_select(self, s=None):
"""
Parse a ``select/schedselect`` string into a list
of dictionaries.
:param s: select/schedselect string
:type s: str or None
:returns: List of dictonaries
"""
if s is None:
return
info = []
chunks = s.split('+')
for chunk in chunks:
d = chunk.split(':')
numchunks = int(d[0])
resources = {}
for e in d[1:]:
k, v = e.split('=')
resources[k] = v
for _ in range(numchunks):
info.append(resources)
return info
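    # Illustrative usage sketch (hypothetical value):
    # parse_select('2:ncpus=4+1:mem=2gb') is expected to return
    # [{'ncpus': '4'}, {'ncpus': '4'}, {'mem': '2gb'}], i.e. one dictionary
    # per chunk with the repeat count expanded.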
@classmethod
def isfloat(cls, value):
"""
returns true if value is a float or a string representation
of a float returns false otherwise
:param value: value to be checked
:type value: str or int or float
:returns: True or False
"""
if isinstance(value, float):
return True
if isinstance(value, str):
try:
float(value)
return True
            except ValueError:
                return False
        return False
@classmethod
def decode_value(cls, value):
"""
Decode an attribute/resource value, if a value is
made up of digits only then return the numeric value
of it, if it is made of alphanumeric values only, return
it as a string, if it is of type size, i.e., with a memory
unit such as b,kb,mb,gb then return the converted size to
kb without the unit
:param value: attribute/resource value
:type value: str or int
:returns: int or float or string
"""
if value is None or callable(value):
return value
if isinstance(value, (int, float)):
return value
if value.isdigit():
return int(value)
if value.isalpha() or value == '':
return value
if cls.isfloat(value):
return float(value)
if ':' in value:
try:
value = int(PbsTypeDuration(value))
except ValueError:
pass
return value
# TODO revisit: assume (this could be the wrong type, need a real
# data model anyway) that the remaining is a memory expression
try:
value = PbsTypeSize(value)
return value.value
except ValueError:
pass
except TypeError:
# if not then we pass to return the value as is
pass
return value
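    # Illustrative usage sketch (hypothetical values, assuming PbsTypeDuration
    # and PbsTypeSize behave as their use above suggests): decode_value('2')
    # should give the int 2, decode_value('1.5') the float 1.5,
    # decode_value('00:10:00') the duration 600, and a size such as '2gb'
    # should come back as its kb equivalent (2097152).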
def convert_time(self, val, fmt='%a %b %d %H:%M:%S %Y'):
"""
Convert a date time format into number of seconds
since epoch
:param val: date time value
:param fmt: date time format
:type fmt: str
:returns: seconds
"""
# Tweak for NAS format that puts the number of seconds since epoch
# in between
if val.split()[0].isdigit():
val = int(val.split()[0])
elif not val.isdigit():
val = time.strptime(val, fmt)
val = int(time.mktime(val))
return val
def convert_duration(self, val):
"""
        Convert HH:MM:SS into a number of seconds.
        If a plain number is fed in, that number is returned.
        If the value matches neither format, an error is logged
        and 0 is returned.
        :param val: duration value
        :type val: str
        :returns: seconds
"""
if val.isdigit():
return int(val)
hhmmss = val.split(':')
if len(hhmmss) != 3:
self.logger.error('Incorrect format, expected HH:MM:SS')
return 0
return int(hhmmss[0]) * 3600 + int(hhmmss[1]) * 60 + int(hhmmss[2])
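    # Illustrative usage sketch (hypothetical values):
    # convert_duration('01:30:00') should return 5400, and
    # convert_duration('90') should return 90 unchanged.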
def convert_seconds_to_resvtime(self, tm, fmt=None, seconds=True):
"""
        Convert a number of seconds since epoch into a reservation
        time string.
        :param tm: the time, in seconds since epoch, to convert
        :type tm: int or str
        :param fmt: optional format string. If used, the seconds
                    parameter is ignored. Defaults to ``%Y%m%d%H%M``
        :type fmt: str or None
        :param seconds: if True, format the time with seconds
                        granularity. Defaults to True.
        :type seconds: bool
        :returns: Formatted reservation time string
"""
if fmt is None:
fmt = "%Y%m%d%H%M"
if seconds:
fmt += ".%S"
return time.strftime(fmt, time.localtime(int(tm)))
def convert_stime_to_seconds(self, st):
"""
Convert a time to seconds, if we fail we return the
original time
:param st: Time to be converted
:type st: str
:returns: Number of seconds
"""
try:
ret = time.mktime(time.strptime(st, '%a %b %d %H:%M:%S %Y'))
except:
ret = st
return ret
def convert_dedtime(self, dtime):
"""
Convert dedicated time string of form %m/%d/%Y %H:%M.
:param dtime: A datetime string, as an entry in the
dedicated_time file
:type dtime: str
:returns: A tuple of (from,to) of time since epoch
"""
dtime_from = None
dtime_to = None
m = self.dt_tag.match(dtime.strip())
if m:
try:
_f = "%m/%d/%Y %H:%M"
dtime_from = self.convert_datetime_to_epoch(m.group('dt_from'),
fmt=_f)
dtime_to = self.convert_datetime_to_epoch(m.group('dt_to'),
fmt=_f)
except:
self.logger.error('error converting dedicated time')
return (dtime_from, dtime_to)
def convert_datetime_to_epoch(self, mdyhms, fmt="%m/%d/%Y %H:%M:%S"):
"""
Convert the date time to epoch
:param mdyhms: date time
:type mdyhms: str
:param fmt: Format for date time
:type fmt: str
:returns: Epoch time
"""
return int(time.mktime(time.strptime(mdyhms, fmt)))
def compare_versions(self, v1, v2, op=None):
"""
Compare v1 to v2 with respect to operation op
:param v1: If not a looseversion, it gets converted
to it
:param v2: If not a looseversion, it gets converted
to it
:param op: An operation, one of ``LT``, ``LE``, ``EQ``,
``GE``, ``GT``
:type op: str
:returns: True or False
"""
if op is None:
self.logger.error('missing operator, one of LT,LE,EQ,GE,GT')
return None
if v1 is None or v2 is None:
return False
if isinstance(v1, str):
v1 = LooseVersion(v1)
if isinstance(v2, str):
v2 = LooseVersion(v2)
if op == GT:
if v1 > v2:
return True
elif op == GE:
if v1 >= v2:
return True
elif op == EQ:
if v1 == v2:
return True
elif op == LT:
if v1 < v2:
return True
elif op == LE:
if v1 <= v2:
return True
return False
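    # Illustrative usage sketch (hypothetical versions, assuming GT and GE are
    # the PTL comparison constants referenced above):
    # compare_versions('19.2.0', '19.1.3', op=GT) should return True, while
    # compare_versions('18.1.0', '19.0.0', op=GE) should return False.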
def convert_arglist(self, attr):
"""
strip the XML attributes from the argument list attribute
:param attr: Argument list attributes
:type attr: List
:returns: Stripped XML attributes
"""
xmls = "<jsdl-hpcpa:Argument>"
xmle = "</jsdl-hpcpa:Argument>"
nattr = attr.replace(xmls, " ")
nattr = nattr.replace(xmle, " ")
return nattr.strip()
def convert_to_cli(self, attrs, op=None, hostname=None, dflt_conf=True,
exclude_attrs=None):
"""
Convert attributes into their CLI format counterpart. This
method is far from complete, it grows as needs come by and
could use a rewrite, especially going along with a rewrite
of pbs_api_to_cli
:param attrs: Attributes to convert
:type attrs: List or str or dictionary
:param op: The qualifier of the operation being performed,
such as ``IFL_SUBMIT``, ``IFL_DELETE``,
                   ``IFL_TERMINATE``...
:type op: str or None
:param hostname: The name of the host on which to operate
:type hostname: str or None
:param dflt_conf: Whether we are using the default PBS
configuration
:type dflt_conf: bool
:param exclude_attrs: Optional list of attributes to not
convert
:type exclude_attrs: List
:returns: CLI format of attributes
"""
ret = []
if op == IFL_SUBMIT:
executable = arglist = None
elif op == IFL_DELETE:
_c = []
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for a in attrs:
if 'force' in a:
_c.append('-W')
_c.append('force')
if 'deletehist' in a:
_c.append('-x')
return _c
elif op == IFL_TERMINATE:
_c = []
if attrs is None:
_c = []
elif isinstance(attrs, str):
_c = ['-t', attrs]
else:
if ((attrs & SHUT_QUICK) == SHUT_QUICK):
_c = ['-t', 'quick']
if ((attrs & SHUT_IMMEDIATE) == SHUT_IMMEDIATE):
_c = ['-t', 'immediate']
if ((attrs & SHUT_DELAY) == SHUT_DELAY):
_c = ['-t', 'delay']
if ((attrs & SHUT_WHO_SCHED) == SHUT_WHO_SCHED):
_c.append('-s')
if ((attrs & SHUT_WHO_MOM) == SHUT_WHO_MOM):
_c.append('-m')
if ((attrs & SHUT_WHO_SECDRY) == SHUT_WHO_SECDRY):
_c.append('-f')
if ((attrs & SHUT_WHO_IDLESECDRY) == SHUT_WHO_IDLESECDRY):
_c.append('-F')
if ((attrs & SHUT_WHO_SECDONLY) == SHUT_WHO_SECDONLY):
_c.append('-i')
return _c
if attrs is None or len(attrs) == 0:
return ret
# if a list, convert to a dictionary to fall into a single processing
# of the attributes
if (isinstance(attrs, list) and len(attrs) > 0 and
not isinstance(attrs[0], tuple)):
tmp_attrs = {}
for each_attr in attrs:
tmp_attrs[each_attr] = ''
del attrs
attrs = tmp_attrs
del tmp_attrs
if isinstance(attrs, (dict, OrderedDict)):
attrs = attrs.items()
for a, v in attrs:
if exclude_attrs is not None and a in exclude_attrs:
continue
if op == IFL_SUBMIT:
if a == ATTR_executable:
executable = v
continue
if a == ATTR_Arglist:
if v is not None:
arglist = self.convert_arglist(v)
if len(arglist) == 0:
return []
continue
if isinstance(v, list):
v = ','.join(v)
# when issuing remote commands, escape spaces in attribute values
if (((hostname is not None) and
(not self.du.is_localhost(hostname))) or
(not dflt_conf)):
if ' ' in str(v):
v = '"' + v + '"'
if '.' in a:
(attribute, resource) = a.split('.')
ret.append('-' + api_to_cli[attribute])
rv = resource
if v is not None:
rv += '=' + str(v)
ret.append(rv)
else:
try:
val = api_to_cli[a]
except KeyError:
self.logger.error('error retrieving key ' + str(a))
# for unknown or junk options
ret.append(a)
if v is not None:
ret.append(str(v))
continue
# on a remote job submit append the remote server name
# to the queue name
if ((op == IFL_SUBMIT) and (hostname is not None)):
if ((not self.du.is_localhost(hostname)) and
(val == 'q') and (v is not None) and
('@' not in v) and (v != '')):
v += '@' + hostname
val = '-' + val
if '=' in val:
if v is not None:
ret.append(val + str(v))
else:
ret.append(val)
else:
ret.append(val)
if v is not None:
ret.append(str(v))
# Executable and argument list must come last in a job submission
if ((op == IFL_SUBMIT) and (executable is not None)):
ret.append('--')
ret.append(executable)
if arglist is not None:
ret.append(arglist)
return ret
def filter_batch_status(self, bs, attrib):
"""
Filter out elements that don't have the attributes requested
This is needed to adapt to the fact that requesting a
resource attribute returns all ``'<resource-name>.*'``
attributes so we need to ensure that the specific resource
requested is present in the stat'ed object.
This is needed especially when calling expect with an op=NE
because we need to filter on objects that have exactly
the attributes requested
:param bs: Batch status
:param attrib: Requested attributes
:type attrib: str or dictionary
:returns: Filtered batch status
"""
if isinstance(attrib, dict):
keys = attrib.keys()
elif isinstance(attrib, str):
keys = attrib.split(',')
else:
keys = attrib
if keys:
del_indices = []
for idx in range(len(bs)):
for k in bs[idx].keys():
if '.' not in k:
continue
if k != 'id' and k not in keys:
del bs[idx][k]
# if no matching resources, remove the object
if len(bs[idx]) == 1:
del_indices.append(idx)
for i in sorted(del_indices, reverse=True):
del bs[i]
return bs
def convert_attributes_by_op(self, attributes, setattrs=False):
"""
Convert attributes by operator, i.e. convert an attribute
of the form
``<attr_name><op><value>`` (e.g. resources_available.ncpus>4)
to
``<attr_name>: (<op>, <value>)``
(e.g. resources_available.ncpus: (GT, 4))
:param attributes: the attributes to convert
:type attributes: List
:param setattrs: if True, set the attributes with no operator
as (SET, '')
:type setattrs: bool
:returns: Converted attributes by operator
"""
# the order of operator matters because they are used to search by
# regex so the longer strings to search must come first
operators = ('<=', '>=', '!=', '=', '>', '<', '~')
d = {}
for attr in attributes:
found = False
for op in operators:
if op in attr:
a = attr.split(op)
d[a[0]] = (PTL_STR_TO_OP[op], a[1])
found = True
break
if not found and setattrs:
d[attr] = (SET, '')
return d
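    # Illustrative usage sketch (hypothetical attribute, assuming PTL_STR_TO_OP
    # maps '>' to GT): convert_attributes_by_op(['resources_available.ncpus>4'])
    # is expected to return {'resources_available.ncpus': (GT, '4')}.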
def operator_in_attribute(self, attrib):
"""
Returns True if an operator string is present in an
attribute name
:param attrib: Attribute name
:type attrib: str
:returns: True or False
"""
operators = PTL_STR_TO_OP.keys()
for a in attrib:
for op in operators:
if op in a:
return True
return False
def list_resources(self, objtype=None, objs=[]):
"""
Lists the resources
:param objtype: Type of the object
:type objtype: str
:param objs: Object list
:type objs: List
:returns: List of resources
"""
if objtype in (VNODE, NODE, SERVER, QUEUE, SCHED):
prefix = 'resources_available.'
elif objtype in (JOB, RESV):
prefix = 'Resource_List.'
else:
return
resources = []
for o in objs:
for a in o.keys():
if a.startswith(prefix):
res = a.replace(prefix, '')
if res not in resources:
resources.append(res)
return resources
def compare(self, obj1, obj2, showdiff=False):
"""
Compare two objects.
:param showdiff: whether to print the specific differences,
defaults to False
:type showdiff: bool
:returns: 0 if objects are identical and non zero otherwise
"""
if not showdiff:
ret = cmp(obj1, obj2)
if ret != 0:
self.logger.info('objects differ')
return ret
if not isinstance(obj1, type(obj2)):
self.logger.error('objects are of different type')
return 1
if isinstance(obj1, list):
if len(obj1) != len(obj2):
self.logger.info(
'comparing ' + str(
obj1) + ' and ' + str(
obj2))
self.logger.info('objects are of different lengths')
return
for i in range(len(obj1)):
self.compare(obj1[i], obj2[i], showdiff=showdiff)
return
if isinstance(obj1, dict):
self.logger.info('comparing ' + str(obj1) + ' and ' + str(obj2))
onlyobj1 = []
diffobjs = []
onlyobj2 = []
for k1, v1 in obj1.items():
if k1 not in obj2:
onlyobj1.append(k1 + '=' + str(v1))
if k1 in obj2 and obj2[k1] != v1:
diffobjs.append(
k1 + '=' + str(v1) + ' vs ' + k1 + '=' + str(obj2[k1]))
for k2, v2 in obj2.items():
if k2 not in obj1:
onlyobj2.append(k2 + '=' + str(v2))
if len(onlyobj1) > 0:
self.logger.info("only in first object: " + " ".join(onlyobj1))
if len(onlyobj2) > 0:
self.logger.info(
"only in second object: " + " ".join(onlyobj2))
if len(diffobjs) > 0:
self.logger.info("diff between objects: " + " ".join(diffobjs))
if len(onlyobj1) == len(onlyobj2) == len(diffobjs) == 0:
self.logger.info("objects are identical")
return 0
return 1
@classmethod
def random_str(cls, length=1, prefix=''):
"""
Generates the random string
:param length: Length of the string
:type length: int
:param prefix: Prefix of the string
:type prefix: str
:returns: Random string
"""
r = [random.choice(string.letters) for _ in range(length)]
r = ''.join([prefix] + r)
if hasattr(cls, '__uniq_rstr'):
while r in cls.__uniq_rstr:
r = [random.choice(string.letters) for _ in range(length)]
r = ''.join([prefix] + r)
cls.__uniq_rstr.append(r)
else:
cls.__uniq_rstr = [r]
return r
def _make_template_formula(self, formula):
"""
Create a template of the formula
:param formula: Formula for which template is to be created
:type formula: str
:returns: Template
"""
tformula = []
skip = False
for c in formula:
if not skip and c.isalpha():
tformula.append('$')
skip = True
if c in ('+', '-', '/', ' ', '*', '%'):
skip = False
tformula.append(c)
return "".join(tformula)
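    # Illustrative usage sketch (hypothetical formula):
    # _make_template_formula('ncpus + mem') should return '$ + $', i.e. each
    # resource name collapses to a '$' placeholder while operators and spaces
    # are preserved.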
def update_attributes_list(self, obj):
"""
Updates the attribute list
:param obj: Objects
:returns: Updated attribute list
"""
if not hasattr(obj, 'attributes'):
return
if not hasattr(obj, 'Resource_List'):
setattr(obj, 'Resource_List', {})
for attr, val in obj.attributes.items():
if attr.startswith('Resource_List.'):
(_, resource) = attr.split('.')
obj.Resource_List[resource] = val
def parse_fgc_limit(self, limstr=None):
"""
Parse an ``FGC`` limit entry, of the form:
``<limtype>[.<resource>]=\[<entity_type>:<entity_name>
=<entity_value>\]``
:param limstr: FGC limit string
:type limstr: str or None
:returns: Parsed FGC string in given format
"""
m = self.lim_tag.match(limstr)
if m:
_v = str(self.decode_value(m.group('entity_value')))
return (m.group('limtype'), m.group('resource'),
m.group('entity_type'), m.group('entity_name'), _v)
return None
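    # Illustrative usage sketch (hypothetical limit string):
    # parse_fgc_limit('max_run_res.ncpus=[u:user1=4]') is expected to return
    # ('max_run_res', 'ncpus', 'u', 'user1', '4').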
def is_job_array(self, jobid):
"""
If a job array return True, otherwise return False
:param jobid: PBS jobid
:returns: True or False
"""
if self.array_tag.match(jobid):
return True
return False
def is_subjob(self, jobid):
"""
If a subjob of a job array, return the subjob id
otherwise return False
:param jobid: PBS job id
:type jobid: str
        :returns: Subjob id if jobid is a subjob, False otherwise
"""
m = self.subjob_tag.match(jobid)
if m:
return m.group('subjobid')
return False
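    # Illustrative usage sketch (hypothetical job ids):
    # is_job_array('123[].server') should return True, while
    # is_subjob('123[5].server') should return '5' and
    # is_subjob('123.server') should return False.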
class PbsTypeFGCLimit(object):
"""
FGC limit entry, of the form:
``<limtype>[.<resource>]=\[<entity_type>:<entity_name>=
<entity_value>\]``
:param attr: FGC limit attribute
:type attr: str
:param value: Value of attribute
:type value: int
:returns: FGC limit entry of given format
"""
fgc_attr_pat = re.compile("(?P<ltype>[a-z_]+)[\.]*(?P<resource>[\w\d-]*)")
fgc_val_pat = re.compile("[\s]*\[(?P<etype>[ugpo]):(?P<ename>[\w\d-]+)"
"=(?P<eval>[\d]+)\][\s]*")
utils = BatchUtils()
def __init__(self, attr, val):
self.attr = attr
self.val = val
a = self.fgc_attr_pat.match(attr)
if a:
self.limit_type = a.group('ltype')
self.resource_name = a.group('resource')
else:
self.limit_type = None
self.resource_name = None
v = self.fgc_val_pat.match(val)
if v:
self.lim_value = self.utils.decode_value(v.group('eval'))
self.entity_type = v.group('etype')
self.entity_name = v.group('ename')
else:
self.lim_value = None
self.entity_type = None
self.entity_name = None
def __val__(self):
return ('[' + str(self.entity_type) + ':' +
str(self.entity_name) + '=' + str(self.lim_value) + ']')
def __str__(self):
return (self.attr + ' = ' + self.__val__())
class PbsBatchStatus(list):
"""
Wrapper class for Batch Status object
Converts a batch status (as dictlist) into a list of
PbsBatchObjects
:param bs: Batch status
:type bs: List or dictionary
:returns: List of PBS batch objects
"""
def __init__(self, bs):
if not isinstance(bs, (list, dict)):
raise TypeError("Expected a list or dictionary")
if isinstance(bs, dict):
self.append(PbsBatchObject(bs))
else:
for b in bs:
self.append(PbsBatchObject(b))
    def __str__(self):
        bu = BatchUtils()
        rv = []
        for b in self:
            d = dict([(a.name, a.value) for a in b])
            rv += [bu.batch_status_as_dict_to_str(d)]
        return "\n".join(rv)
class PbsBatchObject(list):
def __init__(self, bs):
self.set_batch_status(bs)
def set_batch_status(self, bs):
"""
Sets the batch status
:param bs: Batch status
"""
if 'id' in bs:
self.name = bs['id']
for k, v in bs.items():
self.append(PbsAttribute(k, v))
class PbsAttribute(object):
"""
Descriptor class for PBS attribute
:param name: PBS attribute name
:type name: str
:param value: Value for the attribute
:type value: str or int or float
"""
utils = BatchUtils()
def __init__(self, name=None, value=None):
self.set_name(name)
self.set_value(value)
def set_name(self, name):
"""
Set PBS attribute name
:param name: PBS attribute
:type name: str
"""
self.name = name
if name is not None and '.' in name:
self.is_resource = True
self.resource_type, self.resource_name = self.name.split('.')
else:
self.is_resource = False
self.resource_type = self.resource_name = None
def set_value(self, value):
"""
Set PBS attribute value
:param value: Value of PBS attribute
:type value: str or int or float
"""
self.value = value
if isinstance(value, (int, float)) or str(value).isdigit():
self.is_consumable = True
else:
self.is_consumable = False
def obfuscate_name(self, a=None):
"""
Obfuscate PBS attribute name
"""
if a is not None:
on = a
else:
on = self.utils.random_str(len(self.name))
self.decoded_name = self.name
if self.is_resource:
            self.set_name(self.resource_type + '.' + on)
def obfuscate_value(self, v=None):
"""
Obfuscate PBS attribute value
"""
        if not self.is_consumable:
self.decoded_value = self.value
return
if v is not None:
ov = v
else:
ov = self.utils.random_str(len(self.value))
self.decoded_value = self.value
self.set_value(ov)
class PbsAnonymizer(object):
"""
    Holds and controls anonymizing operations of PBS data.
    When a dictionary is given, the values associated to each key
    are substituted during obfuscation.
    The anonymizer operates on attributes or resources.
    Resources operate on the resource name itself rather than the
    entire attribute name; for example, to obfuscate the values
    associated to a custom resource "foo" that could be set as
    resources_available.foo, resources_default.foo or
    Resource_List.foo, all that needs to be passed in is "foo" in
    the resc_val list.
:param attr_key: Attribute key
:type attr_key: str or None
:param attr_val: Attribute value
:type attr_val: str or None
:param resc_key: Resource key
:type resc_key: str or None
:param resc_val: Resource value
:type resc_val: str or None
"""
logger = logging.getLogger(__name__)
utils = BatchUtils()
du = DshUtils()
def __init__(self, attr_delete=None, resc_delete=None,
attr_key=None, attr_val=None,
resc_key=None, resc_val=None):
# special cases
self._entity = False
self.job_sort_formula = None
self.schedselect = None
self.select = None
self.set_attr_delete(attr_delete)
self.set_resc_delete(resc_delete)
self.set_attr_key(attr_key)
self.set_attr_val(attr_val)
self.set_resc_key(resc_key)
self.set_resc_val(resc_val)
self.anonymize = self.anonymize_batch_status
# global anonymized mapping data
self.gmap_attr_val = {}
self.gmap_resc_val = {}
self.gmap_attr_key = {}
self.gmap_resc_key = {}
def _initialize_key_map(self, keys):
k = {}
if keys is not None:
if isinstance(keys, dict):
return keys
elif isinstance(keys, list):
for i in keys:
k[i] = None
elif isinstance(keys, str):
for i in keys.split(','):
k[i] = None
else:
self.logger.error('unhandled map type')
k = {None: None}
return k
def _initialize_value_map(self, keys):
k = {}
if keys is not None:
if isinstance(keys, dict):
return keys
elif isinstance(keys, list):
for i in keys:
k[i] = {}
elif isinstance(keys, str):
for i in keys.split(','):
k[i] = {}
else:
self.logger.error('unhandled map type')
k = {None: None}
return k
def set_attr_delete(self, ad):
"""
Name of attributes to delete
:param ad: Attributes to delete
:type ad: str or list or dictionary
"""
self.attr_delete = self._initialize_value_map(ad)
def set_resc_delete(self, rd):
"""
Name of resources to delete
:param rd: Resources to delete
:type rd: str or list or dictionary
"""
self.resc_delete = self._initialize_value_map(rd)
def set_attr_key(self, ak):
"""
Name of attributes to obfuscate.
:param ak: Attribute keys
:type ak: str or list or dictionary
"""
self.attr_key = self._initialize_key_map(ak)
def set_attr_val(self, av):
"""
Name of attributes for which to obfuscate the value
:param av: Attributes value to obfuscate
:type av: str or list or dictionary
"""
self.attr_val = self._initialize_value_map(av)
if 'euser' in self.attr_val:
self._entity = True
elif 'egroup' in self.attr_val:
self._entity = True
elif 'project' in self.attr_val:
self._entity = True
def set_resc_key(self, rk):
"""
Name of resources to obfuscate
:param rk: Resource key
:type rk: str or list or dictionary
"""
self.resc_key = self._initialize_key_map(rk)
def set_resc_val(self, rv):
"""
Name of resources for which to obfuscate the value
:param rv: Resource value to obfuscate
:type rv: str or list or dictionary
"""
self.resc_val = self._initialize_value_map(rv)
def set_anon_map_file(self, name):
"""
Name of file in which to store anonymized map data.
This file is meant to remain private to a site as it
contains the sensitive anonymized data.
:param name: Name of file to which anonymized data to store.
:type name: str
"""
self.anon_map_file = name
def anonymize_resource_group(self, file):
"""
Anonymize the user and group fields of a resource
group file
:param file: Resource group file
:type file: str
"""
anon_rg = []
try:
f = open(file)
lines = f.readlines()
f.close()
except:
self.logger.error("Error processing " + file)
return None
for data in lines:
data = data.strip()
if data:
if data[0] == '#':
continue
_d = data.split()
ug = _d[0]
if ':' in ug:
(euser, egroup) = ug.split(':')
else:
euser = ug
egroup = None
if 'euser' not in self.attr_val:
anon_euser = euser
else:
anon_euser = None
if 'euser' in self.gmap_attr_val:
if euser in self.gmap_attr_val['euser']:
anon_euser = self.gmap_attr_val['euser'][euser]
else:
self.gmap_attr_val['euser'] = {}
if euser is not None and anon_euser is None:
anon_euser = self.utils.random_str(len(euser))
self.gmap_attr_val['euser'][euser] = anon_euser
if 'egroup' not in self.attr_val:
anon_egroup = egroup
else:
anon_egroup = None
if egroup is not None:
if 'egroup' in self.gmap_attr_val:
if egroup in self.gmap_attr_val['egroup']:
anon_egroup = (self.gmap_attr_val['egroup']
[egroup])
else:
self.gmap_attr_val['egroup'] = {}
if egroup is not None and anon_egroup is None:
anon_egroup = self.utils.random_str(len(egroup))
self.gmap_attr_val['egroup'][egroup] = anon_egroup
# reconstruct the fairshare info by combining euser and egroup
out = [anon_euser]
if anon_egroup is not None:
out[0] += ':' + anon_egroup
# and appending the rest of the original line
out.append(_d[1])
                if len(_d) > 2:
p = _d[2].strip()
if ('euser' in self.gmap_attr_val and
p in self.gmap_attr_val['euser']):
out.append(self.gmap_attr_val['euser'][p])
else:
out.append(_d[2])
if len(_d) > 2:
out += _d[3:]
anon_rg.append(" ".join(out))
return anon_rg
def anonymize_resource_def(self, resources):
"""
Anonymize the resource definition
"""
if not self.resc_key:
return resources
for curr_anon_resc, val in self.resc_key.items():
if curr_anon_resc in resources:
tmp_resc = copy.copy(resources[curr_anon_resc])
del resources[curr_anon_resc]
if val is None:
if curr_anon_resc in self.gmap_resc_key:
val = self.gmap_resc_key[curr_anon_resc]
else:
val = self.utils.random_str(len(curr_anon_resc))
elif curr_anon_resc not in self.gmap_resc_key:
self.gmap_resc_key[curr_anon_resc] = val
tmp_resc.set_name(val)
resources[val] = tmp_resc
return resources
def __anonymize_fgc(self, d, attr, ar, name, val):
"""
Anonymize an FGC limit value
"""
m = {'u': 'euser', 'g': 'egroup', 'p': 'project'}
if ',' in val:
fgc_lim = val.split(',')
else:
fgc_lim = [val]
nfgc = []
for lim in fgc_lim:
_fgc = PbsTypeFGCLimit(attr, lim)
ename = _fgc.entity_name
if ename in ('PBS_GENERIC', 'PBS_ALL'):
nfgc.append(lim)
continue
obf_ename = ename
for etype, nm in m.items():
if _fgc.entity_type == etype:
if nm not in self.gmap_attr_val:
if nm in ar and ename in ar[nm]:
obf_ename = ar[nm][ename]
else:
obf_ename = self.utils.random_str(len(ename))
self.gmap_attr_val[nm] = {ename: obf_ename}
                    elif ename in self.gmap_attr_val[nm]:
                        obf_ename = self.gmap_attr_val[nm][ename]
break
_fgc.entity_name = obf_ename
nfgc.append(_fgc.__val__())
d[attr] = ",".join(nfgc)
def __anonymize_attr_val(self, d, attr, ar, name, val):
"""
        Obfuscate attribute/resource values
"""
# don't obfuscate default project
if attr == 'project' and val == '_pbs_project_default':
return
nstr = []
if '.' in attr:
m = self.gmap_resc_val
else:
m = self.gmap_attr_val
if val in ar[name]:
nstr.append(ar[name][val])
if name in self.lmap:
self.lmap[name][val] = ar[name][val]
else:
self.lmap[name] = {val: ar[name][val]}
if name not in m:
m[name] = {val: ar[name][val]}
elif val not in m[name]:
m[name][val] = ar[name][val]
else:
# Obfuscate by randomizing with a value of the same length
tmp_v = val.split(',')
for v in tmp_v:
if v in ar[name]:
r = ar[name][v]
elif name in m and v in m[name]:
r = m[name][v]
else:
r = self.utils.random_str(len(v))
if not isinstance(ar[name], dict):
ar[name] = {}
ar[name][v] = r
self.lmap[name] = {v: r}
if name not in m:
m[name] = {v: r}
elif v not in m[name]:
m[name][v] = r
nstr.append(r)
if d is not None:
d[attr] = ",".join(nstr)
def __anonymize_attr_key(self, d, attr, ar, name, res):
"""
Obfuscate an attribute/resource key
"""
if res is not None:
m = self.gmap_resc_key
else:
m = self.gmap_attr_key
if not ar[name]:
if name in m:
ar[name] = m[name]
else:
randstr = self.utils.random_str(len(name))
ar[name] = randstr
m[name] = randstr
if d is not None:
tmp_val = d[attr]
del d[attr]
if res is not None:
d[res + '.' + ar[name]] = tmp_val
else:
d[ar[name]] = tmp_val
if name not in self.lmap:
self.lmap[name] = ar[name]
if name not in m:
m[name] = ar[name]
def anonymize_batch_status(self, data=None):
"""
Anonymize arbitrary batch_status data
:param data: Batch status data
:type data: List or dictionary
"""
if not isinstance(data, (list, dict)):
self.logger.error('data expected to be dict or list')
return None
if isinstance(data, dict):
dat = [data]
else:
dat = data
# Local mapping data used to store obfuscation mapping data for this
# specific item, d
self.lmap = {}
# loop over each "batch_status" entry to obfuscate
for d in dat:
if self.attr_delete is not None:
for todel in self.attr_delete:
if todel in d:
del d[todel]
if self.resc_delete is not None:
for todel in self.resc_delete:
for tmpk, _ in d.items():
if '.' in tmpk and todel == tmpk.split('.')[1]:
del d[tmpk]
# Loop over each object's attributes, this is where the special
# cases are handled (e.g., FGC limits, formula, select spec...)
for attr in d:
val = d[attr]
if '.' in attr:
(res_type, res_name) = attr.split('.')
else:
res_type = None
res_name = attr
if res_type is not None:
if self._entity and attr.startswith('max_run'):
self.__anonymize_fgc(d, attr, self.attr_val,
attr, val)
if res_name in self.resc_val:
if attr.startswith('max_run'):
self.__anonymize_fgc(d, attr, self.attr_val,
attr, val)
self.__anonymize_attr_val(d, attr, self.resc_val,
res_name, val)
if res_name in self.resc_key:
self.__anonymize_attr_key(d, attr, self.resc_key,
res_name, res_type)
else:
if attr in self.attr_val:
self.__anonymize_attr_val(d, attr, self.attr_val,
attr, val)
if attr in self.attr_key:
self.__anonymize_attr_key(d, attr, self.attr_key,
attr, None)
if ((attr in ('job_sort_formula', 'schedselect',
'select')) and self.resc_key):
for r in self.resc_key:
if r in val:
if r not in self.gmap_resc_key:
self.gmap_resc_key[
r] = self.utils.random_str(len(r))
val = val.replace(r, self.gmap_resc_key[r])
setattr(self, attr, val)
d[attr] = val
def anonymize_file(self, filename, extension='.anon', inplace=False):
"""
        Replace every occurrence of any entry in the global
        map for the given file by its anonymized values.
        Returns a file named after the original file with the
        extension suffix. If inplace is True, returns the original
        file name whose contents have been replaced.
:param filename: Name of the file to anonymize
:type filename: str
:param extension: Extension of the anonymized file
:type extension: str
:param inplace: If true returns the original file name for
which contents have been replaced
:type inplace: bool
"""
if not inplace:
fn = (filename + extension)
nf = open(fn, 'w')
else:
(_, fn) = self.du.mkstemp()
nf = open(fn, "w")
f = open(filename)
for data in f:
for k in self.attr_key:
if k in data:
                    if k not in self.gmap_attr_key:
                        ak = self.utils.random_str(len(k))
                        self.gmap_attr_key[k] = ak
                    data = data.replace(k, self.gmap_attr_key[k])
for k in self.resc_key:
if k not in self.gmap_resc_key:
rk = self.utils.random_str(len(k))
self.gmap_resc_key[k] = rk
else:
rk = self.gmap_resc_key[k]
data = data.replace(k, self.gmap_resc_key[k])
for ak, av in self.gmap_attr_val.items():
for k, v in av.items():
data = data.replace(k, v)
for ak, av in self.gmap_resc_val.items():
for k, v in av.items():
data = data.replace(k, v)
nf.write(data)
nf.close()
f.close()
if inplace:
self.du.run_cmd(cmd=['mv', fn, filename])
return filename
return fn
def anonymize_accounting_log(self, logfile):
"""
Anonymize the accounting log
        :param logfile: Accounting log file
:type logfile: str
"""
try:
f = open(logfile)
except:
self.logger.error("Error processing " + logfile)
return None
if 'euser' in self.attr_val:
self.attr_val['user'] = self.attr_val['euser']
self.attr_val['requestor'] = self.attr_val['euser']
if 'egroup' in self.attr_val:
self.attr_val['group'] = self.attr_val['egroup']
if 'euser' in self.gmap_attr_val:
self.gmap_attr_val['user'] = self.gmap_attr_val['euser']
if 'egroup' in self.gmap_attr_val:
self.gmap_attr_val['group'] = self.gmap_attr_val['egroup']
anon_data = []
for data in f:
# accounting log format is
# %Y/%m/%d %H:%M:%S;<Key>;<Id>;<key1=val1> <key2=val2> ...
curr = data.split(';', 3)
if curr[1] in ('A', 'L'):
anon_data.append(data.strip())
continue
buf = curr[3].strip().split(' ')
# Split the attribute list into key value pairs
kvl = map(lambda n: n.split('=', 1), buf)
for i in range(len(kvl)):
k, v = kvl[i]
if k == 'requestor':
if '@' in v:
(v, host) = v.split('@', 1)
if k in self.attr_val:
if k == 'project' and v == '_pbs_project_default':
continue
anon_kv = None
if k in self.gmap_attr_val:
if v in self.gmap_attr_val[k]:
anon_kv = self.gmap_attr_val[k][v]
else:
self.gmap_attr_val[k] = {}
if anon_kv is None:
anon_kv = self.utils.random_str(len(v))
self.gmap_attr_val[k][v] = anon_kv
kvl[i][1] = anon_kv
# append server from where request was made
if k == 'requestor':
kvl[i][1] += '@' + host
if k in self.attr_key:
if k in self.gmap_attr_key:
                        anon_ak = self.gmap_attr_key[k]
else:
anon_ak = self.utils.random_str(len(k))
self.gmap_attr_key[k] = anon_ak
kvl[i][0] = anon_ak
if '.' in k:
restype, resname = k.split('.')
for rv in self.resc_val:
if resname == rv:
anon_rv = None
if resname in self.gmap_resc_val:
if v in self.gmap_resc_val[resname]:
anon_rv = self.gmap_resc_val[resname][v]
else:
self.gmap_resc_val[resname] = {}
if anon_rv is None:
anon_rv = self.utils.random_str(len(v))
self.gmap_resc_val[resname][v] = anon_rv
kvl[i][1] = anon_rv
if resname in self.resc_key:
if resname in self.gmap_resc_key:
anon_rk = self.gmap_resc_key[resname]
else:
anon_rk = self.utils.random_str(len(resname))
self.gmap_resc_key[resname] = anon_rk
kvl[i][0] = restype + '.' + anon_rk
anon_data.append(";".join(curr[:3]) + ";" +
" ".join(map(lambda n: "=".join(n), kvl)))
f.close()
return anon_data
def anonymize_sched_config(self, scheduler):
"""
Anonymize the scheduler config
:param scheduler: PBS scheduler object
"""
if len(self.resc_key) == 0:
return
# when anonymizing we get rid of the comments as they may contain
# sensitive information
scheduler._sched_config_comments = {}
# If resources need to be anonymized then update the resources line
# job_sort_key and node_sort_key
sr = scheduler.get_resources()
if sr:
for i in range(0, len(sr)):
if sr[i] in self.resc_key:
if sr[i] in self.gmap_resc_key:
sr[i] = self.gmap_resc_key[sr[i]]
else:
anon_res = self.utils.random_str(len(sr[i]))
self.gmap_resc_key[sr[i]] = anon_res
sr[i] = anon_res
scheduler.sched_config['resources'] = ",".join(sr)
for k in ['job_sort_key', 'node_sort_key']:
if k in scheduler.sched_config:
sc_jsk = scheduler.sched_config[k]
if not isinstance(sc_jsk, list):
sc_jsk = list(sc_jsk)
for r in self.resc_key:
for i in range(len(sc_jsk)):
if r in sc_jsk[i]:
sc_jsk[i] = sc_jsk[i].replace(r, self.resc_key[r])
def __str__(self):
return ("Attributes Values: " + str(self.gmap_attr_val) + "\n" +
"Resources Values: " + str(self.gmap_resc_val) + "\n" +
"Attributes Keys: " + str(self.gmap_attr_key) + "\n" +
"Resources Keys: " + str(self.gmap_resc_key))
class Entity(object):
"""
Abstract representation of a PBS consumer that has an
external relationship to the PBS system. For example, a
user associated to an OS identifier (uid) maps to a PBS
user entity.
Entities may be subject to policies, such as limits, consume
a certain amount of resource and/or fairshare usage.
:param etype: Entity type
:type etype: str or None
:param name: Entity name
:type name: str or None
"""
def __init__(self, etype=None, name=None):
self.type = etype
self.name = name
self.limits = []
self.resource_usage = {}
self.fairshare_usage = 0
def set_limit(self, limit=None):
"""
:param limit: Limit to be set
:type limit: str or None
"""
for l in self.limits:
if str(limit) == str(l):
return
self.limits.append(limit)
def set_resource_usage(self, container=None, resource=None, usage=None):
"""
        Set the resource usage
        :param container: container in which the usage is tracked
        :type container: str or None
        :param resource: PBS resource
        :type resource: str or None
        :param usage: Resource usage value
        :type usage: str or None
"""
if self.type:
if container in self.resource_usage:
                if self.resource_usage[container]:
if resource in self.resource_usage[container]:
self.resource_usage[container][resource] += usage
else:
self.resource_usage[container][resource] = usage
else:
self.resource_usage[container] = {resource: usage}
def set_fairshare_usage(self, usage=0):
"""
Set fairshare usage
:param usage: Fairshare usage value
:type usage: int
"""
self.fairshare_usage += usage
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.limits) + ' ' + str(self.resource_usage) + ' ' + \
str(self.fairshare_usage)
class Policy(object):
"""
Abstract PBS policy. Can be one of ``limits``,
``access control``, ``scheduling policy``, etc...this
class does not currently support any operations
"""
def __init__(self):
pass
class Limit(Policy):
"""
Representation of a PBS limit
    Limits apply to containers, are of a certain type
    (e.g., max_run_res.ncpus), are associated to a given resource
    (e.g., ncpus), apply to a given entity (e.g., user Bob) and
    have a certain value.
:param limit_type: Type of the limit
:type limit_type: str or None
:param resource: PBS resource
:type resource: str or None
:param entity_obj: Entity object
:param value: Limit value
:type value: int
"""
def __init__(self, limit_type=None, resource=None,
entity_obj=None, value=None, container=None,
container_id=None):
self.set_container(container, container_id)
self.soft_limit = False
self.hard_limit = False
self.set_limit_type(limit_type)
self.set_resource(resource)
self.set_value(value)
self.entity = entity_obj
def set_container(self, container, container_id):
"""
Set the container
:param container: Container which is to be set
:type container: str
:param container_id: Container id
"""
self.container = container
self.container_id = container_id
def set_limit_type(self, t):
"""
Set the limit type
:param t: Limit type
:type t: str
"""
self.limit_type = t
if '_soft' in t:
self.soft_limit = True
else:
self.hard_limit = True
def set_resource(self, resource):
"""
Set the resource
:param resource: resource value to set
:type resource: str
"""
self.resource = resource
def set_value(self, value):
"""
Set the resource value
:param value: Resource value
:type value: str
"""
self.value = value
def __eq__(self, value):
if str(self) == str(value):
return True
return False
def __str__(self):
return self.__repr__()
def __repr__(self):
        l = [self.container_id, self.limit_type, self.resource, '[',
             self.entity.type, ':', self.entity.name, '=', str(self.value),
             ']']
        return " ".join(l)
class ExpectActions(object):
"""
List of action handlers to run when Server's expect
function does not get the expected result
:param action: Action to run
:type action: str
:param level: Logging level
"""
actions = {}
def __init__(self, action=None, level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.add_action(action, level=level)
def add_action(self, action=None, hostname=None, level=logging.INFO):
"""
Add an action
:param action: Action to add
:param hostname: Machine hostname
:type hostname: str
:param level: Logging level
"""
if action is not None and action.name is not None and\
action.name not in self.actions:
self.actions[action.name] = action
msg = ['expect action: added action ' + action.name]
if hostname:
msg += [' to server ' + hostname]
if level >= logging.INFO:
self.logger.info("".join(msg))
else:
self.logger.debug("".join(msg))
def has_action(self, name):
"""
check whether action exists or not
:param name: Name of action
:type name: str
"""
if name in self.actions:
return True
return False
def get_action(self, name):
"""
Get an action if exists
:param name: Name of action
:type name: str
"""
if name in self.actions:
return self.actions[name]
return None
    def list_actions(self, level=logging.INFO):
        """
        List all actions
        :param level: Logging level
        """
        if level >= logging.INFO:
            self.logger.info(self.get_all_actions())
        else:
            self.logger.debug(self.get_all_actions())
def get_all_actions(self):
"""
        Get all the actions
"""
return self.actions.values()
def get_actions_by_type(self, atype=None):
"""
        Get all actions of a given type
:param atype: Action type
:type atype: str
"""
if atype is None:
return None
ret_actions = []
for action in self.actions.values():
if action.type is not None and action.type == atype:
ret_actions.append(action)
return ret_actions
def _control_action(self, action=None, name=None, enable=None):
        if action:
            action.enabled = enable
            name = action.name
        elif name is not None:
            if name == 'ALL':
                for a in self.actions.values():
                    a.enabled = enable
            else:
                a = self.get_action(name)
                a.enabled = enable
else:
return
if enable:
msg = 'enabled'
else:
msg = 'disabled'
self.logger.info('expect action: ' + name + ' ' + msg)
def disable_action(self, action=None, name=None):
"""
Disable an action
"""
self._control_action(action, name, enable=False)
def enable_action(self, action=None, name=None):
"""
Enable an action
"""
self._control_action(action, name, enable=True)
def disable_all_actions(self):
"""
Disable all actions
"""
for a in self.actions.values():
a.enabled = False
def enable_all_actions(self):
"""
Enable all actions
"""
for a in self.actions.values():
a.enabled = True
class ExpectAction(object):
"""
Action function to run when Server's expect function does
not get the expected result
:param atype: Action type
:type atype: str
"""
def __init__(self, name=None, enabled=True, atype=None, action=None,
level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.set_name(name, level=level)
self.set_enabled(enabled)
self.set_type(atype)
self.set_action(action)
def set_name(self, name, level=logging.INFO):
"""
        Set the action name
:param name: Action name
:type name: str
"""
if level >= logging.INFO:
self.logger.info('expect action: created new action ' + name)
else:
self.logger.debug('expect action: created new action ' + name)
self.name = name
def set_enabled(self, enabled):
self.enabled = enabled
def set_type(self, atype):
self.type = atype
def set_action(self, action):
self.action = action
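# Illustrative usage sketch (editor's example, not part of the library):
# registering a handler that Server.expect() may trigger when it does not
# get the expected result. The handler and its signature are made-up
# placeholders; check how expect() invokes registered actions for the actual
# calling convention. ``server`` is assumed to be a Server instance.
#
#     def on_mismatch(*args, **kwargs):
#         logging.getLogger(__name__).info('expect mismatch: %s' % (args,))
#
#     act = ExpectAction(name='log_mismatch', atype=JOB, action=on_mismatch)
#     server.add_expect_action(action=act)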
class PbsTypeAttribute(dict):
"""
Experimental. This is a placeholder object that will be used
in the future to map attribute information and circumvent
    the error-prone dynamic type detection that is currently done
using ``decode_value()``
"""
def __getitem__(self, name):
return BatchUtils.decode_value(super(PbsTypeAttribute,
self).__getitem__(name))
class PBSObject(object):
"""
Generic PBS Object encapsulating attributes and defaults
:param name: The name associated to the object
:type name: str
:param attrs: Dictionary of attributes to set on object
:type attrs: Dictionary
:param defaults: Dictionary of default attributes. Setting
this will override any other object's default
:type defaults: Dictionary
"""
utils = BatchUtils()
platform = sys.platform
def __init__(self, name, attrs={}, defaults={}):
self.attributes = OrderedDict()
self.name = name
self.dflt_attributes = defaults
self.attropl = None
self.custom_attrs = OrderedDict()
self.ctime = int(time.time())
self.set_attributes(attrs)
def set_attributes(self, a={}):
"""
set attributes and custom attributes on this object.
custom attributes are used when converting attributes
to CLI
:param a: Attribute dictionary
:type a: Dictionary
"""
if isinstance(a, list):
a = OrderedDict(a)
self.attributes = OrderedDict(self.dflt_attributes.items() +
self.attributes.items() + a.items())
self.custom_attrs = OrderedDict(self.custom_attrs.items() +
a.items())
def unset_attributes(self, attrl=[]):
"""
Unset attributes from object's attributes and custom
attributes
:param attrl: Attribute list
:type attrl: List
"""
for attr in attrl:
if attr in self.attributes:
del self.attributes[attr]
if attr in self.custom_attrs:
del self.custom_attrs[attr]
def __str__(self):
"""
Return a string representation of this PBSObject
"""
if self.name is None:
return ""
s = []
if isinstance(self, Job):
s += ["Job Id: " + self.name + "\n"]
elif isinstance(self, Queue):
s += ["Queue: " + self.name + "\n"]
elif isinstance(self, Server):
s += ["Server: " + self.hostname + "\n"]
elif isinstance(self, Reservation):
s += ["Name: " + "\n"]
else:
s += [self.name + "\n"]
for k, v in self.attributes.items():
s += [" " + k + " = " + str(v) + "\n"]
return "".join(s)
def __repr__(self):
return str(self.attributes)
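# Illustrative usage sketch (editor's example, not part of the library):
# attribute bookkeeping on a generic PBSObject. The attribute names are
# made-up placeholders.
#
#     o = PBSObject('obj1', attrs={'comment': 'hello'},
#                   defaults={'priority': '0'})
#     o.set_attributes({'priority': '5'})
#     o.unset_attributes(['comment'])
#     print o    # __str__ lists the remaining attributes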
class PBSService(PBSObject):
"""
Generic PBS service object to hold properties of PBS daemons
:param name: The name associated to the object
:type name: str or None
:param attrs: Dictionary of attributes to set on object
:type attrs: Dictionary
:param defaults: Dictionary of default attributes. Setting
this will override any other object's default
:type defaults: Dictionary
:param pbsconf_file: Optional path to the pbs configuration
file
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
:param diag: path to PBS diag directory
                 (This will override diagmap)
:type diag: str or None
"""
du = DshUtils()
pu = ProcUtils()
def __init__(self, name=None, attrs={}, defaults={}, pbsconf_file=None,
diagmap={}, diag=None):
if name is None:
self.hostname = socket.gethostname()
else:
self.hostname = name
if diag:
self.diagmap = self._load_from_diag(diag)
self.has_diag = True
self.diag = diag
elif len(diagmap) > 0:
self.diagmap = diagmap
self.diag = None
self.has_diag = True
else:
self.diagmap = {}
self.diag = None
self.has_diag = False
if not self.has_diag:
try:
self.fqdn = socket.gethostbyaddr(self.hostname)[0]
if self.hostname != self.fqdn:
self.logger.info('FQDN name ' + self.fqdn + ' differs '
'from name provided ' + self.hostname)
self.hostname = self.fqdn
except:
pass
else:
self.fqdn = self.hostname
self.shortname = self.hostname.split('.')[0]
self.logutils = None
self.logfile = None
self.acctlogfile = None
self.pid = None
self.pbs_conf = {}
self.pbs_env = {}
self._is_local = True
self.launcher = None
PBSObject.__init__(self, name, attrs, defaults)
if not self.has_diag:
if not self.du.is_localhost(self.hostname):
self._is_local = False
if pbsconf_file is None and not self.has_diag:
self.pbs_conf_file = self.du.get_pbs_conf_file(name)
else:
self.pbs_conf_file = pbsconf_file
if self.pbs_conf_file == '/etc/pbs.conf':
self.default_pbs_conf = True
elif (('PBS_CONF_FILE' not in os.environ) or
(os.environ['PBS_CONF_FILE'] != self.pbs_conf_file)):
self.default_pbs_conf = False
else:
self.default_pbs_conf = True
# default pbs_server_name to hostname, it will get set again once the
# config file is processed
self.pbs_server_name = self.hostname
# If diag is given then bypass parsing pbs.conf
if self.has_diag:
if diag is None:
t = 'pbs_diag_%s' % (time.strftime("%y%m%d_%H%M%S"))
self.diag = os.path.join(self.du.get_tempdir(), t)
self.pbs_conf['PBS_HOME'] = self.diag
self.pbs_conf['PBS_EXEC'] = self.diag
self.pbs_conf['PBS_SERVER'] = self.hostname
m = re.match('.*pbs_diag_(?P<datetime>\d{6,6}_\d{6,6}).*',
self.diag)
if m:
tm = time.strptime(m.group('datetime'), "%y%m%d_%H%M%S")
self.ctime = int(time.mktime(tm))
else:
self.pbs_conf = self.du.parse_pbs_config(self.hostname,
self.pbs_conf_file)
if self.pbs_conf is None or len(self.pbs_conf) == 0:
self.pbs_conf = {'PBS_HOME': "", 'PBS_EXEC': ""}
else:
ef = os.path.join(self.pbs_conf['PBS_HOME'], 'pbs_environment')
self.pbs_env = self.du.parse_pbs_environment(self.hostname, ef)
self.pbs_server_name = self.du.get_pbs_server_name(
self.pbs_conf)
self.init_logfile_path(self.pbs_conf)
def _load_from_diag(self, diag):
diagmap = {}
diagmap[SERVER] = os.path.join(diag, 'qstat_Bf.out')
diagmap[VNODE] = os.path.join(diag, 'pbsnodes_va.out')
diagmap[QUEUE] = os.path.join(diag, 'qstat_Qf.out')
diagmap[JOB] = os.path.join(diag, 'qstat_tf.out')
if not os.path.isfile(diagmap[JOB]):
diagmap[JOB] = os.path.join(diag, 'qstat_f.out')
diagmap[RESV] = os.path.join(diag, 'pbs_rstat_f.out')
diagmap[SCHED] = os.path.join(diag, 'qmgr_psched.out')
diagmap[HOOK] = []
if (os.path.isdir(os.path.join(diag, 'server_priv')) and
os.path.isdir(os.path.join(diag, 'server_priv', 'hooks'))):
_ld = os.listdir(os.path.join(diag, 'server_priv', 'hooks'))
for f in _ld:
if f.endswith('.HK'):
diagmap[HOOK].append(
os.path.join(diag, 'server_priv', 'hooks', f))
# Format of qmgr_psched.out differs from Batch Status, we transform
# it to go through the common batch status parsing routines
if os.path.isfile(diagmap[SCHED]):
f = open(os.path.join(diag, 'ptl_qstat_Sched.out'), 'w')
lines = open(diagmap[SCHED])
f.write("Sched \n")
for l in lines:
recs = l.split()
f.write("".join(recs[2:5]) + "\n")
f.close()
diagmap[SCHED] = os.path.join(diag, 'ptl_qstat_Sched.out')
else:
diagmap[SCHED] = None
return diagmap
def init_logfile_path(self, conf=None):
"""
Initialize path to log files for this service
:param conf: PBS conf file parameters
:type conf: Dictionary
"""
elmt = self._instance_to_logpath(self)
if elmt is None:
return
if conf is not None and 'PBS_HOME' in conf:
tm = time.strftime("%Y%m%d", time.localtime())
self.logfile = os.path.join(conf['PBS_HOME'], elmt, tm)
self.acctlogfile = os.path.join(conf['PBS_HOME'], 'server_priv',
'accounting', tm)
def _instance_to_logpath(self, inst):
"""
returns the log path associated to this service
"""
if isinstance(inst, Scheduler):
logval = 'sched_logs'
elif isinstance(inst, Server):
logval = 'server_logs'
elif isinstance(inst, MoM):
logval = 'mom_logs'
elif isinstance(inst, Comm):
logval = 'comm_logs'
else:
logval = None
return logval
def _instance_to_cmd(self, inst):
"""
returns the command associated to this service
"""
if isinstance(inst, Scheduler):
cmd = 'pbs_sched'
elif isinstance(inst, Server):
cmd = 'pbs_server'
elif isinstance(inst, MoM):
cmd = 'pbs_mom'
elif isinstance(inst, Comm):
cmd = 'pbs_comm'
else:
cmd = None
return cmd
def _instance_to_servicename(self, inst):
"""
return the service name associated to the instance. One of
``server, scheduler, or mom.``
"""
if isinstance(inst, Scheduler):
nm = 'scheduler'
elif isinstance(inst, Server):
nm = 'server'
elif isinstance(inst, MoM):
nm = 'mom'
elif isinstance(inst, Comm):
nm = 'comm'
else:
nm = ''
return nm
def _instance_to_privpath(self, inst):
"""
returns the path to priv associated to this service
"""
if isinstance(inst, Scheduler):
priv = 'sched_priv'
elif isinstance(inst, Server):
priv = 'server_priv'
elif isinstance(inst, MoM):
priv = 'mom_priv'
elif isinstance(inst, Comm):
priv = 'server_priv'
else:
priv = None
return priv
def _instance_to_lock(self, inst):
"""
returns the path to lock file associated to this service
"""
if isinstance(inst, Scheduler):
lock = 'sched.lock'
elif isinstance(inst, Server):
lock = 'server.lock'
elif isinstance(inst, MoM):
lock = 'mom.lock'
elif isinstance(inst, Comm):
lock = 'comm.lock'
else:
lock = None
return lock
def set_launcher(self, execargs=None):
self.launcher = execargs
def _isUp(self, inst):
"""
returns True if service is up and False otherwise
"""
live_pids = self._all_instance_pids(inst)
pid = self._get_pid(inst)
if live_pids is not None and pid in live_pids:
return True
return False
def _signal(self, sig, inst=None, procname=None):
"""
Send signal ``sig`` to service. sig is the signal name
as it would be sent to the program kill, e.g. -HUP.
Return the ``out/err/rc`` from the command run to send
the signal. See DshUtils.run_cmd
:param inst: Instance
:type inst: str
:param procname: Process name
:type procname: str or None
"""
pid = None
if inst is not None:
if inst.pid is not None:
pid = inst.pid
else:
pid = self._get_pid(inst)
if procname is not None:
pi = self.pu.get_proc_info(self.hostname, procname)
if pi is not None and pi.values() and pi.values()[0]:
for _p in pi.values()[0]:
ret = self.du.run_cmd(self.hostname, ['kill', sig, _p.pid],
sudo=True)
return ret
if pid is None:
return {'rc': 0, 'err': '', 'out': 'no pid to signal'}
return self.du.run_cmd(self.hostname, ['kill', sig, pid], sudo=True)
def _all_instance_pids(self, inst):
"""
Return a list of all ``PIDS`` that match the
instance name or None.
"""
cmd = self._instance_to_cmd(inst)
self.pu.get_proc_info(self.hostname, ".*" + cmd + ".*",
regexp=True)
_procs = self.pu.processes.values()
if _procs:
_pids = []
for _p in _procs:
_pids.extend(map(lambda x: x.pid, _p))
return _pids
return None
def _get_pid(self, inst):
"""
Get the ``PID`` associated to this instance.
Implementation note, the pid is read from the
daemon's lock file.
This is different than _all_instance_pids in that
the PID of the last running instance can be retrieved
with ``_get_pid`` but not with ``_all_instance_pids``
"""
priv = self._instance_to_privpath(inst)
lock = self._instance_to_lock(inst)
path = os.path.join(self.pbs_conf['PBS_HOME'], priv, lock)
rv = self.du.cat(self.hostname, path, sudo=True, logerr=False)
if ((rv['rc'] == 0) and (len(rv['out']) > 0)):
self.pid = rv['out'][0].strip()
else:
self.pid = None
return self.pid
def _start(self, inst=None, args=None, cmd_map=None, launcher=None):
"""
Generic service startup
:param inst: The instance to act upon
:type inst: str
:param args: Optional command-line arguments
:type args: List
:param cmd_map: Optional dictionary of command line
options to configuration variables
:type cmd_map: Dictionary
:param launcher: Optional utility to invoke the launch
of the service. This option only takes
effect on ``Unix/Linux``. The option can
be a string or a list.Options may be passed
to the launcher, for example to start a
service through the valgrind utility
redirecting to a log file,launcher could be
set to e.g.
``['valgrind', '--log-file=/tmp/vlgrd.out']``
or ``'valgrind --log-file=/tmp/vlgrd.out'``
"""
if launcher is None and self.launcher is not None:
launcher = self.launcher
app = self._instance_to_cmd(inst)
if app is None:
return
_m = ['service: starting', app]
if args is not None:
_m += ['with args: ']
_m += args
as_script = False
wait_on = True
if launcher is not None:
if isinstance(launcher, str):
launcher = launcher.split()
if app == 'pbs_server':
# running the pbs server through valgrind requires a bit of
# a dance because the pbs_server binary is pbs_server.bin
# and to run it requires being able to find libraries, so
# LD_LIBRARY_PATH is set and pbs_server.bin is run as a
# script
pexec = inst.pbs_conf['PBS_EXEC']
ldlib = ['LD_LIBRARY_PATH=' +
os.path.join(pexec, 'lib') + ':' +
os.path.join(pexec, 'pgsql', 'lib')]
app = 'pbs_server.bin'
else:
ldlib = []
cmd = ldlib + launcher
as_script = True
wait_on = False
else:
cmd = []
cmd += [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', app)]
if args is not None:
cmd += args
if not self.default_pbs_conf:
cmd = ['PBS_CONF_FILE=' + inst.pbs_conf_file] + cmd
as_script = True
if cmd_map is not None:
conf_cmd = self.du.map_pbs_conf_to_cmd(cmd_map,
pconf=self.pbs_conf)
cmd.extend(conf_cmd)
_m += conf_cmd
self.logger.info(" ".join(_m))
ret = self.du.run_cmd(self.hostname, cmd, sudo=True,
as_script=as_script, wait_on_script=wait_on,
level=logging.INFOCLI, logerr=False)
if ret['rc'] != 0:
raise PbsServiceError(rv=False, rc=ret['rc'], msg=ret['err'])
ret_msg = True
if ret['err']:
ret_msg = ret['err']
self.pid = self._get_pid(inst)
# get_pid gets information from a lock file that may not have been
# removed when the daemon stopped so we verify that the PID is
# actually alive in the list of pids returned by ps
live_pids = self._all_instance_pids(inst)
i = 0
while ((self.pid is None) or
(live_pids is None or self.pid not in live_pids)) and (i < 30):
time.sleep(1)
i += 1
live_pids = self._all_instance_pids(inst)
self.pid = self._get_pid(inst)
if live_pids is not None and self.pid in live_pids:
return ret_msg
if i == 30:
raise PbsServiceError(rv=False, rc=-1, msg="Could not find PID")
return ret_msg
def _stop(self, sig='-TERM', inst=None):
if inst is None:
return True
self._signal(sig, inst)
pid = self._get_pid(inst)
chk_pid = self._all_instance_pids(inst)
if pid is None or chk_pid is None:
return True
num_seconds = 0
while (chk_pid is not None) and (str(pid) in chk_pid):
if num_seconds > 60:
m = (self.logprefix + 'could not stop service ' +
self._instance_to_servicename(inst))
raise PbsServiceError(rv=False, rc=-1, msg=m)
time.sleep(1)
num_seconds += 1
chk_pid = self._all_instance_pids(inst)
inst.pid = None
return True
def log_lines(self, logtype, id=None, n=50, tail=True, day=None,
starttime=None, endtime=None):
"""
Return the last ``<n>`` lines of a PBS log file, which
can be one of ``server``, ``scheduler``, ``MoM``, or
``tracejob``
:param logtype: The entity requested, an instance of a
Scheduler, Server or MoM object, or the
string 'tracejob' for tracejob
:type logtype: str or object
:param id: The id of the object to trace. Only used for
tracejob
        :param n: One of 'ALL' or the number of lines to
process/display, defaults to 50.
:type n: int
:param tail: if True, parse log from the end to the start,
otherwise parse from the start to the end.
Defaults to True.
:type tail: bool
        :param day: Optional day in ``YYYYMMDD`` format. Defaults
to current day
:type day: int
:param starttime: date timestamp to start matching
:param endtime: date timestamp to end matching
:returns: Last ``<n>`` lines of logfile for ``Server``,
``Scheduler``, ``MoM or tracejob``
"""
logval = None
lines = None
sudo = False
try:
if logtype == 'tracejob':
if id is None:
return None
cmd = [os.path.join(
self.pbs_conf['PBS_EXEC'],
'bin',
'tracejob')]
cmd += [str(id)]
lines = self.du.run_cmd(self.hostname, cmd)['out']
if n != 'ALL':
lines = lines[-n:]
else:
if day is None:
day = time.strftime("%Y%m%d", time.localtime(time.time()))
if logtype == 'accounting':
filename = os.path.join(self.pbs_conf['PBS_HOME'],
'server_priv', 'accounting', day)
sudo = True
else:
logval = self._instance_to_logpath(logtype)
if logval:
filename = os.path.join(self.pbs_conf['PBS_HOME'],
logval, day)
if n == 'ALL':
if self._is_local and not sudo:
lines = open(filename)
else:
lines = self.du.cat(self.hostname, filename, sudo=sudo,
level=logging.DEBUG2)['out']
# tail is not a standard, e.g. on Solaris it does not recognize
# -n. We circumvent this problem by using PTL's version of tail
# but it currently only works on a local host, for remote hosts
# we fall back to using tail/head -n
elif self._is_local and not sudo:
if tail:
futils = FileUtils(filename, FILE_TAIL)
else:
futils = FileUtils(filename)
lines = futils.next(n)
else:
if tail:
cmd = ['/usr/bin/tail']
else:
cmd = ['/usr/bin/head']
pyexec = os.path.join(self.pbs_conf['PBS_EXEC'], 'python',
'bin', 'python')
osflav = self.du.get_platform(self.hostname, pyexec)
if osflav.startswith('sunos'):
cmd += ['-']
else:
cmd += ['-n']
cmd += [str(n), filename]
lines = self.du.run_cmd(self.hostname, cmd, sudo=sudo,
level=logging.DEBUG2)['out']
except:
self.logger.error('error in log_lines ')
traceback.print_exc()
return None
return lines
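    # Illustrative usage sketch (editor's example, not part of the library):
    # fetching recent log lines, assuming a running PBS server on this host.
    # The job id passed to tracejob is a made-up placeholder.
    #
    #     server = Server()
    #     last20 = server.log_lines(server, n=20)
    #     tj = server.log_lines('tracejob', id='123.serverhost', n='ALL')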
def _log_match(self, logtype, msg, id=None, n=50, tail=True,
allmatch=False, regexp=False, day=None, max_attempts=1,
interval=1, starttime=None, endtime=None,
level=logging.INFO):
"""
If ``'msg'`` found in the ``'n'`` lines of the log file,
        returns a ``tuple (x, y)`` where x is the matching line
        number and y the line itself. If no match, return None.
If allmatch is True, a list of tuples is returned.
:param logtype: The entity requested, an instance of a
Scheduler, Server, or MoM object, or the
strings 'tracejob' for tracejob or
'accounting' for accounting logs.
:param id: The id of the object to trace. Only used for
tracejob
:param n: 'ALL' or the number of lines to search through,
defaults to 50
:param tail: If true (default), starts from the end of
the file
:type tail: bool
        :param allmatch: If True, all matching lines out of those
                         parsed are returned as a list. Defaults
                         to False
:type allmatch: bool
        :param regexp: If true msg is a Python regular expression.
                       Defaults to False
        :type regexp: bool
        :param day: Optional day in YYYYMMDD format.
        :param max_attempts: the number of attempts to make to find
                             a matching entry
        :type max_attempts: int
:param interval: the interval between attempts
:type interval: int
:param starttime: If set ignore matches that occur before
specified time
:param endtime: If set ignore matches that occur after
specified time
.. note:: The matching line number is relative to the record
number, not the absolute line number in the file.
"""
try:
from ptl.utils.pbs_logutils import PBSLogUtils
except:
self.logger.error('error loading ptl.utils.pbs_logutils')
return None
if self.logutils is None:
self.logutils = PBSLogUtils()
rv = (None, None)
attempt = 1
name = self._instance_to_servicename(logtype)
infomsg = (name + ' ' + self.shortname +
' log match: searching for "' + msg + '"')
if regexp:
infomsg += ' - using regular expression '
if allmatch:
infomsg += ' - on all matches '
attemptmsg = ' - No match'
while attempt <= max_attempts:
if attempt > 1:
attemptmsg = ' - attempt ' + str(attempt)
lines = self.log_lines(logtype, id, n=n, tail=tail, day=day,
starttime=starttime, endtime=endtime)
rv = self.logutils.match_msg(lines, msg, allmatch=allmatch,
regexp=regexp, starttime=starttime,
endtime=endtime)
if rv:
self.logger.log(level, infomsg + '... OK')
break
else:
if ((starttime is not None or endtime is not None) and
n != 'ALL'):
                    if attempt >= max_attempts:
# We will do one last attempt to match in case the
# number of lines that were provided did not capture
# the start or end time of interest
max_attempts += 1
n = 'ALL'
self.logger.log(level, infomsg + attemptmsg)
attempt += 1
time.sleep(interval)
try:
# Depending on whether the hostname is local or remote and whether
# sudo privileges were required, lines returned by log_lines can be
# an open file descriptor, we close here but ignore errors in case
# any were raised for all irrelevant cases
lines.close()
except:
pass
return rv
def accounting_match(self, msg, id=None, n=50, tail=True,
allmatch=False, regexp=False, day=None,
max_attempts=1, interval=1, starttime=None,
endtime=None):
"""
Find msg in accounting log.
If ``'msg'`` found in the ``'n'`` lines of the log file,
        returns a ``tuple (x, y)`` where x is the matching line
        number and y the line itself. If no match, return None.
If allmatch is True, a list of tuples is returned.
:param id: The id of the object to trace. Only used for
tracejob
:param n: 'ALL' or the number of lines to search through,
defaults to 50
:type n: int
:param tail: If true (default), starts from the end of
the file
:type tail: bool
        :param allmatch: If True, all matching lines out of those
                         parsed are returned as a list. Defaults
                         to False
:type allmatch: bool
:param regexp: If true msg is a Python regular expression.
Defaults to False
:type regexp: bool
        :param day: Optional day in YYYYMMDD format.
        :param max_attempts: the number of attempts to make to
                             find a matching entry
        :type max_attempts: int
:param interval: the interval between attempts
:type interval: int
:param starttime: If set ignore matches that occur before
specified time
:param endtime: If set ignore matches that occur after
specified time
.. note:: The matching line number is relative to the
record number, not the absolute line number
in the file.
"""
return self._log_match('accounting', msg, id, n, tail, allmatch,
regexp, day, max_attempts, interval, starttime,
endtime)
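    # Illustrative usage sketch (editor's example, not part of the library):
    # waiting for a job end record in the accounting log. The job id and the
    # ';E;' record pattern are shown as made-up example values.
    #
    #     server.accounting_match(';E;123.serverhost;', regexp=True,
    #                             max_attempts=10, interval=2)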
def tracejob_match(self, msg, id=None, n=50, tail=True, allmatch=False,
regexp=False, **kwargs):
"""
Find msg in tracejob log. See _log_match for details
"""
        return self._log_match('tracejob', msg, id, n, tail, allmatch,
                               regexp, **kwargs)
def _save_config_file(self, dict_conf, fname):
ret = self.du.cat(self.hostname, fname, sudo=True)
if ret['rc'] == 0:
dict_conf[fname] = ret['out']
else:
self.logger.error('error saving configuration ' + fname)
def _load_configuration(self, infile, objtype=None):
"""
Load configuration as was saved in infile
:param infile: the file in which configuration
was saved
:type infile: str
:param objtype: the object type to load configuration
for, one of server, scheduler, mom or
if None, load all objects in infile
"""
if os.path.isfile(infile):
conf = {}
f = open(infile, 'r')
# load all objects from the Pickled file
while True:
try:
conf = cPickle.load(f)
except:
break
f.close()
if objtype and objtype in conf:
conf = conf[objtype]
else:
# load all object types that could be in infile
newconf = {}
for ky in [MGR_OBJ_SERVER, MGR_OBJ_SCHED, MGR_OBJ_NODE]:
if ky not in conf:
conf[ky] = {}
newconf = dict(newconf.items() + conf[ky].items())
conf = newconf
for k, v in conf.items():
(fd, fn) = self.du.mkstemp()
# handle server data saved as output of qmgr commands by piping
# data back into qmgr
if k.startswith('qmgr_'):
qmgr = os.path.join(self.client_conf['PBS_EXEC'],
'bin', 'qmgr')
os.write(fd, "\n".join(v))
self.du.run_cmd(
self.hostname,
[qmgr],
cstdin=fd,
sudo=True)
else:
os.write(fd, "\n".join(v))
# append the last line
os.write(fd, "\n")
self.du.run_cmd(self.hostname, ['cp', fn, k], sudo=True)
os.close(fd)
os.remove(fn)
return True
return False
def get_tempdir(self):
"""
platform independent call to get a temporary directory
"""
return self.du.get_tempdir(self.hostname)
def __str__(self):
return (self.__class__.__name__ + ' ' + self.hostname + ' config ' +
self.pbs_conf_file)
def __repr__(self):
return (self.__class__.__name__ + '/' + self.pbs_conf_file + '@' +
self.hostname)
class Comm(PBSService):
"""
PBS ``Comm`` configuration and control
"""
"""
:param name: The hostname of the Comm. Defaults to current hostname.
:type name: str
:param attrs: Dictionary of attributes to set, these will override
defaults.
:type attrs: dictionary
:param pbsconf_file: path to config file to parse for PBS_HOME,
PBS_EXEC, etc
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc) to
mapped files from PBS diag directory
:type diagmap: dictionary
:param diag: path to PBS diag directory (This will override diagmap)
:type diag: str or None
:param server: A PBS server instance to which this Comm is associated
:type server: str
:param db_access: set to either file containing credentials to DB access or
dictionary containing {'dbname':...,'user':...,
'port':...}
:type db_access: str or dictionary
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, pbsconf_file=None, diagmap={},
diag=None, server=None, db_access=None):
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(name, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
PBSService.__init__(self, name, attrs, self.dflt_attributes,
pbsconf_file, diagmap, diag)
_m = ['Comm ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.conf_to_cmd_map = {
'PBS_COMM_ROUTERS': '-r',
'PBS_COMM_THREADS': '-t'
}
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
def isUp(self):
"""
Check for comm up
"""
return super(Comm, self)._isUp(self)
def signal(self, sig):
"""
Send signal to comm
"""
self.logger.info(self.logprefix + 'sent signal ' + sig)
return super(Comm, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the comm pid
"""
return super(Comm, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get all pids of given instance
"""
return super(Comm, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the comm
:param args: Argument required to start the comm
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
return super(Comm, self)._start(inst=self, args=args,
cmd_map=self.conf_to_cmd_map,
launcher=launcher)
else:
try:
rv = self.pi.start_comm()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the comm.
:param sig: Signal to stop the comm
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Comm on host ' +
self.hostname)
return super(Comm, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_comm()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the comm.
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the comm logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp,
day, max_attempts, interval, starttime, endtime,
level=level)
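# Illustrative usage sketch (editor's example, not part of the library):
# basic control of a Comm daemon plus a log check. The log message text is a
# made-up placeholder, not a guaranteed pbs_comm log line.
#
#     comm = Comm()
#     if not comm.isUp():
#         comm.start()
#     comm.log_match('TPP initialization done', max_attempts=5)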
class Server(PBSService):
"""
PBS server ``configuration`` and ``control``
The Server class is a container to PBS server attributes
and implements wrappers to the ``IFL API`` to perform
operations on the server. For example to submit, status,
delete, manage, etc... jobs, reservations and configurations.
This class also offers higher-level routines to ease testing,
    see functions, for example: ``revert_to_defaults,
    init_logging, expect, counter``.
The ptl_conf dictionary holds general configuration for the
framework's operations, specifically, one can control:
mode: set to ``PTL_CLI`` to operate in ``CLI`` mode or
``PTL_API`` to operate in ``API`` mode
    expect_max_attempts: the default maximum number of attempts
                         to be used by expect. Defaults to 60
    expect_interval: the default time interval (in seconds)
                     between expect requests. Defaults to 0.5
    update_attributes: the default on whether object attributes
                       should be updated using a list of
                       dictionaries. Defaults to True
:param name: The hostname of the server. Defaults to
calling pbs_default()
:type name: str
:param attrs: Dictionary of attributes to set, these will
override defaults.
:type attrs: Dictionary
:param defaults: Dictionary of default attributes.
Default: dflt_attributes
:type defaults: Dictionary
:param pbsconf_file: path to config file to parse for PBS_HOME,
PBS_EXEC, etc
:type pbsconf_file: str
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
    :param diag: path to PBS diag directory (This will override
diagmap)
:type diag: str
:param client: The host to use as client for CLI queries.
Defaults to the local hostname.
:type client: str
:param client_pbsconf_file: The path to a custom PBS_CONF_FILE
on the client host. Defaults to
the same path as pbsconf_file.
:type client_pbsconf_file: str
    :param db_access: set to either file containing credentials
to DB access or dictionary containing
{'dbname':...,'user':...,'port':...}
:param stat: if True, stat the server attributes
:type stat: bool
"""
logger = logging.getLogger(__name__)
dflt_attributes = {
ATTR_scheduling: "True",
ATTR_dfltque: "workq",
ATTR_logevents: "511",
ATTR_mailfrom: "adm",
ATTR_queryother: "True",
ATTR_rescdflt + ".ncpus": "1",
ATTR_DefaultChunk + ".ncpus": "1",
ATTR_schedit: "600",
ATTR_ResvEnable: "True",
ATTR_nodefailrq: "310",
ATTR_maxarraysize: "10000",
ATTR_license_linger: "3600",
ATTR_EligibleTimeEnable: "False",
ATTR_max_concurrent_prov: "5",
ATTR_FlatUID: 'True',
}
ptl_conf = {
'mode': PTL_API,
'expect_max_attempts': 60,
'expect_interval': 0.5,
'update_attributes': True,
}
# this pattern is a bit relaxed to match common developer build numbers
version_tag = re.compile("[a-zA-Z_]*(?P<version>[\d\.]+.[\w\d\.]*)[\s]*")
actions = ExpectActions()
def __init__(self, name=None, attrs={}, defaults={}, pbsconf_file=None,
diagmap={}, diag=None, client=None, client_pbsconf_file=None,
db_access=None, stat=True):
self.jobs = {}
self.nodes = {}
self.reservations = {}
self.queues = {}
self.resources = {}
self.hooks = {}
self.pbshooks = {}
self.entities = {}
self.scheduler = None
self.version = None
self.default_queue = None
self.last_error = [] # type: array. Set for CLI IFL errors. Not reset
self.last_rc = None # Set for CLI IFL return code. Not thread-safe
        # default timeout on connect/disconnect set to 60s to mimic the qsub
# buffer introduced in PBS 11
self._conn_timeout = 60
self._conn_timer = None
self._conn = None
self._db_conn = None
self.current_user = pwd.getpwuid(os.getuid())[0]
if len(defaults.keys()) == 0:
defaults = self.dflt_attributes
self.pexpect_timeout = 15
self.pexpect_sleep_time = .1
PBSService.__init__(self, name, attrs, defaults, pbsconf_file, diagmap,
diag)
_m = ['server ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.set_client(client)
if client_pbsconf_file is None:
self.client_pbs_conf_file = self.du.get_pbs_conf_file(self.client)
else:
self.client_pbs_conf_file = client_pbsconf_file
self.client_conf = self.du.parse_pbs_config(
self.client, file=self.client_pbs_conf_file)
if self.client_pbs_conf_file == '/etc/pbs.conf':
self.default_client_pbs_conf = True
elif (('PBS_CONF_FILE' not in os.environ) or
(os.environ['PBS_CONF_FILE'] != self.client_pbs_conf_file)):
self.default_client_pbs_conf = False
else:
self.default_client_pbs_conf = True
a = {}
if os.getuid() == 0:
a = {ATTR_aclroot: 'root'}
self.dflt_attributes.update(a)
if not API_OK:
# mode must be set before the first stat call
self.set_op_mode(PTL_CLI)
if stat:
try:
tmp_attrs = self.status(SERVER, level=logging.DEBUG,
db_access=db_access)
except (PbsConnectError, PbsStatusError):
tmp_attrs = None
if tmp_attrs is not None and len(tmp_attrs) > 0:
self.attributes = tmp_attrs[0]
if ATTR_dfltque in self.attributes:
self.default_queue = self.attributes[ATTR_dfltque]
self.update_version_info()
def update_version_info(self):
"""
Update the version information.
"""
if ATTR_version not in self.attributes:
self.attributes[ATTR_version] = 'unknown'
else:
m = self.version_tag.match(self.attributes[ATTR_version])
if m:
v = m.group('version')
self.version = LooseVersion(v)
self.logger.info(self.logprefix + 'version ' +
self.attributes[ATTR_version])
@classmethod
def set_update_attributes(cls, val):
"""
Set update attributes
"""
cls.logger.info('setting update attributes ' + str(val))
if val == 1 or val[0] in ('t', 'T'):
val = True
else:
val = False
cls.ptl_conf['update_attributes'] = val
@classmethod
def set_expect_max_attempts(cls, val):
"""
Set expect max attempts
"""
cls.logger.info('setting expect max attempts ' + str(val))
cls.ptl_conf['expect_max_attempts'] = int(val)
@classmethod
def set_expect_interval(cls, val):
"""
Set expect interval
"""
cls.logger.info('setting expect interval ' + str(val))
cls.ptl_conf['expect_interval'] = float(val)
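    # Illustrative usage sketch (editor's example, not part of the library):
    # tuning the framework-wide expect behaviour before running tests.
    #
    #     Server.set_expect_max_attempts(120)
    #     Server.set_expect_interval(1)
    #     Server.set_update_attributes('true')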
def set_client(self, name=None):
"""
Set server client
:param name: Client name
:type name: str
"""
if name is None:
self.client = socket.gethostname()
else:
self.client = name
def _connect(self, hostname, attempt=1):
if ((self._conn is None or self._conn < 0) or
(self._conn_timeout == 0 or self._conn_timer is None)):
self._conn = pbs_connect(hostname)
self._conn_timer = time.time()
if self._conn is None or self._conn < 0:
if attempt > 5:
m = self.logprefix + 'unable to connect'
raise PbsConnectError(rv=None, rc=-1, msg=m)
else:
self._disconnect(self._conn, force=True)
time.sleep(1)
return self._connect(hostname, attempt + 1)
return self._conn
def _disconnect(self, conn, force=False):
"""
disconnect a connection to a Server.
For performance of the API calls, a connection is
maintained up to _conn_timer, unless the force parameter
is set to True
:param conn: Server connection
        :param force: If true then disconnect forcefully
:type force: bool
"""
if ((conn is not None and conn >= 0) and
(force or
(self._conn_timeout == 0 or
(self._conn_timer is not None and
(time.time() - self._conn_timer > self._conn_timeout))))):
pbs_disconnect(conn)
self._conn_timer = None
self._conn = None
def set_connect_timeout(self, timeout=0):
"""
Set server connection timeout
:param timeout: Timeout value
:type timeout: int
"""
self._conn_timeout = timeout
def get_op_mode(self):
"""
Returns operating mode for calls to the PBS server.
Currently, two modes are supported, either the ``API``
or the ``CLI``. Default is ``API``
"""
if (not API_OK or (self.ptl_conf['mode'] == PTL_CLI)):
return PTL_CLI
return PTL_API
def set_op_mode(self, mode):
"""
set operating mode to one of either ``PTL_CLI`` or
``PTL_API``.Returns the mode that was set which can
be different from the value requested, for example, if
requesting to set ``PTL_API``, in the absence of the
appropriate SWIG wrappers, the library will fall back to
``CLI``, or if requesting ``PTL_CLI`` and there is no
``PBS_EXEC`` on the system, None is returned.
:param mode: Operating mode
:type mode: str
"""
if mode == PTL_API:
if self._conn is not None or self._conn < 0:
self._conn = None
if not API_OK:
self.logger.error(self.logprefix +
'API submission is not available')
return PTL_CLI
elif mode == PTL_CLI:
if ((not self.has_diag) and
not os.path.isdir(os.path.join(self.client_conf['PBS_EXEC'],
'bin'))):
self.logger.error(self.logprefix +
'PBS commands are not available')
return None
else:
self.logger.error(self.logprefix + "Unrecognized operating mode")
return None
self.ptl_conf['mode'] = mode
self.logger.info(self.logprefix + 'server operating mode set to ' +
mode)
return mode
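    # Illustrative usage sketch (editor's example, not part of the library):
    # forcing CLI mode, e.g. when the SWIG-wrapped IFL API is unavailable on
    # the test host; assumes a reachable PBS server.
    #
    #     server = Server()
    #     server.set_op_mode(PTL_CLI)
    #     server.status(SERVER)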
def add_expect_action(self, name=None, action=None):
"""
Add an action handler to expect. Expect Actions are
custom handlers that are triggered when an unexpected
value is encountered
:param name: Action name
:type name: str or None
:param action: Action to add
"""
if name is None and action.name is None:
return
if name is None and action.name is not None:
name = action.name
if not self.actions.has_action(name):
self.actions.add_action(action, self.shortname)
def set_attributes(self, a={}):
"""
set server attributes
:param a: Attribute dictionary
:type a: Dictionary
"""
super(Server, self).set_attributes(a)
self.__dict__.update(a)
def isUp(self):
"""
returns ``True`` if server is up and ``False`` otherwise
"""
if self.has_diag:
return True
i = 0
op_mode = self.get_op_mode()
if ((op_mode == PTL_API) and (self._conn is not None)):
self._disconnect(self._conn, force=True)
while i < 20:
rv = False
try:
if op_mode == PTL_CLI:
self.status(SERVER, level=logging.DEBUG, logerr=False)
else:
c = self._connect(self.hostname)
self._disconnect(c, force=True)
return True
except (PbsConnectError, PbsStatusError):
# if the status/connect operation fails then there might be
# chances that server process is running but not responsive
# so we wait until the server is reported operational.
rv = self._isUp(self)
# We really mean to check != False rather than just "rv"
if str(rv) != 'False':
                    self.logger.warning('Server process started ' +
'but not up yet')
time.sleep(1)
i += 1
else:
# status/connect failed + no server process means
# server is actually down
return False
return False
def signal(self, sig):
"""
Send signal to server
:param sig: Signal to send
:type sig: str
"""
self.logger.info('server ' + self.shortname + ': sent signal ' + sig)
return super(Server, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the server pid
"""
return super(Server, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get all pids for a given instance
"""
return super(Server, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the PBS server
:param args: Argument required to start the server
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
rv = super(Server, self)._start(inst=self, args=args,
launcher=launcher)
else:
try:
rv = self.pi.start_server()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
if self.isUp():
return rv
else:
raise PbsServiceError(rv=False, rc=1, msg=rv['err'])
def stop(self, sig=None):
"""
Stop the PBS server
:param sig: Signal to stop PBS server
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Server on host ' +
self.hostname)
rc = super(Server, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_server()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg,
post=self._disconnect, conn=self._conn,
force=True)
rc = True
self._disconnect(self._conn, force=True)
return rc
def restart(self):
"""
Terminate and start a PBS server.
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the PBS server logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp,
day, max_attempts, interval, starttime, endtime,
level=level)
def revert_to_defaults(self, reverthooks=True, revertqueues=True,
revertresources=True, delhooks=True,
delqueues=True, server_stat=None):
"""
reset server attributes back to out of box defaults.
:param reverthooks: If True disable all hooks. Defaults
to True
:type reverthooks: bool
:param revertqueues: If True disable all non-default
queues. Defaults to True
:type revertqueues: bool
:param revertresources: If True, resourcedef file is
removed. Defaults to True.
Reverting resources causes a server
restart to occur.
:type revertresources: bool
:param delhooks: If True, hooks are deleted, if deletion
fails, fall back to reverting hooks. Defaults
to True.
:type delhooks: bool
:param delqueues: If True, all non-default queues are deleted,
will attempt to delete all jobs first, if it
fails, revertqueues will be honored,
otherwise,revertqueues is ignored. Defaults
to True
:type delqueues: bool
:returns: True upon success and False if an error is
encountered.
:raises: PbsStatusError or PbsManagerError
"""
ignore_attrs = ['id', 'pbs_license', ATTR_NODE_ProvisionEnable]
ignore_attrs += [ATTR_status, ATTR_total, ATTR_count]
ignore_attrs += [ATTR_rescassn, ATTR_FLicenses, ATTR_SvrHost]
ignore_attrs += [ATTR_license_count, ATTR_version, ATTR_managers]
ignore_attrs += [ATTR_pbs_license_info]
unsetlist = []
setdict = {}
self.logger.info(self.logprefix +
'reverting configuration to defaults')
self.cleanup_jobs_and_reservations()
if server_stat is None:
server_stat = self.status(SERVER, level=logging.DEBUG)[0]
for k in server_stat.keys():
if (k in ignore_attrs) or (k in self.dflt_attributes.keys()):
continue
elif (('.' in k) and (k.split('.')[0] in ignore_attrs)):
continue
else:
unsetlist.append(k)
if len(unsetlist) != 0:
self.manager(MGR_CMD_UNSET, MGR_OBJ_SERVER, unsetlist)
for k in self.dflt_attributes.keys():
if(k not in self.attributes or
self.attributes[k] != self.dflt_attributes[k]):
setdict[k] = self.dflt_attributes[k]
if delhooks:
reverthooks = False
hooks = self.status(HOOK, level=logging.DEBUG)
hooks = [h['id'] for h in hooks]
if len(hooks) > 0:
self.manager(MGR_CMD_DELETE, HOOK, id=hooks, expect=True)
if delqueues:
revertqueues = False
queues = self.status(QUEUE, level=logging.DEBUG)
queues = [q['id'] for q in queues]
if len(queues) > 0:
self.manager(MGR_CMD_DELETE, QUEUE, id=queues, expect=True)
a = {ATTR_qtype: 'Execution',
ATTR_enable: 'True',
ATTR_start: 'True'}
self.manager(MGR_CMD_CREATE, QUEUE, a, id='workq', expect=True)
setdict.update({ATTR_dfltque: 'workq'})
if reverthooks:
hooks = self.status(HOOK, level=logging.DEBUG)
hooks = [h['id'] for h in hooks]
a = {ATTR_enable: 'false'}
if len(hooks) > 0:
self.manager(MGR_CMD_SET, MGR_OBJ_HOOK, a, hooks, expect=True)
if revertqueues:
self.status(QUEUE, level=logging.DEBUG)
queues = []
for (qname, qobj) in self.queues.items():
# skip reservation queues. This syntax for Python 2.4
# compatibility
if (qname.startswith('R') or qname.startswith('S') or
qname == server_stat[ATTR_dfltque]):
continue
qobj.revert_to_defaults()
queues.append(qname)
a = {ATTR_enable: 'false'}
self.manager(MGR_CMD_SET, QUEUE, a, id=queues, expect=True)
a = {ATTR_enable: 'True', ATTR_start: 'True'}
self.manager(MGR_CMD_SET, MGR_OBJ_QUEUE, a,
id=server_stat[ATTR_dfltque], expect=True)
if len(setdict) > 0:
self.manager(MGR_CMD_SET, MGR_OBJ_SERVER, setdict)
if revertresources:
try:
rescs = self.status(RSC)
rescs = [r['id'] for r in rescs]
except:
rescs = []
if len(rescs) > 0:
self.manager(MGR_CMD_DELETE, RSC, id=rescs, expect=True)
return True
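    # Illustrative usage sketch (editor's example, not part of the library):
    # a typical test setup/teardown call, wiping queues and hooks but keeping
    # the resourcedef file in place.
    #
    #     server.revert_to_defaults(delqueues=True, delhooks=True,
    #                               revertresources=False)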
def save_configuration(self, outfile, mode='a'):
"""
Save a server configuration, this includes:
- ``server_priv/resourcedef``
- ``qmgr -c "print server"``
- ``qmgr -c "print sched"``
- ``qmgr -c "print hook"``
        :param outfile: the output file to which configuration is
saved
:type outfile: str
:param mode: The mode in which to open outfile to save
configuration. The first object being saved
should open this file with 'w' and subsequent
calls from other objects should save with
                     mode 'a' or 'a+'. Defaults to 'a'
:type mode: str
:returns: True on success, False on error
"""
conf = {}
sconf = {MGR_OBJ_SERVER: conf}
rd = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
self._save_config_file(conf, rd)
qmgr = os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmgr')
ret = self.du.run_cmd(self.client, [qmgr, '-c', 'p s'], sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_server'] = ret['out']
ret = self.du.run_cmd(self.hostname, [qmgr, '-c', 'p sched'],
sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_sched'] = ret['out']
ret = self.du.run_cmd(self.hostname, [qmgr, '-c', 'p h'], sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_hook'] = ret['out']
try:
f = open(outfile, mode)
cPickle.dump(sconf, f)
f.close()
except:
self.logger.error('Error processing file ' + outfile)
return False
return True
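    # Illustrative usage sketch (editor's example, not part of the library):
    # snapshotting the server configuration before a destructive test and
    # restoring it afterwards. The path is a made-up placeholder.
    #
    #     server.save_configuration('/tmp/svr_config.pickle', mode='w')
    #     ... run the test ...
    #     server.load_configuration('/tmp/svr_config.pickle')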
def load_configuration(self, infile):
"""
load configuration from saved file ``infile``
"""
self.revert_to_defaults()
self._load_configuration(infile, MGR_OBJ_SERVER)
def get_hostname(self):
"""
return the default server hostname
"""
if self.get_op_mode() == PTL_CLI:
return self.hostname
return pbs_default()
def _db_connect(self, db_access=None):
if self._db_conn is None:
if 'user' not in db_access or\
'password' not in db_access:
self.logger.error('missing credentials to access DB')
return None
if 'dbname' not in db_access:
db_access['dbname'] = 'pbs_datastore'
if 'port' not in db_access:
db_access['port'] = '15007'
if 'host' not in db_access:
db_access['host'] = self.hostname
user = db_access['user']
dbname = db_access['dbname']
port = db_access['port']
password = db_access['password']
host = db_access['host']
cred = "host=%s dbname=%s user=%s password=%s port=%s" % \
(host, dbname, user, password, port)
self._db_conn = psycopg2.connect(cred)
return self._db_conn
def _db_server_host(self, cur=None, db_access=None):
"""
Get the server host name from the database. The server
host name is stored in the pbs.server table and not in
pbs.server_attr.
:param cur: Optional, a predefined cursor to use to
operate on the DB
        :param db_access: set to either file containing
credentials to DB access or
dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
local_init = False
if cur is None:
conn = self._db_connect(db_access)
local_init = True
if conn is None:
return None
cur = conn.cursor()
# obtain server name. The server hostname is stored in table
# pbs.server
cur.execute('SELECT sv_hostname from pbs.server')
if local_init:
conn.commit()
tmp_query = cur.fetchone()
if len(tmp_query) > 0:
svr_host = tmp_query[0]
else:
svr_host = "unknown"
return svr_host
def status_db(self, obj_type=None, attrib=None, id=None, db_access=None,
logerr=True):
"""
Status PBS objects from the SQL database
        :param obj_type: The type of object to query, one of the
                         * objects. Default: SERVER
        :param attrib: Attributes to query, can be a string, a list,
                       or a dictionary. Default: None, i.e., all
                       attributes will be queried
:type attrib: str or list or dictionary
:param id: An optional identifier, the name of the object
to status
:type id: str
:param db_access: information needed to access the database,
can be either a file containing user,
port, dbname, password info or a
dictionary of key/value entries
:type db_access: str or dictionary
"""
if not PSYCOPG:
self.logger.error('psycopg module unavailable, install from ' +
'http://initd.org/psycopg/ and retry')
return None
if not isinstance(db_access, dict):
try:
f = open(db_access, 'r')
except IOError:
self.logger.error('Unable to access ' + db_access)
return None
            lines = f.readlines()
            f.close()
            db_access = {}
            for line in lines:
                (k, v) = line.split('=', 1)
                db_access[k.strip()] = v.strip()
conn = self._db_connect(db_access)
if conn is None:
return None
cur = conn.cursor()
stmt = []
if obj_type == SERVER:
stmt = ["SELECT sv_name,attr_name,attr_resource,attr_value " +
"FROM pbs.server_attr"]
svr_host = self.hostname # self._db_server_host(cur)
elif obj_type == SCHED:
stmt = ["SELECT sched_name,attr_name,attr_resource,attr_value " +
"FROM pbs.scheduler_attr"]
# reuse server host name for sched host
svr_host = self.hostname
elif obj_type == JOB:
stmt = ["SELECT ji_jobid,attr_name,attr_resource,attr_value " +
"FROM pbs.job_attr"]
if id:
id_stmt = ["ji_jobid='" + id + "'"]
elif obj_type == QUEUE:
stmt = ["SELECT qu_name,attr_name,attr_resource,attr_value " +
"FROM pbs.queue_attr"]
if id:
id_stmt = ["qu_name='" + id + "'"]
elif obj_type == RESV:
stmt = ["SELECT ri_resvid,attr_name,attr_resource,attr_value " +
"FROM pbs.resv_attr"]
if id:
id_stmt = ["ri_resvid='" + id + "'"]
elif obj_type in (NODE, VNODE):
stmt = ["SELECT nd_name,attr_name,attr_resource,attr_value " +
"FROM pbs.node_attr"]
if id:
id_stmt = ["nd_name='" + id + "'"]
else:
self.logger.error('status: object type not handled')
return None
if attrib or id:
stmt += ["WHERE"]
extra_stmt = []
if attrib:
if isinstance(attrib, dict):
attrs = attrib.keys()
elif isinstance(attrib, list):
attrs = attrib
elif isinstance(attrib, str):
attrs = attrib.split(',')
for a in attrs:
extra_stmt += ["attr_name='" + a + "'"]
stmt += [" OR ".join(extra_stmt)]
if id:
stmt += [" AND ", " AND ".join(id_stmt)]
exec_stmt = " ".join(stmt)
self.logger.debug('server: executing db statement: ' + exec_stmt)
cur.execute(exec_stmt)
conn.commit()
_results = cur.fetchall()
obj_dict = {}
for _res in _results:
if obj_type in (SERVER, SCHED):
obj_name = svr_host
else:
obj_name = _res[0]
if obj_name not in obj_dict:
obj_dict[obj_name] = {'id': obj_name}
attr = _res[1]
if _res[2]:
attr += '.' + _res[2]
obj_dict[obj_name][attr] = _res[3]
return obj_dict.values()
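    # Illustrative usage sketch (editor's example, not part of the library):
    # querying job attributes straight from the datastore; requires the
    # psycopg module. The credentials and attribute names are made-up
    # placeholders.
    #
    #     creds = {'user': 'postgres', 'password': 'secret', 'port': '15007'}
    #     jobs = server.status_db(JOB, attrib=['job_state', 'queue'],
    #                             db_access=creds)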
#
# Begin IFL Wrappers
#
def status(self, obj_type=SERVER, attrib=None, id=None,
extend=None, level=logging.INFO, db_access=None, runas=None,
resolve_indirectness=False, logerr=True):
"""
Stat any PBS object ``[queue, server, node, hook, job,
resv, sched]``.If the Server is setup from diag input,
see diag or diagmap member, the status calls are routed
directly to the data on files from diag.
The server can be queried either through the 'qstat'
command line tool or through the wrapped PBS IFL api,
see set_op_mode.
Return a dictionary representation of a batch status object
        and raises ``PbsStatusError`` on error.
        :param obj_type: The type of object to query, one of the *
                         objects. Default: SERVER
        :param attrib: Attributes to query, can be a string, a
                       list, or a dictionary. Default is to query all
attributes.
:type attrib: str or list or dictionary
:param id: An optional id, the name of the object to status
:type id: str
:param extend: Optional extension to the IFL call
:param level: The logging level, defaults to INFO
:type level: str
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
:param runas: run stat as user
:type runas: str
:param resolve_indirectness: If True resolves indirect node
resources values
:type resolve_indirectness: bool
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
        In addition to the standard IFL stat call, this wrapper handles
        a few cases that aren't implicitly offered by pbs_stat*,
        those are for Hooks, Resources, and a formula evaluation.
"""
prefix = 'status on ' + self.shortname
if runas:
prefix += ' as ' + str(runas)
prefix += ': '
self.logit(prefix, obj_type, attrib, id, level)
bs = None
bsl = []
freebs = False
# 2 - Special handling for gathering the job formula value.
if attrib is not None and PTL_FORMULA in attrib:
if (((isinstance(attrib, list) or isinstance(attrib, dict)) and
(len(attrib) == 1)) or
(isinstance(attrib, str) and len(attrib.split(',')) == 1)):
bsl = self.status(
JOB, 'Resource_List.select', id=id, extend='t')
if self.scheduler is None:
self.scheduler = Scheduler(self.hostname)
if 'log_filter' in self.scheduler.sched_config:
_prev_filter = self.scheduler.sched_config['log_filter']
if int(_prev_filter) & 2048:
self.scheduler.set_sched_config(
{'log_filter': 2048})
self.manager(MGR_CMD_SET, SERVER, {'scheduling': 'True'})
if id is None:
_formulas = self.scheduler.job_formula()
else:
_formulas = {id: self.scheduler.job_formula(jobid=id)}
if not int(_prev_filter) & 2048:
self.scheduler.set_sched_config(
{'log_filter': int(_prev_filter)})
if len(bsl) == 0:
bsl = [{'id': id}]
for _b in bsl:
if _b['id'] in _formulas:
_b[PTL_FORMULA] = _formulas[_b['id']]
return bsl
# 3- Serve data from database if requested... and available for the
# given object type
if db_access and obj_type in (SERVER, SCHED, NODE, QUEUE, RESV, JOB):
bsl = self.status_db(obj_type, attrib, id, db_access=db_access,
logerr=logerr)
# 4- Serve data from diag files
elif obj_type in self.diagmap:
if obj_type in (HOOK, PBS_HOOK):
for f in self.diagmap[obj_type]:
_b = self.utils.file_to_dictlist(f, attrib)
if _b and 'hook_name' in _b[0]:
_b[0]['id'] = _b[0]['hook_name']
else:
_b[0]['id'] = os.path.basename(f)
if id is None or id == _b[0]['id']:
bsl.extend(_b)
else:
bsl = self.utils.file_to_dictlist(self.diagmap[obj_type],
attrib, id=id)
# 6- Stat using PBS CLI commands
elif self.get_op_mode() == PTL_CLI:
tgt = self.client
if obj_type in (JOB, QUEUE, SERVER):
pcmd = [os.path.join(
self.client_conf['PBS_EXEC'],
'bin',
'qstat')]
if extend:
pcmd += ['-' + extend]
if obj_type == JOB:
pcmd += ['-f']
if id:
pcmd += [id]
else:
pcmd += ['@' + self.hostname]
elif obj_type == QUEUE:
pcmd += ['-Qf']
if id:
if '@' not in id:
pcmd += [id + '@' + self.hostname]
else:
pcmd += [id]
else:
pcmd += ['@' + self.hostname]
elif obj_type == SERVER:
pcmd += ['-Bf', self.hostname]
elif obj_type in (NODE, VNODE, HOST):
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbsnodes')]
pcmd += ['-s', self.hostname]
if obj_type in (NODE, VNODE):
pcmd += ['-v']
if obj_type == HOST:
pcmd += ['-H']
if id:
pcmd += [id]
else:
pcmd += ['-a']
elif obj_type == RESV:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rstat')]
pcmd += ['-f']
if id:
pcmd += [id]
elif obj_type in (SCHED, PBS_HOOK, HOOK, RSC):
try:
rc = self.manager(MGR_CMD_LIST, obj_type, attrib, id,
runas=runas, level=level, logerr=logerr)
                except PbsManagerError as e:
rc = e.rc
# PBS bug, no hooks yields a return code of 1, we ignore
if obj_type != HOOK:
raise PbsStatusError(
rc=rc, rv=[], msg=self.geterrmsg())
if rc == 0:
if obj_type == HOOK:
o = self.hooks
elif obj_type == PBS_HOOK:
o = self.pbshooks
elif obj_type == SCHED:
if self.scheduler is None:
return []
o = {'sched': self.scheduler}
elif obj_type == RSC:
o = self.resources
if id:
if id in o:
return [o[id].attributes]
else:
return None
return [h.attributes for h in o.values()]
return []
else:
self.logger.error(self.logprefix + "unrecognized object type")
raise PbsStatusError(rc=-1, rv=[],
msg="unrecognized object type")
return None
# as_script is used to circumvent some shells that will not pass
# along environment variables when invoking a command through sudo
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif obj_type == RESV and not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(tgt, pcmd, runas=runas, as_script=as_script,
level=logging.INFOCLI, logerr=logerr)
o = ret['out']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if ret['rc'] != 0:
raise PbsStatusError(rc=ret['rc'], rv=[], msg=self.geterrmsg())
bsl = self.utils.convert_to_dictlist(o, attrib, mergelines=True)
# 7- Stat with impersonation over PBS IFL swig-wrapped API
elif runas is not None:
_data = {'obj_type': obj_type, 'attrib': attrib, 'id': id}
bsl = self.pbs_api_as('status', user=runas, data=_data,
extend=extend)
else:
# 8- Stat over PBS IFL API
#
# resources are special attributes, all resources are queried as
# a single attribute.
# e.g. querying the resources_available attribute returns all
# resources such as ncpus, mem etc. when querying for
# resources_available.ncpus and resources_available.mem only query
# resources_available once and retrieve the resources desired from
# there
if isinstance(attrib, dict):
attribcopy = {}
restype = []
for k, v in attrib.items():
if isinstance(v, tuple):
# SET requires a special handling because status may
# have been called through counter to count the number
# of objects have a given attribute set, in this case
# we set the attribute to an empty string rather than
# the number of elements requested. This is a
# side-effect of the way pbs_statjob works
if v[0] in (SET, MATCH_RE):
v = ''
else:
v = v[1]
if callable(v):
v = ''
if '.' in k:
_r = k.split('.')[0]
if _r not in restype:
attribcopy[k] = v
restype.append(_r)
else:
attribcopy[k] = v
elif isinstance(attrib, list):
attribcopy = []
for k in attrib:
if '.' in k:
_found = False
for _e in attribcopy:
_r = k.split('.')[0]
if _r == _e.split('.')[0]:
_found = True
break
if not _found:
attribcopy.append(k)
else:
attribcopy.append(k)
else:
attribcopy = attrib
a = self.utils.convert_to_attrl(attribcopy)
c = self._connect(self.hostname)
if obj_type == JOB:
bs = pbs_statjob(c, id, a, extend)
elif obj_type == QUEUE:
bs = pbs_statque(c, id, a, extend)
elif obj_type == SERVER:
bs = pbs_statserver(c, a, extend)
elif obj_type == HOST:
bs = pbs_statnode(c, id, a, extend)
elif obj_type == VNODE:
bs = pbs_statvnode(c, id, a, extend)
elif obj_type == RESV:
bs = pbs_statresv(c, id, a, extend)
elif obj_type == SCHED:
bs = pbs_statsched(c, a, extend)
elif obj_type == RSC:
# up to PBSPro 12.3 pbs_statrsc was not in pbs_ifl.h
bs = pbs_statrsc(c, id, a, extend)
elif obj_type in (HOOK, PBS_HOOK):
if os.getuid() != 0:
try:
rc = self.manager(MGR_CMD_LIST, obj_type, attrib,
id, level=level)
if rc == 0:
if id:
if (obj_type == HOOK and
id in self.hooks):
return [self.hooks[id].attributes]
elif (obj_type == PBS_HOOK and
id in self.pbshooks):
return [self.pbshooks[id].attributes]
else:
return None
if obj_type == HOOK:
return [h.attributes for h in
self.hooks.values()]
elif obj_type == PBS_HOOK:
return [h.attributes for h in
self.pbshooks.values()]
except:
pass
else:
bs = pbs_stathook(c, id, a, extend)
else:
self.logger.error(self.logprefix +
"unrecognized object type " + str(obj_type))
freebs = True
err = self.geterrmsg()
self._disconnect(c)
if err:
raise PbsStatusError(rc=-1, rv=[], msg=err)
if not isinstance(bs, list):
bsl = self.utils.batch_status_to_dictlist(bs, attrib)
else:
bsl = self.utils.filter_batch_status(bs, attrib)
# Update each object's dictionary with corresponding attributes and
# values
self.update_attributes(obj_type, bsl)
# Hook stat is done through CLI, no need to free the batch_status
if (not isinstance(bs, list) and freebs and
obj_type not in (HOOK, PBS_HOOK) and os.getuid() != 0):
pbs_statfree(bs)
# 9- Resolve indirect resources
if obj_type in (NODE, VNODE) and resolve_indirectness:
nodes = {}
for _b in bsl:
for k, v in _b.items():
if v.startswith('@'):
if v[1:] in nodes:
_b[k] = nodes[v[1:]][k]
else:
for l in bsl:
if l['id'] == v[1:]:
                                    nodes[v[1:]] = l
                                    _b[k] = l[k]
break
del nodes
return bsl
def submit_interactive_job(self, job, cmd):
"""
        Submit an ``interactive`` job. Returns a job identifier
        or raises ``PbsSubmitError`` on error.
:param cmd: The command to run to submit the interactive
job
:type cmd: str
:param job: the job object. The job must have the attribute
'interactive_job' populated. That attribute is
a list of tuples of the form:
(<command>, <expected output>, <...>)
                    for example, to send the command
                    hostname and expect 'myhost.mydomain' one would
                    set: job.interactive_job =
                    [('hostname', 'myhost.mydomain')]
                    If more than one line is expected, the additional
                    lines are appended to the tuple.
:raises: PbsSubmitError
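        Example (illustrative sketch; ``server``, ``Job``, ``TEST_USER``
        and ``cmd`` are assumed to be provided by the caller)::

            j = Job(TEST_USER, attrs={ATTR_inter: ''})
            j.interactive_job = [('hostname', 'myhost.mydomain')]
            # cmd is the qsub command list normally assembled by submit()
            jid = server.submit_interactive_job(j, cmd)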
"""
ij = InteractiveJob(job, cmd, self.hostname)
# start the interactive job submission thread and wait to pickup the
# actual job identifier
ij.start()
while ij.jobid is None:
continue
return ij.jobid
def submit(self, obj, script=None, extend=None, submit_dir=None):
"""
Submit a job or reservation. Returns a job identifier
or raises PbsSubmitError on error
:param obj: The Job or Reservation instance to submit
        :param script: Path to a script to submit. Default: None,
                       in which case an executable /bin/sleep 100 is
                       submitted
:type script: str or None
:param extend: Optional extension to the IFL call.
see pbs_ifl.h
:type extend: str or None
:param submit_dir: directory from which job is submitted.
Defaults to temporary directory
:type submit_dir: str or None
:raises: PbsSubmitError
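        Example (illustrative sketch; assumes a ``Server`` instance named
        ``server`` and the PTL ``Job`` class and ``TEST_USER`` account)::

            j = Job(TEST_USER, attrs={'Resource_List.select': '1:ncpus=1'})
            jid = server.submit(j)
            print 'submitted', jid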
"""
_interactive_job = False
as_script = False
rc = None
if isinstance(obj, Job):
if script is None and obj.script is not None:
script = obj.script
if ATTR_inter in obj.attributes:
_interactive_job = True
if ATTR_executable in obj.attributes:
del obj.attributes[ATTR_executable]
if ATTR_Arglist in obj.attributes:
del obj.attributes[ATTR_Arglist]
elif not isinstance(obj, Reservation):
m = self.logprefix + "unrecognized object type"
self.logger.error(m)
return None
if submit_dir is None:
submit_dir = tempfile.gettempdir()
cwd = os.getcwd()
os.chdir(submit_dir)
c = None
# 1- Submission using the command line tools
if self.get_op_mode() == PTL_CLI:
exclude_attrs = [] # list of attributes to not convert to CLI
if isinstance(obj, Job):
runcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qsub')]
elif isinstance(obj, Reservation):
runcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rsub')]
if ATTR_resv_start in obj.custom_attrs:
start = obj.custom_attrs[ATTR_resv_start]
obj.custom_attrs[ATTR_resv_start] = \
self.utils.convert_seconds_to_resvtime(start)
if ATTR_resv_end in obj.custom_attrs:
end = obj.custom_attrs[ATTR_resv_end]
obj.custom_attrs[ATTR_resv_end] = \
self.utils.convert_seconds_to_resvtime(end)
if ATTR_resv_timezone in obj.custom_attrs:
exclude_attrs += [ATTR_resv_timezone, ATTR_resv_standing]
# handling of impersonation differs widely across OS's,
# when setting PBS_TZID we standardize on running the cmd
# as a script instead of customizing for each OS flavor
_tz = obj.custom_attrs[ATTR_resv_timezone]
runcmd = ['PBS_TZID=' + _tz] + runcmd
as_script = True
if ATTR_resv_rrule in obj.custom_attrs:
_rrule = obj.custom_attrs[ATTR_resv_rrule]
if _rrule[0] not in ("'", '"'):
_rrule = "'" + _rrule + "'"
obj.custom_attrs[ATTR_resv_rrule] = _rrule
if not self._is_local:
if ATTR_queue not in obj.attributes:
runcmd += ['-q@' + self.hostname]
elif '@' not in obj.attributes[ATTR_queue]:
curq = obj.attributes[ATTR_queue]
runcmd += ['-q' + curq + '@' + self.hostname]
if obj.custom_attrs and (ATTR_queue in obj.custom_attrs):
del obj.custom_attrs[ATTR_queue]
_conf = self.default_client_pbs_conf
cmd = self.utils.convert_to_cli(obj.custom_attrs, IFL_SUBMIT,
self.hostname, dflt_conf=_conf,
exclude_attrs=exclude_attrs)
if cmd is None:
try:
os.chdir(cwd)
except OSError:
pass
return None
runcmd += cmd
if script:
runcmd += [script]
else:
if ATTR_executable in obj.attributes:
runcmd += ['--', obj.attributes[ATTR_executable]]
if ((ATTR_Arglist in obj.attributes) and
(obj.attributes[ATTR_Arglist] is not None)):
args = obj.attributes[ATTR_Arglist]
arglist = self.utils.convert_arglist(args)
if arglist is None:
try:
os.chdir(cwd)
except OSError:
pass
return None
runcmd += [arglist]
if obj.username != self.current_user:
runas = obj.username
else:
runas = None
if _interactive_job:
ijid = self.submit_interactive_job(obj, runcmd)
try:
os.chdir(cwd)
except OSError:
pass
return ijid
if not self.default_client_pbs_conf:
runcmd = [
'PBS_CONF_FILE=' + self.client_pbs_conf_file] + runcmd
as_script = True
ret = self.du.run_cmd(self.client, runcmd, runas=runas,
level=logging.INFOCLI, as_script=as_script,
logerr=False)
if ret['rc'] != 0:
objid = None
else:
objid = ret['out'][0]
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc = ret['rc']
# 2- Submission with impersonation over API
elif obj.username != self.current_user:
# submit job as a user requires setting uid to that user. It's
# done in a separate process
obj.set_variable_list(obj.username, submit_dir)
obj.set_attributes()
if (obj.script is not None and not self._is_local):
# This copy assumes that the file system layout on the
# remote host is identical to the local host. When not
# the case, this code will need to be updated to copy
# to a known remote location and update the obj.script
self.du.run_copy(self.hostname, obj.script, obj.script)
os.remove(obj.script)
objid = self.pbs_api_as('submit', obj, user=obj.username,
extend=extend)
# 3- Submission as current user over API
else:
c = self._connect(self.hostname)
if isinstance(obj, Job):
if script:
if ATTR_o not in obj.attributes:
obj.attributes[ATTR_o] = (self.hostname + ':' +
obj.script + '.o')
if ATTR_e not in obj.attributes:
obj.attributes[ATTR_e] = (self.hostname + ':' +
obj.script + '.e')
sc = os.path.basename(script)
obj.unset_attributes([ATTR_executable, ATTR_Arglist])
if ATTR_N not in obj.custom_attrs:
obj.attributes[ATTR_N] = sc
if ATTR_queue in obj.attributes:
destination = obj.attributes[ATTR_queue]
# queue must be removed otherwise will cause the submit
# to fail silently
del obj.attributes[ATTR_queue]
else:
destination = None
if (ATTR_o not in obj.attributes or
ATTR_e not in obj.attributes):
fn = self.utils.random_str(
length=4, prefix='PtlPbsJob')
tmp = self.du.get_tempdir(self.hostname)
fn = os.path.join(tmp, fn)
if ATTR_o not in obj.attributes:
obj.attributes[ATTR_o] = (self.hostname + ':' +
fn + '.o')
if ATTR_e not in obj.attributes:
obj.attributes[ATTR_e] = (self.hostname + ':' +
fn + '.e')
obj.attropl = self.utils.dict_to_attropl(obj.attributes)
objid = pbs_submit(c, obj.attropl, script, destination,
extend)
elif isinstance(obj, Reservation):
if ATTR_resv_duration in obj.attributes:
# reserve_duration is not a valid attribute, the API call
# will get rejected if it is used
wlt = ATTR_l + '.walltime'
obj.attributes[wlt] = obj.attributes[ATTR_resv_duration]
del obj.attributes[ATTR_resv_duration]
obj.attropl = self.utils.dict_to_attropl(obj.attributes)
objid = pbs_submit_resv(c, obj.attropl, extend)
prefix = 'submit to ' + self.shortname + ' as '
if isinstance(obj, Job):
self.logit(prefix + '%s: ' % obj.username, JOB, obj.custom_attrs,
objid)
if obj.script_body:
self.logger.log(logging.INFOCLI, 'job script ' + script +
'\n---\n' + obj.script_body + '\n---')
if objid is not None:
self.jobs[objid] = obj
elif isinstance(obj, Reservation):
# Reservations without -I option return as 'R123 UNCONFIRMED'
# so split to get the R123 only
self.logit(prefix + '%s: ' % obj.username, RESV, obj.attributes,
objid)
if objid is not None:
objid = objid.split()[0]
self.reservations[objid] = obj
if objid is not None:
obj.server[self.hostname] = objid
else:
try:
os.chdir(cwd)
except OSError:
pass
raise PbsSubmitError(rc=rc, rv=None, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
try:
os.chdir(cwd)
except OSError:
pass
return objid
def deljob(self, id=None, extend=None, runas=None, wait=False,
logerr=True, attr_W=None):
"""
        Delete a single job or a list of jobs specified by id.
        Raises ``PbsDeljobError`` on error.
:param id: The identifier(s) of the jobs to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
:type extend: str or None
:param runas: run as user
:type runas: str or None
:param wait: Set to True to wait for job(s) to no longer
be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
        :param attr_W: -W args to qdel (CLI mode only)
        :type attr_W: str
:raises: PbsDeljobError
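        Example (illustrative sketch; assumes ``server`` and a job id
        ``jid`` returned by submit())::

            server.deljob(jid, wait=True)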
"""
prefix = 'delete job on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ', '.join(id)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qdel')]
if extend is not None:
pcmd += self.utils.convert_to_cli(extend, op=IFL_DELETE,
hostname=self.hostname)
if attr_W is not None:
pcmd += ['-W']
if attr_W != PTL_NOARG:
pcmd += [attr_W]
if id is not None:
pcmd += id
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, logerr=logerr,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('deljob', id, user=runas, extend=extend)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in id:
tmp_rc = pbs_deljob(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsDeljobError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if self.jobs is not None:
for j in id:
if j in self.jobs:
if self.jobs[j].interactive_handle is not None:
self.jobs[j].interactive_handle.close()
del self.jobs[j]
if c:
self._disconnect(c)
if wait:
for oid in id:
self.expect(JOB, 'queue', id=oid, op=UNSET, runas=runas,
level=logging.DEBUG)
return rc
def delresv(self, id=None, extend=None, runas=None, wait=False,
logerr=True):
"""
        Delete a single reservation or a list of reservations specified
        by id. Raises ``PbsDelresvError`` on error.
        :param id: The identifier(s) of the reservations to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
:type extend: str or None
:param runas: run as user
:type runas: str or None
        :param wait: Set to True to wait for reservation(s) to no
                     longer be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
        :raises: PbsDelresvError
"""
prefix = 'delete resv on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ', '.join(id)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rdel')]
if id is not None:
pcmd += id
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, logerr=logerr,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('delresv', id, user=runas, extend=extend)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in id:
tmp_rc = pbs_delresv(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsDelresvError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if self.reservations is not None:
for j in id:
if j in self.reservations:
del self.reservations[j]
if c:
self._disconnect(c)
if wait:
for oid in id:
self.expect(RESV, 'queue', id=oid, op=UNSET, runas=runas,
level=logging.DEBUG)
return rc
def delete(self, id=None, extend=None, runas=None, wait=False,
logerr=True):
"""
        Delete a single job/reservation or a list of them specified
        by id. Raises ``PbsDeleteError`` on error.
:param id: The identifier(s) of the jobs/resvs to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
        :type extend: str or None
:param runas: run as user
:type runas: str
:param wait: Set to True to wait for job(s)/resv(s) to
no longer be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
:raises: PbsDeleteError
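        Example (illustrative sketch; assumes ``server`` plus a job id
        ``jid`` and a reservation id ``rid`` obtained from submit())::

            server.delete([jid, rid], wait=True)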
"""
prefix = 'delete on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ','.join(id)
if extend is not None:
prefix += ' with ' + str(extend)
self.logger.info(prefix)
if not len(id) > 0:
return 0
obj_type = {}
for j in id:
if j[0] in ('R', 'S'):
obj_type[j] = RESV
try:
rc = self.delresv(j, extend, runas, logerr=logerr)
                except PbsDelresvError as e:
rc = e.rc
msg = e.msg
rv = e.rv
else:
obj_type[j] = JOB
try:
rc = self.deljob(j, extend, runas, logerr=logerr)
                except PbsDeljobError as e:
rc = e.rc
msg = e.msg
rv = e.rv
if rc != 0:
raise PbsDeleteError(rc=rc, rv=rv, msg=msg)
if wait:
for oid in id:
self.expect(obj_type[oid], 'queue', id=oid, op=UNSET,
runas=runas, level=logging.DEBUG)
return rc
def select(self, attrib=None, extend=None, runas=None, logerr=True):
"""
        Select jobs that match the attributes list, or all jobs if no
        attributes are given. Raises ``PbsSelectError`` on error.
:param attrib: A string, list, or dictionary of attributes
:type attrib: str or list or dictionary
:param extend: the extended attributes to pass to select
:type extend: str or None
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:returns: A list of job identifiers that match the
attributes specified
:raises: PbsSelectError
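        Example (illustrative sketch; assumes ``server`` with jobs
        queued or running)::

            # list all job identifiers known to the server
            for jid in server.select():
                print jid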
"""
prefix = "select on " + self.shortname
if runas is not None:
prefix += " as " + str(runas)
prefix += ": "
if attrib is None:
s = PTL_ALL
elif not isinstance(attrib, dict):
self.logger.error(prefix + "attributes must be a dictionary")
return
else:
s = str(attrib)
self.logger.info(prefix + s)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'],
'bin', 'qselect')]
cmd = self.utils.convert_to_cli(attrib, op=IFL_SELECT,
hostname=self.hostname)
if extend is not None:
pcmd += ['-' + extend]
if not self._is_local and ((attrib is None) or
(ATTR_queue not in attrib)):
pcmd += ['-q', '@' + self.hostname]
pcmd += cmd
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsSelectError(rc=self.last_rc, rv=False,
msg=self.geterrmsg())
jobs = ret['out']
# command returns no jobs as empty, since we expect a valid id,
# we reset the jobs to an empty array
if len(jobs) == 1 and jobs[0] == '':
jobs = []
elif runas is not None:
jobs = self.pbs_api_as('select', user=runas, data=attrib,
extend=extend)
else:
attropl = self.utils.convert_to_attropl(attrib, op=EQ)
c = self._connect(self.hostname)
jobs = pbs_selectjob(c, attropl, extend)
err = self.geterrmsg()
if err:
raise PbsSelectError(rc=-1, rv=False, msg=err,
post=self._disconnect, conn=c)
self._disconnect(c)
return jobs
def selstat(self, select_list, rattrib, runas=None, extend=None):
"""
        Stat and filter job attributes.
:param select_list: The filter criteria
        :type select_list: list
:param rattrib: The attributes to query
:type rattrib: List
:param runas: run as user
:type runas: str or None
.. note:: No ``CLI`` counterpart for this call
"""
attrl = self.utils.convert_to_attrl(rattrib)
attropl = self.utils.convert_to_attropl(select_list)
c = self._connect(self.hostname)
bs = pbs_selstat(c, attropl, attrl, extend)
self._disconnect(c)
return bs
def manager(self, cmd, obj_type, attrib=None, id=None, extend=None,
expect=False, max_attempts=None, level=logging.INFO,
sudo=None, runas=None, logerr=True):
"""
issue a management command to the server, e.g to set an
attribute
        Returns the return code of ``qmgr/pbs_manager()`` on
        success; if expect is set to True, the return value is
        that of the call to expect. Raises ``PbsManagerError`` on
        error.
:param cmd: The command to issue,
``MGR_CMD_[SET,UNSET, LIST,...]`` see pbs_ifl.h
:type cmd: str
:param obj_type: The type of object to query, one of
the * objects
        :param attrib: Attributes to operate on, can be a string, a
                       list, or a dictionary
:type attrib: str or list or dictionary
:param id: The name or list of names of the object(s) to act
upon.
:type id: str or list
:param extend: Optional extension to the IFL call. see
pbs_ifl.h
:type extend: str or None
        :param expect: If set to True, query the server expecting
                       the value to be accurately reflected.
                       Defaults to False
:type expect: bool
:param max_attempts: Sets a maximum number of attempts to
call expect with.
:type max_attempts: int
:param level: logging level
:param sudo: If True, run the manager command as super user.
Defaults to None. Some attribute settings
should be run with sudo set to True, those are
acl_roots, job_sort_formula, hook operations,
no_sched_hook_event, in those cases, setting
sudo to False is only needed for testing
purposes
:type sudo: bool
:param runas: run as user
:type runas: str
:param logerr: If False, CLI commands do not log error,
i.e. silent mode
:type logerr: bool
:raises: PbsManagerError
When expect is ``False``, return the value, ``0/!0``
returned by pbs_manager
When expect is ``True``, return the value, ``True/False``,
returned by expect
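        Example (illustrative sketch; mirrors usage elsewhere in this
        module and assumes a ``Server`` instance named ``server``)::

            # set the server's scheduling attribute and verify it
            server.manager(MGR_CMD_SET, SERVER, {'scheduling': 'True'},
                           expect=True)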
"""
if isinstance(id, str):
oid = id.split(',')
else:
oid = id
self.logit('manager on ' + self.shortname +
[' as ' + str(runas), ''][runas is None] + ': ' +
PBS_CMD_MAP[cmd] + ' ', obj_type, attrib, oid, level=level)
c = None # connection handle
if (self.get_op_mode() == PTL_CLI or
sudo is not None or
obj_type in (HOOK, PBS_HOOK) or
(attrib is not None and ('job_sort_formula' in attrib or
'acl_roots' in attrib or
'no_sched_hook_event' in attrib))):
execcmd = [PBS_CMD_MAP[cmd], PBS_OBJ_MAP[obj_type]]
if oid is not None:
if cmd == MGR_CMD_DELETE and obj_type == NODE and oid[0] == "":
oid[0] = "@default"
execcmd += [",".join(oid)]
if attrib is not None and cmd != MGR_CMD_LIST:
if cmd == MGR_CMD_IMPORT:
execcmd += [attrib['content-type'],
attrib['content-encoding'],
attrib['input-file']]
else:
if isinstance(attrib, (dict, OrderedDict)):
kvpairs = []
for k, v in attrib.items():
if isinstance(v, tuple):
if v[0] == INCR:
op = '+='
elif v[0] == DECR:
op = '-='
else:
msg = 'Invalid operation: %s' % (v[0])
raise PbsManagerError(rc=1, rv=False,
msg=msg)
v = v[1]
else:
op = '='
# handle string arrays as double quotes if
# not already set:
if isinstance(v, str) and ',' in v and v[0] != '"':
v = '"' + v + '"'
kvpairs += [str(k) + op + str(v)]
if kvpairs:
execcmd += [",".join(kvpairs)]
del kvpairs
elif isinstance(attrib, list):
execcmd += [",".join(attrib)]
elif isinstance(attrib, str):
execcmd += [attrib]
if not self.default_pbs_conf or not self.default_client_pbs_conf:
as_script = True
else:
as_script = False
if not self._is_local or as_script:
execcmd = '\'' + " ".join(execcmd) + '\''
else:
execcmd = " ".join(execcmd)
# Hooks can only be queried as a privileged user on the host where
# the server is running, care must be taken to use the appropriate
# path to qmgr and appropriate escaping sequences
# VERSION INFO: no_sched_hook_event introduced in 11.3.120 only
if sudo is None:
if (obj_type in (HOOK, PBS_HOOK) or
(attrib is not None and
('job_sort_formula' in attrib or
'acl_roots' in attrib or
'no_sched_hook_event' in attrib))):
sudo = True
else:
sudo = False
pcmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'bin', 'qmgr'),
'-c', execcmd]
if as_script:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
ret = self.du.run_cmd(self.hostname, pcmd, sudo=sudo, runas=runas,
level=logging.INFOCLI, as_script=as_script,
logerr=logerr)
rc = ret['rc']
# NOTE: workaround the fact that qmgr overloads the return code in
# cases where the list returned is empty an error flag is set even
# through there is no error. Handled here by checking if there is
# no err and out message, in which case return code is set to 0
if rc != 0 and (ret['out'] == [''] and ret['err'] == ['']):
rc = 0
if rc == 0:
if cmd == MGR_CMD_LIST:
bsl = self.utils.convert_to_dictlist(ret['out'], attrib,
mergelines=False)
self.update_attributes(obj_type, bsl)
else:
# Need to rework setting error, this is not thread safe
self.last_error = ret['err']
self.last_rc = ret['rc']
elif runas is not None:
_data = {'cmd': cmd, 'obj_type': obj_type, 'attrib': attrib,
'id': oid}
rc = self.pbs_api_as('manager', user=runas, data=_data,
extend=extend)
else:
a = self.utils.convert_to_attropl(attrib, cmd)
c = self._connect(self.hostname)
rc = 0
if obj_type == SERVER and oid is None:
oid = [self.hostname]
if oid is None:
# server will run strlen on id, it can not be NULL
oid = ['']
if cmd == MGR_CMD_LIST:
if oid is None:
bsl = self.status(obj_type, attrib, oid, extend)
else:
bsl = None
for i in oid:
tmpbsl = self.status(obj_type, attrib, i, extend)
if tmpbsl is None:
rc = 1
else:
if bsl is None:
bsl = tmpbsl
else:
bsl += tmpbsl
else:
rc = 0
                if oid is None:
                    # oid is normalized above; pass an empty id if unset
                    rc = pbs_manager(c, cmd, obj_type, '', a, extend)
else:
for i in oid:
tmprc = pbs_manager(c, cmd, obj_type, i, a, extend)
if tmprc != 0:
rc = tmprc
break
if rc == 0:
rc = tmprc
if cmd == MGR_CMD_DELETE and oid is not None:
for i in oid:
if obj_type == MGR_OBJ_HOOK and i in self.hooks:
del self.hooks[i]
if obj_type in (NODE, VNODE) and i in self.nodes:
del self.nodes[i]
if obj_type == MGR_OBJ_QUEUE and i in self.queues:
del self.queues[i]
if obj_type == MGR_OBJ_RSC and i in self.resources:
del self.resources[i]
if rc != 0:
raise PbsManagerError(rv=False, rc=rc, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c is not None:
self._disconnect(c)
if expect:
offset = None
if obj_type in (NODE, HOST):
obj_type = VNODE
if obj_type in (VNODE, QUEUE):
offset = 0.5
if cmd in PBS_CMD_TO_OP:
op = PBS_CMD_TO_OP[cmd]
else:
op = EQ
if oid is None:
return self.expect(obj_type, attrib, oid, op=op,
max_attempts=max_attempts, offset=offset)
for i in oid:
rc = self.expect(obj_type, attrib, i, op=op,
max_attempts=max_attempts, offset=offset)
if not rc:
break
return rc
def sigjob(self, jobid=None, signal=None, extend=None, runas=None,
logerr=True):
"""
Send a signal to a job. Raises ``PbsSignalError`` on error.
:param jobid: identifier of the job or list of jobs to send
the signal to
:type jobid: str or list
:param signal: The signal to send to the job, see pbs_ifl.h
:type signal: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsSignalError
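        Example (illustrative sketch; assumes ``server`` and a running
        job id ``jid``)::

            server.sigjob(jid, 'suspend')
            server.sigjob(jid, 'resume')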
"""
prefix = 'signal on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if signal is not None:
prefix += ' with signal = ' + str(signal)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qsig')]
if signal is not None:
pcmd += ['-s']
if signal != PTL_NOARG:
pcmd += [str(signal)]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('sigjob', jobid, runas, data=signal)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in jobid:
tmp_rc = pbs_sigjob(c, ajob, signal, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsSignalError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def msgjob(self, jobid=None, to_file=None, msg=None, extend=None,
runas=None, logerr=True):
"""
Send a message to a job. Raises ``PbsMessageError`` on
error.
:param jobid: identifier of the job or list of jobs to
send the message to
:type jobid: str or List
:param msg: The message to send to the job
:type msg: str or None
:param to_file: one of ``MSG_ERR`` or ``MSG_OUT`` or
``MSG_ERR|MSG_OUT``
:type to_file: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsMessageError
"""
prefix = 'msgjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if to_file is not None:
prefix += ' with to_file = '
if MSG_ERR == to_file:
prefix += 'MSG_ERR'
elif MSG_OUT == to_file:
prefix += 'MSG_OUT'
elif MSG_OUT | MSG_ERR == to_file:
prefix += 'MSG_ERR|MSG_OUT'
else:
prefix += str(to_file)
if msg is not None:
prefix += ' msg = %s' % (str(msg))
if extend is not None:
prefix += ' extend = %s' % (str(extend))
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmsg')]
if to_file is not None:
if MSG_ERR == to_file:
pcmd += ['-E']
elif MSG_OUT == to_file:
pcmd += ['-O']
elif MSG_OUT | MSG_ERR == to_file:
pcmd += ['-E', '-O']
else:
pcmd += ['-' + str(to_file)]
if msg is not None:
pcmd += [msg]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
data = {'msg': msg, 'to_file': to_file}
rc = self.pbs_api_as('msgjob', jobid, runas, data=data,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
for ajob in jobid:
tmp_rc = pbs_msgjob(c, ajob, to_file, msg, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsMessageError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def alterjob(self, jobid=None, attrib=None, extend=None, runas=None,
logerr=True):
"""
Alter attributes associated to a job. Raises
``PbsAlterError`` on error.
:param jobid: identifier of the job or list of jobs to
operate on
:type jobid: str or list
:param attrib: A dictionary of attributes to set
:type attrib: dictionary
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If False, CLI commands do not log error,
i.e. silent mode
:type logerr: bool
:raises: PbsAlterError
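        Example (illustrative sketch; assumes ``server`` and a queued
        job id ``jid``)::

            server.alterjob(jid, {ATTR_l + '.walltime': '00:10:00'})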
"""
prefix = 'alter on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if attrib is not None:
prefix += ' %s' % (str(attrib))
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qalter')]
if attrib is not None:
_conf = self.default_client_pbs_conf
pcmd += self.utils.convert_to_cli(attrib, op=IFL_ALTER,
hostname=self.client,
dflt_conf=_conf)
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('alterjob', jobid, runas, data=attrib)
else:
c = self._connect(self.hostname)
if c < 0:
return c
a = self.utils.convert_to_attrl(attrib)
rc = 0
for ajob in jobid:
tmp_rc = pbs_alterjob(c, ajob, a, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsAlterError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def holdjob(self, jobid=None, holdtype=None, extend=None, runas=None,
logerr=True):
"""
Hold a job. Raises ``PbsHoldError`` on error.
:param jobid: identifier of the job or list of jobs to hold
:type jobid: str or list
:param holdtype: The type of hold to put on the job
:type holdtype: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsHoldError
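        Example (illustrative sketch; assumes ``server`` and a job id
        ``jid``; 'u' requests a user hold)::

            server.holdjob(jid, 'u')
            server.rlsjob(jid, 'u')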
"""
prefix = 'holdjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if holdtype is not None:
prefix += ' with hold_list = %s' % (holdtype)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qhold')]
if holdtype is not None:
pcmd += ['-h']
if holdtype != PTL_NOARG:
pcmd += [holdtype]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
logerr=logerr, as_script=as_script,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('holdjob', jobid, runas, data=holdtype,
logerr=logerr)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_holdjob(c, ajob, holdtype, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsHoldError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def rlsjob(self, jobid, holdtype, extend=None, runas=None, logerr=True):
"""
Release a job. Raises ``PbsReleaseError`` on error.
:param jobid: job or list of jobs to release
:type jobid: str or list
:param holdtype: The type of hold to release on the job
:type holdtype: str
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsReleaseError
"""
prefix = 'release on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if holdtype is not None:
prefix += ' with hold_list = %s' % (holdtype)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qrls')]
if holdtype is not None:
pcmd += ['-h']
if holdtype != PTL_NOARG:
pcmd += [holdtype]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('rlsjob', jobid, runas, data=holdtype)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_rlsjob(c, ajob, holdtype, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsHoldError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def rerunjob(self, jobid=None, extend=None, runas=None, logerr=True):
"""
Rerun a job. Raises ``PbsRerunError`` on error.
        :param jobid: job or list of jobs to rerun
:type jobid: str or list
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsRerunError
"""
prefix = 'rerun on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if extend is not None:
prefix += extend
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qrerun')]
if extend:
pcmd += ['-W', extend]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('rerunjob', jobid, runas, extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_rerunjob(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsRerunError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def orderjob(self, jobid1=None, jobid2=None, extend=None, runas=None,
logerr=True):
"""
        Reorder the position of ``jobid1`` and ``jobid2``. Raises
        ``PbsOrderError`` on error.
:param jobid1: first jobid
:type jobid1: str or None
:param jobid2: second jobid
:type jobid2: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
        :raises: PbsOrderError
"""
prefix = 'orderjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
prefix += str(jobid1) + ', ' + str(jobid2)
if extend is not None:
prefix += ' ' + str(extend)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qorder')]
if jobid1 is not None:
pcmd += [jobid1]
if jobid2 is not None:
pcmd += [jobid2]
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('orderjob', jobid1, runas, data=jobid2,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = pbs_orderjob(c, jobid1, jobid2, extend)
if rc != 0:
raise PbsOrderError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def runjob(self, jobid=None, location=None, async=False, extend=None,
runas=None, logerr=False):
"""
Run a job on given nodes. Raises ``PbsRunError`` on error.
:param jobid: job or list of jobs to run
:type jobid: str or list
:param location: An execvnode on which to run the job
:type location: str or None
:param async: If true the call will return immediately
assuming success.
:type async: bool
:param extend: extend options
:param runas: run as user
:type runas: str or None
        :param logerr: If True, logs run_cmd errors. Defaults to False
:type logerr: bool
:raises: PbsRunError
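        Example (illustrative sketch; assumes ``server``, a job id
        ``jid`` and a vnode named ``vn1``)::

            server.runjob(jid, location='vn1')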
"""
if async:
prefix = 'Async run on ' + self.shortname
else:
prefix = 'run on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if location is not None:
prefix += ' with location = %s' % (location)
self.logger.info(prefix)
if self.has_diag:
return 0
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qrun')]
if async:
pcmd += ['-a']
if location is not None:
pcmd += ['-H']
if location != PTL_NOARG:
pcmd += [location]
if jobid:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as(
'runjob', jobid, runas, data=location, extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
if async:
tmp_rc = pbs_asyrunjob(c, ajob, location, extend)
else:
tmp_rc = pbs_runjob(c, ajob, location, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsRunError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def movejob(self, jobid=None, destination=None, extend=None, runas=None,
logerr=True):
"""
Move a job or list of job ids to a given destination queue.
Raises ``PbsMoveError`` on error.
:param jobid: A job or list of job ids to move
:type jobid: str or list
:param destination: The destination queue@server
:type destination: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsMoveError
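        Example (illustrative sketch; assumes ``server``, a job id
        ``jid`` and an existing queue named ``workq2``)::

            server.movejob(jid, 'workq2')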
"""
prefix = 'movejob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if destination is not None:
prefix += ' destination = %s' % (destination)
self.logger.info(prefix)
c = None
rc = 0
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmove')]
if destination is not None:
pcmd += [destination]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
logerr=logerr, as_script=as_script,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('movejob', jobid, runas, data=destination,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
for ajob in jobid:
tmp_rc = pbs_movejob(c, ajob, destination, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsMoveError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def qterm(self, manner=None, extend=None, server_name=None, runas=None,
logerr=True):
"""
Terminate the ``pbs_server`` daemon
        :param manner: one of ``(SHUT_IMMEDIATE | SHUT_DELAY |
                       SHUT_QUICK)`` and can be combined with
                       SHUT_WHO_SCHED, SHUT_WHO_MOM, SHUT_WHO_SECDRY,
                       SHUT_WHO_IDLESECDRY, SHUT_WHO_SECDONLY.
:param extend: extend options
:param server_name: name of the pbs server
:type server_name: str or None
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsQtermError
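        Example (illustrative sketch; assumes a ``Server`` instance
        named ``server``)::

            # shut the server down immediately
            server.qterm(manner=SHUT_IMMEDIATE)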
"""
prefix = 'terminate ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': with manner '
attrs = manner
if attrs is None:
prefix += "None "
elif isinstance(attrs, str):
prefix += attrs
else:
if ((attrs & SHUT_QUICK) == SHUT_QUICK):
prefix += "quick "
if ((attrs & SHUT_IMMEDIATE) == SHUT_IMMEDIATE):
prefix += "immediate "
if ((attrs & SHUT_DELAY) == SHUT_DELAY):
prefix += "delay "
if ((attrs & SHUT_WHO_SCHED) == SHUT_WHO_SCHED):
prefix += "schedular "
if ((attrs & SHUT_WHO_MOM) == SHUT_WHO_MOM):
prefix += "mom "
if ((attrs & SHUT_WHO_SECDRY) == SHUT_WHO_SECDRY):
prefix += "secondary server "
if ((attrs & SHUT_WHO_IDLESECDRY) == SHUT_WHO_IDLESECDRY):
prefix += "idle secondary "
if ((attrs & SHUT_WHO_SECDONLY) == SHUT_WHO_SECDONLY):
prefix += "shoutdown secondary only "
self.logger.info(prefix)
if self.has_diag:
return 0
c = None
rc = 0
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qterm')]
_conf = self.default_client_pbs_conf
pcmd += self.utils.convert_to_cli(manner, op=IFL_TERMINATE,
hostname=self.hostname,
dflt_conf=_conf)
if server_name is not None:
pcmd += [server_name]
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
level=logging.INFOCLI, as_script=as_script)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
attrs = {'manner': manner, 'server_name': server_name}
rc = self.pbs_api_as('terminate', None, runas, data=attrs,
extend=extend)
else:
if server_name is None:
server_name = self.hostname
c = self._connect(self.hostname)
rc = pbs_terminate(c, manner, extend)
if rc != 0:
raise PbsQtermError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c, force=True)
if c:
self._disconnect(c, force=True)
return rc
    # keep the historical misspelled alias for backward compatibility
    teminate = qterm
    terminate = qterm
def geterrmsg(self):
"""
Get the error message
"""
mode = self.get_op_mode()
if mode == PTL_CLI:
return self.last_error
elif self._conn is not None and self._conn >= 0:
m = pbs_geterrmsg(self._conn)
if m is not None:
m = m.split('\n')
return m
#
# End IFL Wrappers
#
def qdisable(self, queue=None, runas=None, logerr=True):
"""
Disable queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to
disable
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQdisableError
"""
prefix = 'qdisable on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qdisable')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQdisableError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qdisable: currently not supported in API mode'
raise PbsQdisableError(rv=False, rc=1, msg=_msg)
def qenable(self, queue=None, runas=None, logerr=True):
"""
Enable queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to
enable
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQenableError
"""
prefix = 'qenable on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qenable')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQenableError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qenable: currently not supported in API mode'
raise PbsQenableError(rv=False, rc=1, msg=_msg)
def qstart(self, queue=None, runas=None, logerr=True):
"""
Start queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue
to start
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQstartError
"""
prefix = 'qstart on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qstart')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQstartError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qstart: currently not supported in API mode'
raise PbsQstartError(rv=False, rc=1, msg=_msg)
def qstop(self, queue=None, runas=None, logerr=True):
"""
Stop queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to stop
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command errors.
Defaults to True.
:type logerr: bool
:raises: PbsQstopError
"""
prefix = 'qstop on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qstop')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQstopError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qstop: currently not supported in API mode'
raise PbsQstopError(rv=False, rc=1, msg=_msg)
def parse_resources(self):
"""
Parse server resources as defined in the resourcedef file
Populates instance variable self.resources
:returns: The resources as a dictionary
"""
if not self.has_diag:
self.manager(MGR_CMD_LIST, RSC)
return self.resources
def remove_resource(self, name):
"""
Remove an entry from resourcedef
:param name: The name of the resource to remove
:type name: str
:param restart: Whether to restart the server or not.
Applicable to update_mode 'file'
operations only.
:param update_mode: one of 'file' or 'auto' (the default).
If 'file', updates the resourcedef file
only and will not use the qmgr
operations on resources introduced in
12.3. If 'auto', will automatically
handle the update on resourcedef or
using qmgr based on the version of the
Server.
"""
self.parse_resources()
if not self.has_diag:
if name in self.resources:
self.manager(MGR_CMD_DELETE, RSC, id=name)
def add_resource(self, name, type=None, flag=None):
"""
Define a server resource
:param name: The name of the resource to add to the
resourcedef file
:type name: str
:param type: The type of the resource, one of string,
long, boolean, float
:param flag: The target of the resource, one of n, h, q,
or none
:type flag: str or None
:param restart: Whether to restart the server after adding
a resource.Applicable to update_mode 'file'
operations only.
:param update_mode: one of 'file' or 'auto' (the default).
If 'file', updates the resourcedef file
only and will not use the qmgr
operations on resources introduced in
12.3. If 'auto', will automatically
handle the update on resourcedef or
using qmgr based on the version of the
Server.
        :returns: True on success, False on error
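        Example (illustrative sketch; assumes a ``Server`` instance
        named ``server``)::

            # define a host-level 'long' resource named 'foo'
            server.add_resource('foo', type='long', flag='h')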
"""
rv = self.parse_resources()
if rv is None:
return False
resource_exists = False
if name in self.resources:
msg = [self.logprefix + "resource " + name]
if type:
msg += ["type: " + type]
if flag:
msg += ["flag: " + flag]
msg += [" already defined"]
self.logger.info(" ".join(msg))
(t, f) = (self.resources[name].type, self.resources[name].flag)
if type == t and flag == f:
return True
self.logger.info("resource: redefining resource " + name +
" type: " + str(type) + " and flag: " + str(flag))
del self.resources[name]
resource_exists = True
r = Resource(name, type, flag)
self.resources[name] = r
a = {}
if type:
a['type'] = type
if flag:
a['flag'] = flag
if resource_exists:
self.manager(MGR_CMD_SET, RSC, a, id=name)
else:
self.manager(MGR_CMD_CREATE, RSC, a, id=name)
return True
def write_resourcedef(self, resources=None, filename=None, restart=True):
"""
Write into resource def file
:param resources: PBS resources
:type resources: dictionary
:param filename: resourcedef file name
:type filename: str or None
"""
if resources is None:
resources = self.resources
if isinstance(resources, Resource):
resources = {resources.name: resources}
fn = self.du.mkstemp()[1]
f = open(fn, 'w+')
for r in resources.values():
f.write(r.attributes['id'])
if r.attributes['type'] is not None:
f.write(' type=' + r.attributes['type'])
if r.attributes['flag'] is not None:
f.write(' flag=' + r.attributes['flag'])
f.write('\n')
f.close()
if filename is None:
dest = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
else:
dest = filename
self.du.run_copy(self.hostname, fn, dest, mode=0644, sudo=True)
if filename is None:
self.du.chown(self.hostname, path=dest, uid=0, gid=0,
sudo=True)
os.remove(fn)
if restart:
return self.restart()
return True
def parse_resourcedef(self, file=None):
"""
Parse an arbitrary resource definition file passed as
input and return a dictionary of resources
:param file: resource definition file
:type file: str or None
:returns: Dictionary of resource
:raises: PbsResourceError
"""
if file is None:
file = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
ret = self.du.cat(self.hostname, file, logerr=False, sudo=True)
if ret['rc'] != 0 or len(ret['out']) == 0:
# Most probable error is that file does not exist, we'll let it
# be created
return {}
resources = {}
lines = ret['out']
try:
for l in lines:
l = l.strip()
if l == '' or l.startswith('#'):
continue
name = None
rtype = None
flag = None
res = l.split()
e0 = res[0]
if len(res) > 1:
e1 = res[1].split('=')
else:
e1 = None
if len(res) > 2:
e2 = res[2].split('=')
else:
e2 = None
if e1 is not None and e1[0] == 'type':
rtype = e1[1]
elif e2 is not None and e2[0] == 'type':
rtype = e2[1]
if e1 is not None and e1[0] == 'flag':
                    flag = e1[1]
elif e2 is not None and e2[0] == 'flag':
flag = e2[1]
name = e0
r = Resource(name, rtype, flag)
resources[name] = r
except:
raise PbsResourceError(rc=1, rv=False,
msg="error in parse_resources")
return resources
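    # Illustrative usage sketch (an assumption, not original code): the parsed
    # definitions can be inspected or copied to another file. The destination
    # path below is hypothetical:
    #
    #     resources = server.parse_resourcedef()
    #     for name, rsc in resources.items():
    #         print name, rsc.attributes['type'], rsc.attributes['flag']
    #     server.write_resourcedef(resources, filename='/tmp/resourcedef.copy',
    #                              restart=False)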
def pbs_api_as(self, cmd=None, obj=None, user=None, **kwargs):
"""
Generic handler to run an ``API`` call impersonating
        a given user. This method is only used for impersonation
over the ``API`` because ``CLI`` impersonation takes place
through the generic ``DshUtils`` run_cmd mechanism.
:param cmd: PBS command
:type cmd: str or None
:param user: PBS user or current user
:type user: str or None
:raises: eval
"""
fn = None
objid = None
_data = None
if user is None:
user = self.du.get_current_user()
else:
# user may be a PbsUser object, cast it to string for the remainder
# of the function
user = str(user)
if cmd == 'submit':
if obj is None:
return None
_data = copy.copy(obj)
# the following attributes cause problems 'pickling',
# since they are not needed we unset them
_data.attrl = None
_data.attropl = None
_data.logger = None
_data.utils = None
elif cmd in ('alterjob', 'holdjob', 'sigjob', 'msgjob', 'rlsjob',
'rerunjob', 'orderjob', 'runjob', 'movejob',
'select', 'delete', 'status', 'manager', 'terminate',
'deljob', 'delresv'):
objid = obj
if 'data' in kwargs:
_data = kwargs['data']
if _data is not None:
(fd, fn) = self.du.mkstemp()
tmpfile = open(fn, 'w+b')
cPickle.dump(_data, tmpfile)
tmpfile.close()
os.close(fd)
os.chmod(fn, 0755)
if self._is_local:
os.chdir(tempfile.gettempdir())
else:
self.du.run_copy(self.hostname, fn, fn, sudo=True)
if not self._is_local:
p_env = '"import os; print os.environ[\'PTL_EXEC\']"'
ret = self.du.run_cmd(self.hostname, ['python', '-c', p_env],
logerr=False)
if ret['out']:
runcmd = [os.path.join(ret['out'][0], 'pbs_as')]
else:
runcmd = ['pbs_as']
elif 'PTL_EXEC' in os.environ:
runcmd = [os.path.join(os.environ['PTL_EXEC'], 'pbs_as')]
else:
runcmd = ['pbs_as']
runcmd += ['-c', cmd, '-u', user]
if objid is not None:
runcmd += ['-o']
if isinstance(objid, list):
runcmd += [','.join(objid)]
else:
runcmd += [objid]
if fn is not None:
runcmd += ['-f', fn]
if 'hostname' in kwargs:
hostname = kwargs['hostname']
else:
hostname = self.hostname
runcmd += ['-s', hostname]
if 'extend' in kwargs and kwargs['extend'] is not None:
runcmd += ['-e', kwargs['extend']]
ret = self.du.run_cmd(self.hostname, runcmd, logerr=False, runas=user)
out = ret['out']
if ret['err']:
if cmd in CMD_ERROR_MAP:
m = CMD_ERROR_MAP[cmd]
if m in ret['err'][0]:
if fn is not None:
os.remove(fn)
if not self._is_local:
self.du.rm(self.hostname, fn)
raise eval(str(ret['err'][0]))
self.logger.debug('err: ' + str(ret['err']))
if fn is not None:
os.remove(fn)
if not self._is_local:
self.du.rm(self.hostname, fn)
if cmd == 'submit':
if out:
return out[0].strip()
else:
return None
elif cmd in ('alterjob', 'holdjob', 'sigjob', 'msgjob', 'rlsjob',
'rerunjob', 'orderjob', 'runjob', 'movejob', 'delete',
'terminate'):
if ret['out']:
return int(ret['out'][0])
else:
return 1
elif cmd in ('manager', 'select', 'status'):
return eval(out[0])
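    # Illustrative usage sketch (an assumption, not original code): pbs_api_as
    # is normally reached through higher-level wrappers, but a direct call
    # could look as follows, where `job_obj` is a Job object and TEST_USER is
    # whatever PTL user the test impersonates:
    #
    #     jid = server.pbs_api_as('submit', obj=job_obj, user=TEST_USER)
    #     rc = server.pbs_api_as('deljob', obj=jid, user=TEST_USER)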
def expect(self, obj_type, attrib=None, id=None, op=EQ, attrop=PTL_OR,
attempt=0, max_attempts=None, interval=None, count=None,
extend=None, offset=0, runas=None, level=logging.INFO,
msg=None):
"""
expect an attribute to match a given value as per an
operation.
        :param obj_type: The type of object to query, JOB, SERVER,
                         SCHEDULER, QUEUE, NODE
:type obj_type: str
:param attrib: Attributes to query, can be a string, a list,
or a dict
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param op: An operation to perform on the queried data,
e.g., EQ, SET, LT,..
        :param attrop: Operation on multiple attributes, either
                       PTL_AND or PTL_OR. When PTL_AND is used, only
                       batch objects matching all attributes are
                       returned, otherwise an OR is applied
:param attempt: The number of times this function has been
called
:type attempt: int
        :param max_attempts: The maximum number of attempts to
                             perform. Defaults to the configured
                             'expect_max_attempts' value.
        :type max_attempts: int or None
        :param interval: The interval time between attempts.
                         Defaults to the configured
                         'expect_interval' value.
:param count: If True, attrib will be accumulated using
function counter
:type count: bool
:param extend: passed to the stat call
:param offset: the time to wait before the initial check.
Defaults to 0.
:type offset: int
:param runas: query as a given user. Defaults to current
user
:type runas: str or None
:param msg: Message from last call of this function, this
message will be used while raising
PtlExpectError.
:type msg: str or None
:returns: True if attributes are as expected and False
otherwise
"""
if attempt == 0 and offset > 0:
self.logger.log(level, self.logprefix + 'expect offset set to ' +
str(offset))
time.sleep(offset)
if attrib is None:
attrib = {}
if ATTR_version in attrib and max_attempts is None:
max_attempts = 3
if max_attempts is None:
max_attempts = int(self.ptl_conf['expect_max_attempts'])
if interval is None:
interval = self.ptl_conf['expect_interval']
if attempt >= max_attempts:
_msg = "expected on " + self.logprefix + msg
raise PtlExpectError(rc=1, rv=False, msg=_msg)
if obj_type == SERVER and id is None:
id = self.hostname
if isinstance(attrib, str):
attrib = {attrib: ''}
elif isinstance(attrib, list):
d = {}
for l in attrib:
d[l] = ''
attrib = d
# Add check for substate=42 for jobstate=R, if not added explicitly.
if obj_type == JOB:
add_attribs = {'substate': False}
substate = False
for k, v in attrib.items():
if k == 'job_state' and ((isinstance(v, tuple) and
'R' in v[-1]) or v == 'R'):
add_attribs['substate'] = 42
elif k == 'job_state=R':
add_attribs['substate=42'] = v
elif 'substate' in k:
substate = True
if add_attribs['substate'] and not substate:
attrib['substate'] = add_attribs['substate']
attrop = PTL_AND
del add_attribs, substate
prefix = 'expect on ' + self.logprefix
msg = []
for k, v in attrib.items():
args = None
if isinstance(v, tuple):
operator = v[0]
if len(v) > 2:
args = v[2:]
val = v[1]
else:
operator = op
val = v
msg += [k, PTL_OP_TO_STR[operator].strip()]
if callable(val):
msg += ['callable(' + val.__name__ + ')']
if args is not None:
msg.extend(map(lambda x: str(x), args))
else:
msg += [str(val)]
msg += [PTL_ATTROP_TO_STR[attrop]]
# remove the last converted PTL_ATTROP_TO_STR
if len(msg) > 1:
msg = msg[:-1]
if len(attrib) == 0:
msg += [PTL_OP_TO_STR[op]]
msg += [PBS_OBJ_MAP[obj_type]]
if id is not None:
msg += [str(id)]
if attempt > 0:
msg += ['attempt:', str(attempt + 1)]
# Default count to True if the attribute contains an '=' in its name
# for example 'job_state=R' implies that a count of job_state is needed
if count is None and self.utils.operator_in_attribute(attrib):
count = True
if count:
newattr = self.utils.convert_attributes_by_op(attrib)
if len(newattr) == 0:
newattr = attrib
statlist = [self.counter(obj_type, newattr, id, extend, op=op,
attrop=attrop, level=logging.DEBUG,
runas=runas)]
else:
try:
statlist = self.status(obj_type, attrib, id=id,
level=logging.DEBUG, extend=extend,
runas=runas, logerr=False)
except PbsStatusError:
statlist = []
if (len(statlist) == 0 or statlist[0] is None or
len(statlist[0]) == 0):
if op == UNSET or list(set(attrib.values())) == [0]:
self.logger.log(level, prefix + " ".join(msg) + ' ... OK')
return True
else:
time.sleep(interval)
msg = " no data for " + " ".join(msg)
self.logger.log(level, prefix + msg)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval, count,
extend, level=level, msg=msg)
if attrib is None:
time.sleep(interval)
return self.expect(obj_type, attrib, id, op, attrop, attempt + 1,
max_attempts, interval, count, extend,
runas=runas, level=level, msg=" ".join(msg))
for k, v in attrib.items():
varargs = None
if isinstance(v, tuple):
op = v[0]
if len(v) > 2:
varargs = v[2:]
v = v[1]
for stat in statlist:
if k == ATTR_version and k in stat:
m = self.version_tag.match(stat[k])
if m:
stat[k] = m.group('version')
else:
time.sleep(interval)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval,
count, extend, runas=runas,
level=level, msg=" ".join(msg))
if k not in stat:
if op == UNSET:
continue
else:
# functions/methods are invoked and their return value
# used on expect
if callable(v):
if varargs is not None:
rv = v(stat[k], *varargs)
else:
rv = v(stat[k])
if isinstance(rv, bool):
if op == NOT:
if not rv:
continue
if rv:
continue
else:
v = rv
stat[k] = self.utils.decode_value(stat[k])
v = self.utils.decode_value(v)
if k == ATTR_version:
stat[k] = LooseVersion(str(stat[k]))
v = LooseVersion(str(v))
if op == EQ and stat[k] == v:
continue
elif op == SET and count and stat[k] == v:
continue
elif op == SET and count in (False, None):
continue
elif op == NE and stat[k] != v:
continue
elif op == LT:
if stat[k] < v:
continue
elif op == GT:
if stat[k] > v:
continue
elif op == LE:
if stat[k] <= v:
continue
elif op == GE:
if stat[k] >= v:
continue
elif op == MATCH_RE:
if re.search(str(v), str(stat[k])):
continue
elif op == MATCH:
if str(stat[k]).find(str(v)) != -1:
continue
if k in stat:
msg += [' got: ' + str(k) + ' = ' + str(stat[k])]
self.logger.info(prefix + " ".join(msg))
time.sleep(interval)
# run custom actions defined for this object type
if self.actions:
for act_obj in self.actions.get_actions_by_type(obj_type):
if act_obj.enabled:
act_obj.action(self, obj_type, attrib, id, op,
attrop)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval, count,
extend, level=level, msg=" ".join(msg))
self.logger.log(level, prefix + " ".join(msg) + ' ... OK')
return True
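    # Illustrative usage sketches (assumptions, not original code): expect() is
    # typically used to poll until an attribute reaches a value, e.g. waiting
    # for a job `jid` to run, or for at least two vnodes to be free:
    #
    #     server.expect(JOB, {'job_state': 'R'}, id=jid)
    #     server.expect(VNODE, {'state=free': (GE, 2)}, count=True)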
def is_history_enabled(self):
"""
Short-hand method to return the value of job_history_enable
"""
a = ATTR_JobHistoryEnable
attrs = self.status(SERVER, level=logging.DEBUG)[0]
if ((a in attrs.keys()) and attrs[a] == 'True'):
return True
return False
def cleanup_jobs(self, extend=None, runas=None):
"""
Helper function to delete all jobs.
By default this method will determine whether
job_history_enable is on and will cleanup all history
jobs. Specifying an extend parameter could override
this behavior.
:param runas: Clean the job as
:type runas: str or None
"""
delete_xt = 'force'
select_xt = None
if self.is_history_enabled():
delete_xt += 'deletehist'
select_xt = 'x'
job_ids = self.select(extend=select_xt)
if len(job_ids) > 0:
try:
self.deljob(id=job_ids, extend=delete_xt, runas=runas,
wait=True)
except:
pass
rv = self.expect(JOB, {'job_state': 0}, count=True, op=SET)
if not rv:
return self.cleanup_jobs(extend=extend, runas=runas)
return rv
def cleanup_reservations(self, extend=None, runas=None):
"""
Helper function to delete all reservations
"""
reservations = self.status(RESV, level=logging.DEBUG)
while reservations is not None and len(reservations) != 0:
resvs = [r['id'] for r in reservations]
if len(resvs) > 0:
try:
self.delresv(resvs, logerr=False, runas=runas)
except:
pass
reservations = self.status(RESV, level=logging.DEBUG)
def cleanup_jobs_and_reservations(self, extend='forcedeletehist'):
"""
Helper function to delete all jobs and reservations
        :param extend: Optional extend parameter that is passed
                       to delete. It defaults to 'forcedeletehist',
                       which is used in qdel and pbs_deljob() to
                       force delete all jobs, including history jobs
        :type extend: str
"""
rv = self.cleanup_jobs(extend)
self.cleanup_reservations()
return rv
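    # Illustrative usage sketch (an assumption, not original code): a typical
    # test teardown could wipe the workload and then verify nothing is left:
    #
    #     server.cleanup_jobs_and_reservations()
    #     server.expect(JOB, {'job_state': 0}, count=True, op=SET)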
def update_attributes(self, obj_type, bs):
"""
Populate objects from batch status data
"""
if bs is None:
return
for binfo in bs:
if 'id' not in binfo:
continue
id = binfo['id']
obj = None
if obj_type == JOB:
if ATTR_owner in binfo:
user = binfo[ATTR_owner].split('@')[0]
else:
user = None
if id in self.jobs:
self.jobs[id].attributes.update(binfo)
if self.jobs[id].username != user:
self.jobs[id].username = user
else:
self.jobs[id] = Job(user, binfo)
obj = self.jobs[id]
elif obj_type in (VNODE, NODE):
if id in self.nodes:
self.nodes[id].attributes.update(binfo)
else:
self.nodes[id] = MoM(id, binfo, diagmap={NODE: None},
server=self)
obj = self.nodes[id]
elif obj_type == SERVER:
self.attributes.update(binfo)
obj = self
elif obj_type == QUEUE:
if id in self.queues:
self.queues[id].attributes.update(binfo)
else:
self.queues[id] = Queue(id, binfo, server=self)
obj = self.queues[id]
elif obj_type == RESV:
if id in self.reservations:
self.reservations[id].attributes.update(binfo)
else:
self.reservations[id] = Reservation(id, binfo)
obj = self.reservations[id]
elif obj_type == HOOK:
if id in self.hooks:
self.hooks[id].attributes.update(binfo)
else:
self.hooks[id] = Hook(id, binfo, server=self)
obj = self.hooks[id]
elif obj_type == SCHED:
if self.scheduler:
self.scheduler.attributes.update(binfo)
else:
if SCHED in self.diagmap:
diag = self.diag
diagmap = self.diagmap
else:
diag = None
diagmap = None
self.scheduler = Scheduler(server=self, diag=diag,
diagmap=diagmap)
self.scheduler.attributes.update(binfo)
obj = self.scheduler
elif obj_type == RSC:
if id in self.resources:
self.resources[id].attributes.update(binfo)
else:
rtype = None
rflag = None
if 'type' in binfo:
rtype = binfo['type']
if 'flag' in binfo:
rflag = binfo['flag']
self.resources[id] = Resource(id, rtype, rflag)
if obj is not None:
self.utils.update_attributes_list(obj)
obj.__dict__.update(binfo)
def counter(self, obj_type=None, attrib=None, id=None, extend=None,
op=None, attrop=None, bslist=None, level=logging.INFO,
idonly=True, grandtotal=False, db_access=None, runas=None,
resolve_indirectness=False):
"""
Accumulate properties set on an object. For example, to
count number of free nodes:
``server.counter(VNODE,{'state':'free'})``
:param obj_type: The type of object to query, one of the
* objects
:param attrib: Attributes to query, can be a string, a
list, a dictionary
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param extend: The extended parameter to pass to the stat
call
:param op: The operation used to match attrib to what is
queried. SET or None
:type op: str or None
:param attrop: Operation on multiple attributes, either
PTL_AND, PTL_OR
:param bslist: Optional, use a batch status dict list
instead of an obj_type
:param idonly: if true, return the name/id of the matching
objects
:type idonly: bool
:param db_access: credentials to access db, either a path
to file or dictionary
:type db_access: str or dictionary
:param runas: run as user
:type runas: str or None
"""
self.logit('counter: ', obj_type, attrib, id, level=level)
return self._filter(obj_type, attrib, id, extend, op, attrop, bslist,
PTL_COUNTER, idonly, grandtotal, db_access,
runas=runas,
resolve_indirectness=resolve_indirectness)
def filter(self, obj_type=None, attrib=None, id=None, extend=None, op=None,
attrop=None, bslist=None, idonly=True, grandtotal=False,
db_access=None, runas=None, resolve_indirectness=False):
"""
Filter objects by properties. For example, to filter all
free nodes:``server.filter(VNODE,{'state':'free'})``
For each attribute queried, if idonly is True, a list of
matching object names is returned; if idonly is False, then
the value of each attribute queried is returned.
This is unlike Python's built-in 'filter' that returns a
subset of objects matching from a pool of objects. The
Python filtering mechanism remains very useful in some
situations and should be used programmatically to achieve
desired filtering goals that can not be met easily with
PTL's filter method.
:param obj_type: The type of object to query, one of the
* objects
:param attrib: Attributes to query, can be a string, a
list, a dictionary
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param extend: The extended parameter to pass to the stat
call
:param op: The operation used to match attrib to what is
queried. SET or None
:type op: str or None
:param bslist: Optional, use a batch status dict list
instead of an obj_type
:type bslist: List or None
:param idonly: if true, return the name/id of the matching
objects
:type idonly: bool
:param db_access: credentials to access db, either path to
file or dictionary
:type db_access: str or dictionary
:param runas: run as user
:type runas: str or None
"""
self.logit('filter: ', obj_type, attrib, id)
        return self._filter(obj_type, attrib, id, extend, op, attrop, bslist,
                            PTL_FILTER, idonly, db_access=db_access,
                            runas=runas,
                            resolve_indirectness=resolve_indirectness)
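    # Illustrative usage sketches (assumptions, not original code): filter()
    # groups matching object ids per queried attribute value, for example:
    #
    #     free_nodes = server.filter(VNODE, {'state': 'free'})
    #     big_jobs = server.filter(JOB, {'Resource_List.ncpus': (GE, 8)})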
def _filter(self, obj_type=None, attrib=None, id=None, extend=None,
op=None, attrop=None, bslist=None, mode=PTL_COUNTER,
idonly=True, grandtotal=False, db_access=None, runas=None,
resolve_indirectness=False):
if bslist is None:
try:
_a = resolve_indirectness
tmp_bsl = self.status(obj_type, attrib, id,
level=logging.DEBUG, extend=extend,
db_access=db_access, runas=runas,
resolve_indirectness=_a)
del _a
except PbsStatusError:
return None
bslist = self.utils.filter_batch_status(tmp_bsl, attrib)
del tmp_bsl
if bslist is None:
return None
if isinstance(attrib, str):
attrib = attrib.split(',')
total = {}
for bs in bslist:
if isinstance(attrib, list):
# when filtering on multiple values, ensure that they are
# all present on the object, otherwise skip
if attrop == PTL_AND:
match = True
for k in attrib:
if k not in bs:
match = False
if not match:
continue
for a in attrib:
if a in bs:
if op == SET:
k = a
else:
# Since this is a list of attributes, no operator
# was provided so we settle on "equal"
k = a + '=' + str(bs[a])
if mode == PTL_COUNTER:
amt = 1
if grandtotal:
amt = self.utils.decode_value(bs[a])
if not isinstance(amt, (int, float)):
amt = 1
if a in total:
total[a] += amt
else:
total[a] = amt
else:
if k in total:
total[k] += amt
else:
total[k] = amt
elif mode == PTL_FILTER:
if k in total:
if idonly:
total[k].append(bs['id'])
else:
total[k].append(bs)
else:
if idonly:
total[k] = [bs['id']]
else:
total[k] = [bs]
else:
self.logger.error("Unhandled mode " + str(mode))
return None
elif isinstance(attrib, dict):
tmptotal = {} # The running count that will be used for total
# when filtering on multiple values, ensure that they are
# all present on the object, otherwise skip
match = True
for k, v in attrib.items():
if k not in bs:
match = False
if attrop == PTL_AND:
break
else:
continue
amt = self.utils.decode_value(bs[k])
if isinstance(v, tuple):
op = v[0]
val = self.utils.decode_value(v[1])
                    elif op == SET:
                        val = None
else:
op = EQ
val = self.utils.decode_value(v)
if ((op == LT and amt < val) or
(op == LE and amt <= val) or
(op == EQ and amt == val) or
(op == GE and amt >= val) or
(op == GT and amt > val) or
(op == NE and amt != val) or
(op == MATCH and str(amt).find(str(val)) != -1) or
(op == MATCH_RE and
re.search(str(val), str(amt))) or
(op == SET)):
# There is a match, proceed to track the attribute
self._filter_helper(bs, k, val, amt, op, mode,
tmptotal, idonly, grandtotal)
elif attrop == PTL_AND:
match = False
if mode == PTL_COUNTER:
# requesting specific key/value pairs should result
# in 0 available elements
tmptotal[str(k) + PTL_OP_TO_STR[op] + str(val)] = 0
break
elif mode == PTL_COUNTER:
tmptotal[str(k) + PTL_OP_TO_STR[op] + str(val)] = 0
if attrop != PTL_AND or (attrop == PTL_AND and match):
for k, v in tmptotal.items():
if k not in total:
total[k] = v
else:
total[k] += v
return total
def _filter_helper(self, bs, k, v, amt, op, mode, total, idonly,
grandtotal):
        # default the operation to equality
        if op is None or op not in PTL_OP_TO_STR:
            op = EQ
op_str = PTL_OP_TO_STR[op]
if op == SET:
            # override PTL_OP_TO_STR for SET operations
op_str = ''
v = ''
ky = k + op_str + str(v)
if mode == PTL_COUNTER:
incr = 1
if grandtotal:
if not isinstance(amt, (int, float)):
incr = 1
else:
incr = amt
if ky in total:
total[ky] += incr
else:
total[ky] = incr
elif mode == PTL_FILTER:
if ky in total:
if idonly:
total[ky].append(bs['id'])
else:
total[ky].append(bs)
else:
if idonly:
total[ky] = [bs['id']]
else:
total[ky] = [bs]
def logit(self, msg, obj_type, attrib, id, level=logging.INFO):
"""
Generic logging routine for ``IFL`` commands
:param msg: The message to log
:type msg: str
:param obj_type: object type, i.e *
:param attrib: attributes to log
:param id: name of object to log
:type id: str or list
:param level: log level, defaults to ``INFO``
"""
s = []
if self.logger is not None:
if obj_type is None:
obj_type = MGR_OBJ_NONE
s = [msg + PBS_OBJ_MAP[obj_type]]
if id:
if isinstance(id, list):
s += [' ' + ",".join(id)]
else:
s += [' ' + str(id)]
if attrib:
s += [' ' + str(attrib)]
self.logger.log(level, "".join(s))
def equivalence_classes(self, obj_type=None, attrib={}, bslist=None,
op=RESOURCES_AVAILABLE, show_zero_resources=True,
db_access=None, resolve_indirectness=False):
"""
:param obj_type: PBS Object to query, one of *
:param attrib: attributes to build equivalence classes
out of.
:type attrib: dictionary
:param bslist: Optional, list of dictionary representation
of a batch status
:type bslist: List
:param op: set to RESOURCES_AVAILABLE uses the dynamic
amount of resources available, i.e., available -
assigned, otherwise uses static amount of
resources available
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if attrib is None:
attrib = {}
if len(attrib) == 0 and obj_type is not None:
if obj_type in (VNODE, NODE):
attrib = ['resources_available.ncpus',
'resources_available.mem', 'state']
elif obj_type == JOB:
attrib = ['Resource_List.select',
'queue', 'array_indices_submitted']
elif obj_type == RESV:
attrib = ['Resource_List.select']
else:
return {}
if bslist is None and obj_type is not None:
# To get the resources_assigned we must stat the entire object so
# bypass the specific attributes that would filter out assigned
if op == RESOURCES_AVAILABLE:
bslist = self.status(obj_type, None, level=logging.DEBUG,
db_access=db_access,
resolve_indirectness=resolve_indirectness)
else:
bslist = self.status(obj_type, attrib, level=logging.DEBUG,
db_access=db_access,
resolve_indirectness=resolve_indirectness)
if bslist is None or len(bslist) == 0:
return {}
# automatically convert an objectlist into a batch status dict list
# for ease of use.
if not isinstance(bslist[0], dict):
bslist = self.utils.objlist_to_dictlist(bslist)
if isinstance(attrib, str):
attrib = attrib.split(',')
self.logger.debug("building equivalence class")
equiv = {}
for bs in bslist:
cls = ()
skip_cls = False
# attrs will be part of the EquivClass object
attrs = {}
# Filter the batch attributes by the attribs requested
for a in attrib:
if a in bs:
amt = self.utils.decode_value(bs[a])
if a.startswith('resources_available.'):
val = a.replace('resources_available.', '')
if (op == RESOURCES_AVAILABLE and
'resources_assigned.' + val in bs):
amt = (int(amt) - int(self.utils.decode_value(
bs['resources_assigned.' + val])))
# this case where amt goes negative is not a bug, it
# may happen when computing whats_available due to the
# fact that the computation is subtractive, it does
# add back resources when jobs/reservations end but
# is only concerned with what is available now for
# a given duration, that is why in the case where
# amount goes negative we set it to 0
if amt < 0:
amt = 0
                        # TODO: not a foolproof way to catch a memory type
# but PbsTypeSize should return the right value if
# it fails to parse it as a valid memory value
if a.endswith('mem'):
try:
amt = PbsTypeSize().encode(amt)
except:
# we guessed the type incorrectly
pass
else:
val = a
if amt == 0 and not show_zero_resources:
skip_cls = True
break
# Build the key of the equivalence class
cls += (val + '=' + str(amt),)
attrs[val] = amt
# Now that we are done with this object, add it to an equiv class
if len(cls) > 0 and not skip_cls:
if cls in equiv:
equiv[cls].add_entity(bs['id'])
else:
equiv[cls] = EquivClass(cls, attrs, [bs['id']])
return equiv.values()
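    # Illustrative usage sketch (an assumption, not original code): grouping
    # vnodes by their available ncpus and mem and printing each class:
    #
    #     eqs = server.equivalence_classes(
    #         VNODE, ['resources_available.ncpus', 'resources_available.mem'])
    #     server.show_equivalence_classes(eq=eqs)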
def show_equivalence_classes(self, eq=None, obj_type=None, attrib={},
bslist=None, op=RESOURCES_AVAILABLE,
show_zero_resources=True, db_access=None,
resolve_indirectness=False):
"""
helper function to show the equivalence classes
:param eq: equivalence classes as compute by
equivalence_classes see equivalence_classes
for remaining parameters description
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if eq is None:
equiv = self.equivalence_classes(obj_type, attrib, bslist, op,
show_zero_resources, db_access,
resolve_indirectness)
else:
equiv = eq
equiv = sorted(equiv, key=lambda e: len(e.entities))
for e in equiv:
# e.show()
print str(e)
def whats_available(self, attrib=None, jobs=None, resvs=None, nodes=None):
"""
        Return what is available as a dictionary mapping availability
        duration to node equivalence classes.
:param attrib: attributes to consider
:type attrib: List
:param jobs: jobs to consider, if None, jobs are queried
locally
:param resvs: reservations to consider, if None, they are
queried locally
:param nodes: nodes to consider, if None, they are queried
locally
"""
if attrib is None:
attrib = ['resources_available.ncpus',
'resources_available.mem', 'state']
if resvs is None:
self.status(RESV)
resvs = self.reservations
if jobs is None:
self.status(JOB)
jobs = self.jobs
if nodes is None:
self.status(NODE)
nodes = self.nodes
nodes_id = nodes.keys()
avail_nodes_by_time = {}
def alloc_resource(self, node, resources):
# helper function. Must work on a scratch copy of nodes otherwise
# resources_available will get corrupted
for rsc, value in resources.items():
if isinstance(value, int) or value.isdigit():
avail = node.attributes['resources_available.' + rsc]
nvalue = int(avail) - int(value)
node.attributes['resources_available.' + rsc] = nvalue
# Account for reservations
for resv in resvs.values():
resvnodes = resv.execvnode('resv_nodes')
if resvnodes:
starttime = self.utils.convert_stime_to_seconds(
resv.attributes['reserve_start'])
for node in resvnodes:
for n, resc in node.items():
tm = int(starttime) - int(self.ctime)
if tm < 0 or n not in nodes_id:
continue
if tm not in avail_nodes_by_time:
avail_nodes_by_time[tm] = []
if nodes[n].attributes['sharing'] in ('default_excl',
'force_excl'):
avail_nodes_by_time[tm].append(nodes[n])
try:
nodes_id.remove(n)
except:
pass
else:
ncopy = copy.copy(nodes[n])
ncopy.attributes = copy.deepcopy(
nodes[n].attributes)
avail_nodes_by_time[tm].append(ncopy)
self.alloc_resource(nodes[n], resc)
# go on to look at the calendar of scheduled jobs to run and set
# the node availability according to when the job is estimated to
# start on the node
for job in self.jobs.values():
if (job.attributes['job_state'] != 'R' and
'estimated.exec_vnode' in job.attributes):
estimatednodes = job.execvnode('estimated.exec_vnode')
if estimatednodes:
st = job.attributes['estimated.start_time']
# Tweak for nas format of estimated time that has
# num seconds from epoch followed by datetime
if st.split()[0].isdigit():
starttime = st.split()[0]
else:
starttime = self.utils.convert_stime_to_seconds(st)
for node in estimatednodes:
for n, resc in node.items():
tm = int(starttime) - int(self.ctime)
if (tm < 0 or n not in nodes_id or
nodes[n].state != 'free'):
continue
if tm not in avail_nodes_by_time:
avail_nodes_by_time[tm] = []
if (nodes[n].attributes['sharing'] in
('default_excl', 'force_excl')):
avail_nodes_by_time[tm].append(nodes[n])
try:
nodes_id.remove(n)
except:
pass
else:
ncopy = copy.copy(nodes[n])
ncopy.attributes = copy.deepcopy(
nodes[n].attributes)
avail_nodes_by_time[tm].append(ncopy)
self.alloc_resource(nodes[n], resc)
# remaining nodes are free "forever"
for node in nodes_id:
if self.nodes[node].state == 'free':
if 'infinity' not in avail_nodes_by_time:
avail_nodes_by_time['infinity'] = [nodes[node]]
else:
avail_nodes_by_time['infinity'].append(nodes[node])
        # if there is a dedicated time, move the availability time up to that
# time as necessary
if self.scheduler:
scheduler = self.scheduler
else:
scheduler = Scheduler(server=self)
scheduler.parse_dedicated_time()
if scheduler.dedicated_time:
dedtime = scheduler.dedicated_time[0]['from'] - int(self.ctime)
if dedtime <= int(time.time()):
dedtime = None
else:
dedtime = None
# finally, build the equivalence classes off of the nodes availability
# over time
self.logger.debug("Building equivalence classes")
whazzup = {}
if 'state' in attrib:
attrib.remove('state')
for tm, nds in avail_nodes_by_time.items():
equiv = self.equivalence_classes(VNODE, attrib, bslist=nds,
show_zero_resources=False)
if dedtime and (tm > dedtime or tm == 'infinity'):
tm = dedtime
if tm != 'infinity':
tm = str(datetime.timedelta(seconds=int(tm)))
whazzup[tm] = equiv
return whazzup
def show_whats_available(self, wa=None, attrib=None, jobs=None,
resvs=None, nodes=None):
"""
helper function to show availability as computed by
whats_available
        :param wa: a dictionary of available attributes. See
                   whats_available for a description of the
                   remaining parameters
:type wa: Dictionary
"""
if wa is None:
wa = self.whats_available(attrib, jobs, resvs, nodes)
if len(wa) > 0:
print "%24s\t%s" % ("Duration of availability", "Resources")
print "-------------------------\t----------"
swa = sorted(wa.items(), key=lambda x: x[0])
for (k, eq_classes) in swa:
for eq_cl in eq_classes:
print "%24s\t%s" % (str(k), str(eq_cl))
def utilization(self, resources=None, nodes=None, jobs=None, entity={}):
"""
Return utilization of consumable resources on a set of
nodes
:param nodes: A list of dictionary of nodes on which to
compute utilization.Defaults to nodes
resulting from a stat call to the current
server.
:type nodes: List
:param resources: comma-separated list of resources to
compute utilization on. The name of the
resource is for example, ncpus or mem
:type resources: List
:param entity: An optional dictionary of entities to
compute utilization of,
``e.g. {'user':u1, 'group':g1, 'project'=p1}``
:type entity: Dictionary
The utilization is returned as a dictionary of percentage
utilization for each resource.
Non-consumable resources are silently ignored.
"""
if nodes is None:
nodes = self.status(NODE)
if jobs is None:
jobs = self.status(JOB)
if resources is None:
rescs = ['ncpus', 'mem']
else:
rescs = resources
utilization = {}
resavail = {}
resassigned = {}
usednodes = 0
totnodes = 0
nodes_set = set()
for res in rescs:
resavail[res] = 0
resassigned[res] = 0
# If an entity is specified utilization must be collected from the
# Jobs usage, otherwise we can get the information directly from
# the nodes.
if len(entity) > 0 and jobs is not None:
for job in jobs:
if 'job_state' in job and job['job_state'] != 'R':
continue
entity_match = True
for k, v in entity.items():
if k not in job or job[k] != v:
entity_match = False
break
if entity_match:
for res in rescs:
r = 'Resource_List.' + res
if r in job:
tmpr = int(self.utils.decode_value(job[r]))
resassigned[res] += tmpr
if 'exec_host' in job:
hosts = ResourceResv.get_hosts(job['exec_host'])
nodes_set |= set(hosts)
for node in nodes:
# skip nodes in non-schedulable state
nstate = node['state']
if ('down' in nstate or 'unavailable' in nstate or
'unknown' in nstate or 'Stale' in nstate):
continue
totnodes += 1
# If an entity utilization was requested, all used nodes were
# already filtered into the nodes_set specific to that entity, we
# simply add them up. If no entity was requested, it suffices to
# have the node have a jobs attribute to count it towards total
# used nodes
if len(entity) > 0:
if node['id'] in nodes_set:
usednodes += 1
elif 'jobs' in node:
usednodes += 1
for res in rescs:
avail = 'resources_available.' + res
if avail in node:
val = self.utils.decode_value(node[avail])
if isinstance(val, int):
resavail[res] += val
# When entity matching all resources assigned are
# accounted for by the job usage
if len(entity) == 0:
assigned = 'resources_assigned.' + res
if assigned in node:
val = self.utils.decode_value(node[assigned])
if isinstance(val, int):
resassigned[res] += val
for res in rescs:
if res in resavail:
if res in resassigned:
if resavail[res] > 0:
utilization[res] = [resassigned[res], resavail[res]]
# Only report nodes utilization if no specific resources were requested
if resources is None:
utilization['nodes'] = [usednodes, totnodes]
return utilization
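    # Illustrative usage sketch (an assumption, not original code): computing
    # overall ncpus utilization and the share used by a hypothetical user u1;
    # the entity key 'euser' mirrors the job attribute name used elsewhere in
    # this module:
    #
    #     overall = server.utilization(resources=['ncpus'])
    #     per_user = server.utilization(resources=['ncpus'],
    #                                   entity={'euser': 'u1'})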
def create_vnodes(self, name=None, attrib=None, num=1, mom=None,
additive=False, sharednode=True, restart=True,
delall=True, natvnode=None, usenatvnode=False,
attrfunc=None, fname=None, vnodes_per_host=1,
createnode=True, expect=True):
"""
helper function to create vnodes.
:param name: prefix name of the vnode(s) to create
:type name: str or None
:param attrib: attributes to assign to each node
:param num: the number of vnodes to create. Defaults to 1
:type num: int
:param mom: the MoM object on which the vnode definition is
to be inserted
:param additive: If True, vnodes are added to the existing
                         vnode defs. Defaults to False.
:type additive: bool
:param sharednode: If True, all vnodes will share the same
                           host. Defaults to True.
:type sharednode: bool
:param restart: If True the MoM will be restarted.
:type restart: bool
:param delall: If True delete all server nodes prior to
inserting vnodes
:type delall: bool
        :param natvnode: name of the natural vnode, i.e. the node
name in qmgr -c "create node <name>"
:type natvnode: str or None
:param usenatvnode: count the natural vnode as an
allocatable node.
:type usenatvnode: bool
:param attrfunc: an attribute=value function generator,
see create_vnode_def
:param fname: optional name of the vnode def file
:type fname: str or None
:param vnodes_per_host: number of vnodes per host
:type vnodes_per_host: int
        :param createnode: whether to create the node via the manager
                           command or not. Defaults to True
:type createnode: bool
:param expect: whether to expect attributes to be set or
not. Defaults to True
:type expect: bool
:returns: True on success and False otherwise
"""
if mom is None or name is None or attrib is None:
self.logger.error("name, attributes, and mom object are required")
return False
if delall:
try:
rv = self.manager(MGR_CMD_DELETE, NODE, None, "")
if rv != 0:
return False
except PbsManagerError:
pass
if natvnode is None:
natvnode = mom.shortname
vdef = mom.create_vnode_def(name, attrib, num, sharednode,
usenatvnode=usenatvnode, attrfunc=attrfunc,
vnodes_per_host=vnodes_per_host)
mom.insert_vnode_def(vdef, fname=fname, additive=additive,
restart=restart)
if createnode:
try:
statm = self.status(NODE, id=natvnode)
except:
statm = []
if len(statm) >= 1:
_m = 'Mom %s already exists, not creating' % (natvnode)
self.logger.info(_m)
else:
if mom.pbs_conf and 'PBS_MOM_SERVICE_PORT' in mom.pbs_conf:
m_attr = {'port': mom.pbs_conf['PBS_MOM_SERVICE_PORT']}
else:
m_attr = None
self.manager(MGR_CMD_CREATE, NODE, m_attr, natvnode)
attrs = {}
# only expect if vnodes were added rather than the nat vnode modified
if expect and num > 0:
for k, v in attrib.items():
attrs[str(k) + '=' + str(self.utils.decode_value(v))] = num
attrs['state=free'] = num
rv = self.expect(VNODE, attrs, attrop=PTL_AND)
else:
rv = True
return rv
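    # Illustrative usage sketch (an assumption, not original code): creating
    # four 2-cpu vnodes on a MoM object `mom`, all sharing the natural host:
    #
    #     attrs = {'resources_available.ncpus': 2}
    #     server.create_vnodes('vn', attrs, num=4, mom=mom)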
def create_moms(self, name=None, attrib=None, num=1, delall=True,
createnode=True, conf_prefix='pbs.conf_m',
home_prefix='pbs_m', momhosts=None, init_port=15011,
step_port=2):
"""
        Create MoM configurations and optionally add them to the
        server. Unique ``pbs.conf`` files are defined and created
        on each host on which MoMs are to be created.
:param name: Optional prefix name of the nodes to create.
Defaults to the name of the MoM host.
:type name: str or None
:param attrib: Optional node attributes to assign to the
MoM.
:param num: Number of MoMs to create
:type num: int
:param delall: Whether to delete all nodes on the server.
Defaults to True.
:type delall: bool
:param createnode: Whether to create the nodes and add them
                           to the server. Defaults to True.
:type createnode: bool
        :param conf_prefix: The prefix of the PBS conf file. Defaults
to pbs.conf_m
:type conf_prefix: str
:param home_prefix: The prefix of the PBS_HOME directory.
Defaults to pbs_m
:type home_prefix: str
:param momhosts: A list of hosts on which to deploy num
MoMs.
:type momhosts: List
:param init_port: The initial port number to start assigning
                          ``PBS_MOM_SERVICE_PORT`` to.
                          Defaults to 15011.
:type init_port: int
:param step_port: The increments at which ports are
allocated. Defaults to 2.
:type step_port: int
        .. note:: Since PBS requires that
                  PBS_MANAGER_SERVICE_PORT = PBS_MOM_SERVICE_PORT + 1,
                  the step number must be greater than or equal to 2.
"""
if not self.isUp():
logging.error("An up and running PBS server on " + self.hostname +
" is required")
return False
if delall:
try:
rc = self.manager(MGR_CMD_DELETE, NODE, None, "")
except PbsManagerError, e:
rc = e.rc
if rc:
if len(self.status(NODE)) > 0:
self.logger.error("create_moms: Error deleting all nodes")
return False
pi = PBSInitServices()
if momhosts is None:
momhosts = [self.hostname]
if attrib is None:
attrib = {}
error = False
for hostname in momhosts:
_pconf = self.du.parse_pbs_config(hostname)
if 'PBS_HOME' in _pconf:
_hp = _pconf['PBS_HOME']
if _hp.endswith('/'):
_hp = _hp[:-1]
_hp = os.path.dirname(_hp)
else:
_hp = '/var/spool'
_np_conf = _pconf
_np_conf['PBS_START_SERVER'] = '0'
_np_conf['PBS_START_SCHED'] = '0'
_np_conf['PBS_START_MOM'] = '1'
for i in xrange(0, num * step_port, step_port):
_np = os.path.join(_hp, home_prefix + str(i))
_n_pbsconf = os.path.join('/etc', conf_prefix + str(i))
_np_conf['PBS_HOME'] = _np
port = init_port + i
_np_conf['PBS_MOM_SERVICE_PORT'] = str(port)
_np_conf['PBS_MANAGER_SERVICE_PORT'] = str(port + 1)
self.du.set_pbs_config(hostname, fout=_n_pbsconf,
confs=_np_conf)
pi.initd(hostname, conf_file=_n_pbsconf, op='start')
m = MoM(hostname, pbsconf_file=_n_pbsconf)
if m.isUp():
m.stop()
if hostname != self.hostname:
m.add_config({'$clienthost': self.hostname})
try:
m.start()
except PbsServiceError:
# The service failed to start
self.logger.error("Service failed to start using port " +
str(port) + "...skipping")
self.du.rm(hostname, _n_pbsconf)
continue
if createnode:
attrib['Mom'] = hostname
attrib['port'] = port
if name is None:
name = hostname.split('.')[0]
_n = name + '-' + str(i)
rc = self.manager(MGR_CMD_CREATE, NODE, attrib, id=_n)
if rc != 0:
self.logger.error("error creating node " + _n)
error = True
if error:
return False
return True
def create_hook(self, name, attrs):
"""
Helper function to create a hook by name.
:param name: The name of the hook to create
:type name: str
:param attrs: The attributes to create the hook with.
:type attrs: str
:returns: False if hook already exists
:raises: PbsManagerError, otherwise return True.
"""
hooks = self.status(HOOK)
if ((hooks is None or len(hooks) == 0) or
(name not in map(lambda x: x['id'], hooks))):
self.manager(MGR_CMD_CREATE, HOOK, None, name)
else:
self.logger.error('hook named ' + name + ' exists')
return False
self.manager(MGR_CMD_SET, HOOK, attrs, id=name, expect=True)
return True
def import_hook(self, name, body):
"""
Helper function to import hook body into hook by name.
The hook must have been created prior to calling this
function.
:param name: The name of the hook to import body to
:type name: str
:param body: The body of the hook as a string.
:type body: str
:returns: True on success.
:raises: PbsManagerError
"""
(fd, fn) = self.du.mkstemp()
os.write(fd, body)
os.close(fd)
if not self._is_local:
tmpdir = self.du.get_tempdir(self.hostname)
rfile = os.path.join(tmpdir, os.path.basename(fn))
self.du.run_copy(self.hostname, fn, rfile)
else:
rfile = fn
a = {'content-type': 'application/x-python',
'content-encoding': 'default',
'input-file': rfile}
self.manager(MGR_CMD_IMPORT, HOOK, a, name)
os.remove(rfile)
if not self._is_local:
self.du.rm(self.hostname, rfile)
self.logger.info('server ' + self.shortname +
': imported hook body\n---\n' + body + '---')
return True
def create_import_hook(self, name, attrs=None, body=None, overwrite=True):
"""
Helper function to create a hook, import content into it,
set the event and enable it.
:param name: The name of the hook to create
:type name: str
:param attrs: The attributes to create the hook with.
Event and Enabled are mandatory. No defaults.
:type attrs: str
:param body: The hook body as a string
:type body: str
:param overwrite: If True, if a hook of the same name
already exists, bypass its creation.
Defaults to True
:returns: True on success and False otherwise
"""
if 'event' not in attrs:
self.logger.error('attrs must specify at least an event and key')
return False
hook_exists = False
hooks = self.status(HOOK)
for h in hooks:
if h['id'] == name:
hook_exists = True
if not hook_exists or not overwrite:
rv = self.create_hook(name, attrs)
if not rv:
return False
else:
if attrs is None:
attrs = {'enabled': 'true'}
rc = self.manager(MGR_CMD_SET, HOOK, attrs, id=name)
if rc != 0:
return False
# In 12.0 A MoM hook must be enabled and the event set prior to
# importing, otherwise the MoM does not get the hook content
return self.import_hook(name, body)
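    # Illustrative usage sketch (an assumption, not original code): creating a
    # minimal queuejob hook; the hook name and body below are made up:
    #
    #     hook_body = ("import pbs\n"
    #                  "e = pbs.event()\n"
    #                  "e.accept()\n")
    #     attrs = {'event': 'queuejob', 'enabled': 'true'}
    #     server.create_import_hook('accept_all', attrs, hook_body)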
def evaluate_formula(self, jobid=None, formula=None, full=True,
include_running_jobs=False, exclude_subjobs=True):
"""
Evaluate the job sort formula
:param jobid: If set, evaluate the formula for the given
jobid, if not set,formula is evaluated for
all jobs in state Q
:type jobid: str or None
:param formula: If set use the given formula. If not set,
the server's formula, if any, is used
:param full: If True, returns a dictionary of job
identifiers as keys and the evaluated formula
as values. Returns None if no formula is used.
Each job id formula is returned as a tuple
(s,e) where s is the formula expression
associated to the job and e is the evaluated
numeric value of that expression, for example,
if job_sort_formula is ncpus + mem
a job requesting 2 cpus and 100kb of memory
would return ('2 + 100', 102). If False, if
a jobid is specified, return the integer
value of the evaluated formula.
:type full: bool
:param include_running_jobs: If True, reports formula
value of running jobs.
Defaults to False.
:type include_running_jobs: bool
:param exclude_subjobs: If True, only report formula of
parent job array
:type exclude_subjobs: bool
"""
_f_builtins = ['queue_priority', 'job_priority', 'eligible_time',
'fair_share_perc']
if formula is None:
d = self.status(SERVER, 'job_sort_formula')
if len(d) > 0 and 'job_sort_formula' in d[0]:
formula = d[0]['job_sort_formula']
else:
return None
template_formula = self.utils._make_template_formula(formula)
# to split up the formula into keywords, first convert all possible
# operators into spaces and split the string.
# TODO: The list of operators may need to be expanded
T = string.maketrans('()%+*/-', ' ' * 7)
fres = string.translate(formula, T).split()
if jobid:
d = self.status(JOB, id=jobid, extend='t')
else:
d = self.status(JOB, extend='t')
ret = {}
for job in d:
if not include_running_jobs and job['job_state'] != 'Q':
continue
f_value = {}
# initialize the formula values to 0
for res in fres:
f_value[res] = 0
if 'queue_priority' in fres:
queue = self.status(JOB, 'queue', id=job['id'])[0]['queue']
d = self.status(QUEUE, 'Priority', id=queue)
if d and 'Priority' in d[0]:
qprio = int(d[0]['Priority'])
f_value['queue_priority'] = qprio
else:
continue
if 'job_priority' in fres:
if 'Priority' in job:
jprio = int(job['Priority'])
f_value['job_priority'] = jprio
else:
continue
if 'eligible_time' in fres:
if 'eligible_time' in job:
f_value['eligible_time'] = self.utils.convert_duration(
job['eligible_time'])
if 'fair_share_perc' in fres:
if self.scheduler is None:
self.scheduler = Scheduler(server=self)
if 'fairshare_entity' in self.scheduler.sched_config:
entity = self.scheduler.sched_config['fairshare_entity']
else:
self.logger.error(self.logprefix +
' no fairshare entity in sched config')
continue
if entity not in job:
self.logger.error(self.logprefix +
' job does not have property ' + entity)
continue
try:
fs_info = self.scheduler.query_fairshare(name=job[entity])
if fs_info is not None and 'TREEROOT' in fs_info.perc:
f_value['fair_share_perc'] = \
(fs_info.perc['TREEROOT'] / 100)
except PbsFairshareError:
f_value['fair_share_perc'] = 0
for job_res, val in job.items():
val = self.utils.decode_value(val)
if job_res.startswith('Resource_List.'):
job_res = job_res.replace('Resource_List.', '')
if job_res in fres and job_res not in _f_builtins:
f_value[job_res] = val
tf = string.Template(template_formula)
tfstr = tf.safe_substitute(f_value)
if (jobid is not None or not exclude_subjobs or
(exclude_subjobs and not self.utils.is_subjob(job['id']))):
ret[job['id']] = (tfstr, eval(tfstr))
if not full and jobid is not None and jobid in ret:
            return ret[jobid][1]
return ret
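    # Illustrative usage sketch (an assumption, not original code): with a
    # job_sort_formula such as "ncpus + mem" set on the server, the formula
    # value of every queued job can be inspected:
    #
    #     for jid, (expr, value) in server.evaluate_formula().items():
    #         print jid, expr, value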
def _parse_limits(self, container=None, dictlist=None, id=None,
db_access=None):
"""
Helper function to parse limits syntax on a given
container.
:param container: The PBS object to query, one of ``QUEUE``
or ``SERVER``.Metascheduling node group
limits are not yet queri-able
:type container: str or None
:param dictlist: A list of dictionaries off of a batch
status
        :type dictlist: List
:param id: Optional id of the object to query
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if container is None:
self.logger.error('parse_limits expect container to be set')
return {}
if dictlist is None:
d = self.status(container, db_access=db_access)
else:
d = dictlist
if not d:
return {}
limits = {}
for obj in d:
# filter the id here instead of during the stat call so that
# we can call a full stat once rather than one stat per object
if id is not None and obj['id'] != id:
continue
for k, v in obj.items():
if k.startswith('max_run'):
v = v.split(',')
for rval in v:
rval = rval.strip("'")
l = self.utils.parse_fgc_limit(k + '=' + rval)
if l is None:
self.logger.error("Couldn't parse limit: " +
k + str(rval))
continue
(lim_type, resource, etype, ename, value) = l
if (etype, ename) not in self.entities:
entity = Entity(etype, ename)
self.entities[(etype, ename)] = entity
else:
entity = self.entities[(etype, ename)]
lim = Limit(lim_type, resource, entity, value,
container, obj['id'])
if container in limits:
limits[container].append(lim)
else:
limits[container] = [lim]
entity.set_limit(lim)
return limits
def parse_server_limits(self, server=None, db_access=None):
"""
Parse all server limits
:param server: list of dictionary of server data
:type server: List
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
return self._parse_limits(SERVER, server, db_access=db_access)
def parse_queue_limits(self, queues=None, id=None, db_access=None):
"""
Parse queue limits
:param queues: list of dictionary of queue data
:type queues: List
:param id: The id of the queue to parse limit for. If None,
all queue limits are parsed
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
return self._parse_limits(QUEUE, queues, id=id, db_access=db_access)
def parse_all_limits(self, server=None, queues=None, db_access=None):
"""
Parse all server and queue limits
:param server: list of dictionary of server data
:type server: List
:param queues: list of dictionary of queue data
:type queues: List
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if hasattr(self, 'limits'):
del self.limits
slim = self.parse_server_limits(server, db_access=db_access)
qlim = self.parse_queue_limits(queues, id=None, db_access=db_access)
self.limits = dict(slim.items() + qlim.items())
del slim
del qlim
return self.limits
def limits_info(self, etype=None, ename=None, server=None, queues=None,
jobs=None, db_access=None, over=False):
"""
Collect limit information for each entity on which a
``server/queue`` limit is applied.
:param etype: entity type, one of u, g, p, o
:type etype: str or None
:param ename: entity name
:type ename: str or None
:param server: optional list of dictionary representation
of server object
:type server: List
:param queues: optional list of dictionary representation
of queues object
:type queues: List
:param jobs: optional list of dictionary representation of
jobs object
:type jobs: List
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
:param over: If True, show only entities that are over their
limit.Default is False.
:type over: bool
:returns: A list of dictionary similar to that returned by
a converted batch_status object, i.e., can be
displayed using the Utils.show method
"""
def create_linfo(lim, entity_type, id, used):
"""
Create limit information
:param lim: Limit to apply
:param entity_type: Type of entity
"""
tmp = {}
tmp['id'] = entity_type + ':' + id
c = [PBS_OBJ_MAP[lim.container]]
if lim.container_id:
c += [':', lim.container_id]
tmp['container'] = "".join(c)
s = [str(lim.limit_type)]
if lim.resource:
s += ['.', lim.resource]
tmp['limit_type'] = "".join(s)
tmp['usage/limit'] = "".join([str(used), '/', str(lim.value)])
tmp['remainder'] = int(lim.value) - int(used)
return tmp
def calc_usage(jobs, attr, name=None, resource=None):
"""
Calculate the usage for the entity
:param attr: Job attribute
:param name: Entity name
:type name: str or None
:param resource: PBS resource
:type resource: str or None
:returns: The usage
"""
usage = {}
# initialize usage of the named entity
if name is not None and name not in ('PBS_GENERIC', 'PBS_ALL'):
usage[name] = 0
for j in jobs:
entity = j[attr]
if entity not in usage:
if resource:
usage[entity] = int(
self.utils.decode_value(
j['Resource_List.' + resource]))
else:
usage[entity] = 1
else:
if resource:
usage[entity] += int(
self.utils.decode_value(
j['Resource_List.' + resource]))
else:
usage[entity] += 1
return usage
self.parse_all_limits(server, queues, db_access)
entities_p = self.entities.values()
linfo = []
cache = {}
if jobs is None:
jobs = self.status(JOB)
for entity in sorted(entities_p, key=lambda e: e.name):
for lim in entity.limits:
_t = entity.type
# skip non-matching entity types. We can't skip the entity
# name due to proper handling of the PBS_GENERIC limits
# we also can't skip overall limits
if (_t != 'o') and (etype is not None and etype != _t):
continue
_n = entity.name
a = {}
if lim.container == QUEUE and lim.container_id is not None:
a['queue'] = (EQ, lim.container_id)
if lim.resource:
resource = 'Resource_List.' + lim.resource
a[resource] = (GT, 0)
a['job_state'] = (EQ, 'R')
a['substate'] = (EQ, 42)
if etype == 'u' and ename is not None:
a['euser'] = (EQ, ename)
else:
a['euser'] = (SET, '')
if etype == 'g' and ename is not None:
a['egroup'] = (EQ, ename)
else:
a['egroup'] = (SET, '')
if etype == 'p' and ename is not None:
a['project'] = (EQ, ename)
else:
a['project'] = (SET, '')
# optimization: cache filtered results
d = None
for v in cache.keys():
if cmp(a, eval(v)) == 0:
d = cache[v]
break
if d is None:
d = self.filter(JOB, a, bslist=jobs, attrop=PTL_AND,
idonly=False, db_access=db_access)
cache[str(a)] = d
if not d or 'job_state=R' not in d:
# in the absence of jobs, display limits defined with usage
# of 0
if ename is not None:
_u = {ename: 0}
else:
_u = {_n: 0}
else:
if _t in ('u', 'o'):
_u = calc_usage(
d['job_state=R'], 'euser', _n, lim.resource)
# an overall limit applies across all running jobs
if _t == 'o':
all_used = sum(_u.values())
for k in _u.keys():
_u[k] = all_used
elif _t == 'g':
_u = calc_usage(
d['job_state=R'], 'egroup', _n, lim.resource)
elif _t == 'p':
_u = calc_usage(
d['job_state=R'], 'project', _n, lim.resource)
for k, used in _u.items():
if not over or (int(used) > int(lim.value)):
if ename is not None and k != ename:
continue
if _n in ('PBS_GENERIC', 'PBS_ALL'):
if k not in ('PBS_GENERIC', 'PBS_ALL'):
k += '/' + _n
elif _n != k:
continue
tmp_linfo = create_linfo(lim, _t, k, used)
linfo.append(tmp_linfo)
del a
del cache
return linfo
def __insert_jobs_in_db(self, jobs, hostname=None):
"""
An experimental interface that converts jobs from file
into entries in the PBS database that can be recovered
upon server restart if all other ``objects``, ``queues``,
``resources``, etc... are already defined.
The interface to PBS used in this method is incomplete
and will most likely cause serious issues. Use only for
development purposes
"""
if not jobs:
return []
if hostname is None:
hostname = socket.gethostname()
        # a very crude, and not quite maintainable way to get the flag value
# of an attribute. This is one of the reasons why this conversion
# of jobs is highly experimental
flag_map = {'ctime': 9, 'qtime': 9, 'hop_count': 9, 'queue_rank': 9,
'queue_type': 9, 'etime': 9, 'job_kill_delay': 9,
'run_version': 9, 'job_state': 9, 'exec_host': 9,
'exec_host2': 9, 'exec_vnode': 9, 'mtime': 9, 'stime': 9,
'substate': 9, 'hashname': 9, 'comment': 9, 'run_count': 9,
'schedselect': 13}
state_map = {'Q': 1, 'H': 2, 'W': 3, 'R': 4, 'E': 5, 'X': 6, 'B': 7}
job_attr_stmt = ("INSERT INTO pbs.job_attr (ji_jobid, attr_name, "
"attr_resource, attr_value, attr_flags)")
job_stmt = ("INSERT INTO pbs.job (ji_jobid, ji_sv_name, ji_state, "
"ji_substate,ji_svrflags, ji_numattr,"
" ji_ordering, ji_priority, ji_stime, ji_endtbdry, "
"ji_queue, ji_destin, ji_un_type, ji_momaddr, "
"ji_momport, ji_exitstat, ji_quetime, ji_rteretry, "
"ji_fromsock, ji_fromaddr, ji_4jid, ji_4ash, "
"ji_credtype, ji_qrank, ji_savetm, ji_creattm)")
all_stmts = []
for job in jobs:
keys = []
values = []
flags = []
for k, v in job.items():
if k in ('id', 'Mail_Points', 'Mail_Users'):
continue
keys.append(k)
if not v.isdigit():
values.append("'" + v + "'")
else:
values.append(v)
if k in flag_map:
flags.append(flag_map[k])
elif k.startswith('Resource_List'):
flags.append(15)
else:
flags.append(11)
jobid = job['id'].split('.')[0] + '.' + hostname
for i in range(len(keys)):
stmt = job_attr_stmt
stmt += " VALUES('" + jobid + "', "
if '.' in keys[i]:
k, v = keys[i].split('.')
stmt += "'" + k + "', '" + v + "'" + ", "
else:
stmt += "'" + keys[i] + "', ''" + ", "
stmt += values[i] + "," + str(flags[i])
stmt += ");"
self.logger.debug(stmt)
all_stmts.append(stmt)
js = job['job_state']
svrflags = 1
state = 1
if js in state_map:
state = state_map[js]
if state == 4:
# Other states svrflags aren't handled and will
# cause issues, another reason this is highly experimental
svrflags = 12289
tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
stmt = job_stmt
stmt += " VALUES('" + jobid + "', 1, "
stmt += str(state) + ", " + job['substate']
stmt += ", " + str(svrflags)
stmt += ", 0, 0, 0"
if 'stime' in job:
print job['stime']
st = time.strptime(job['stime'], "%a %b %d %H:%M:%S %Y")
stmt += ", " + str(time.mktime(st))
else:
stmt += ", 0"
stmt += ", 0"
stmt += ", '" + job['queue'] + "'"
if 'exec_host2' in job:
stmt += ", " + job['exec_host2']
else:
stmt += ", ''"
stmt += ", 0, 0, 0, 0, 0, 0, 0, 0, '', '', 0, 0"
stmt += ", '" + tm + "', '" + tm + "');"
self.logger.debug(stmt)
all_stmts.append(stmt)
return all_stmts
def clusterize(self, conf_file=None, hosts=None, import_jobs=False,
db_creds_file=None):
"""
Mimic a ``pbs_diag`` snapshot onto a set of hosts running
a PBS ``server``,``scheduler``, and ``MoM``.
This method clones the following information from the diag:
``Server attributes``
``Server resourcedef``
``Hooks``
``Scheduler configuration``
``Scheduler resource_group``
``Scheduler holiday file``
``Per Queue attributes``
Nodes are copied as a vnode definition file inserted into
each host's MoM instance.
Currently no support for cloning the server 'sched' object,
nor to copy nodes to multi-mom instances.
Jobs are copied over only if import_jobs is True, see below
for details
        :param conf_file: Configuration file for the MoM instance
        :param hosts: List of hosts on which to clone the diag
                      snapshot
        :type hosts: List
        :param import_jobs: [Experimental] if True, jobs from the
                            pbs_diag are imported into the host's
                            database. There are several caveats to
                            this option: the job scripts are not
                            imported, the users and groups are not
                            created on the local system, and there
                            are no actual processes created on the
                            MoM for each job, so operations on the
                            job such as signals or delete will fail
                            (delete -W force will still work)
        :type import_jobs: bool
:param db_creds_file: Path to file containing credentials
to access the DB
:type db_creds_file: str or None
"""
if not self.has_diag:
return
if hosts is None:
return
for h in hosts:
svr = Server(h)
sched = Scheduler(server=svr, diag=self.diag, diagmap=self.diagmap)
try:
svr.manager(MGR_CMD_DELETE, NODE, None, id="")
except:
pass
svr.revert_to_defaults(delqueues=True, delhooks=True)
local = svr.pbs_conf['PBS_HOME']
diag_rdef = os.path.join(self.diag, 'server_priv', 'resourcedef')
diag_sc = os.path.join(self.diag, 'sched_priv', 'sched_config')
diag_rg = os.path.join(self.diag, 'sched_priv', 'resource_group')
diag_hldy = os.path.join(self.diag, 'sched_priv', 'holidays')
nodes = os.path.join(self.diag, 'pbsnodes_va.out')
diag_hooks = os.path.join(self.diag, 'qmgr_ph.out')
diag_ps = os.path.join(self.diag, 'qmgr_ps.out')
local_rdef = os.path.join(local, 'server_priv', 'resourcedef')
local_sc = os.path.join(local, 'sched_priv', 'sched_config')
local_rg = os.path.join(local, 'sched_priv', 'resource_group')
local_hldy = os.path.join(local, 'sched_priv', 'holidays')
_fcopy = [(diag_rdef, local_rdef), (diag_sc, local_sc),
(diag_rg, local_rg), (diag_hldy, local_hldy)]
# Restart since resourcedef may have changed
svr.restart()
if os.path.isfile(diag_ps):
tmp_ps = open(diag_ps)
cmd = [os.path.join(svr.pbs_conf['PBS_EXEC'], 'bin', 'qmgr')]
self.du.run_cmd(h, cmd, stdin=tmp_ps, sudo=True, logerr=False)
tmp_ps.close()
# Unset any site-sensitive attributes
for a in ['pbs_license_info', 'manager', 'operators',
'mail_from', 'acl_roots', 'acl_hosts']:
try:
svr.manager(MGR_CMD_UNSET, SERVER, a, sudo=True)
except:
pass
for (d, l) in _fcopy:
if os.path.isfile(d):
self.logger.info('copying ' + d + ' to ' + l)
self.du.run_copy(h, src=d, dest=l, sudo=True)
diag_sched = self.status(SCHED)
for ds in diag_sched:
for k, v in ds.items():
if k != 'id':
try:
svr.manager(MGR_CMD_SET, SCHED, {k: v},
logerr=False)
except PbsManagerError:
self.logger.warning(
'Skipping sched attribute ' + k)
sched.signal('-HUP')
if os.path.isfile(nodes):
f = open(nodes)
lines = f.readlines()
f.close()
dl = self.utils.convert_to_dictlist(lines)
vdef = self.utils.dictlist_to_vnodedef(dl)
if vdef:
try:
svr.manager(MGR_CMD_DELETE, NODE, None, "")
except:
pass
MoM(h, pbsconf_file=conf_file).insert_vnode_def(vdef)
svr.restart()
svr.manager(MGR_CMD_CREATE, NODE, id=svr.shortname)
# check if any node is associated to a queue.
# This is needed because the queues 'hasnodes' attribute
# does not get set through vnode def update and must be set
# via qmgr. It only needs to be set once, not for each node
qtoset = {}
for n in dl:
if 'queue' in n and n['queue'] not in qtoset:
qtoset[n['queue']] = n['id']
# before setting queue on nodes make sure that the vnode
# def is all set
svr.expect(NODE, {'state=free': (GE, len(dl))}, interval=3)
for k, v in qtoset.items():
svr.manager(MGR_CMD_SET, NODE, {'queue': k}, id=v)
# populate hooks
if os.path.isfile(diag_hooks):
tmp_hook = open(diag_hooks)
cmd = [os.path.join(svr.pbs_conf['PBS_EXEC'], 'bin', 'qmgr')]
self.du.run_cmd(h, cmd, stdin=tmp_hook, sudo=True)
tmp_hook.close()
# import jobs
            if import_jobs:
jobs = self.status(JOB)
sql_stmt = self.__insert_jobs_in_db(jobs, h)
print "\n".join(sql_stmt)
if db_creds_file is not None:
pass
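# Illustrative usage sketch (assumption, not part of the library): given an
# object of the class above constructed from a pbs_diag snapshot
# (hypothetically named `diag_obj`), the snapshot could be cloned onto two
# test hosts like this; the hostnames and configuration path are placeholders:
#
#   diag_obj.clusterize(conf_file='/etc/pbs.conf',
#                       hosts=['hostA', 'hostB'],
#                       import_jobs=False)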
class EquivClass(PBSObject):
"""
Equivalence class holds information on a collection of entities
grouped according to a set of attributes
:param attributes: Dictionary of attributes
:type attributes: Dictionary
:param entities: List of entities
:type entities: List
"""
def __init__(self, name, attributes={}, entities=[]):
self.name = name
self.attributes = attributes
self.entities = entities
self.logger = logging.getLogger(__name__)
def add_entity(self, entity):
"""
Add entities
:param entity: Entity to add
:type entity: str
"""
if entity not in self.entities:
self.entities.append(entity)
def __str__(self):
s = [str(len(self.entities)), ":", ":".join(self.name)]
return "".join(s)
def show(self, showobj=False):
"""
Show the entities
:param showobj: If true then show the entities
:type showobj: bool
"""
s = " && ".join(self.name) + ': '
if showobj:
s += str(self.entities)
else:
s += str(len(self.entities))
print s
return s
class Resource(PBSObject):
"""
PBS resource referenced by name, type and flag
:param name: Resource name
:type name: str or None
:param type: Type of resource
"""
def __init__(self, name=None, type=None, flag=None):
PBSObject.__init__(self, name)
self.set_name(name)
self.set_type(type)
self.set_flag(flag)
def set_name(self, name):
"""
Set the resource name
"""
self.name = name
self.attributes['id'] = name
def set_type(self, type):
"""
Set the resource type
"""
self.type = type
self.attributes['type'] = type
def set_flag(self, flag):
"""
Set the flag
"""
self.flag = flag
self.attributes['flag'] = flag
def __str__(self):
s = [self.attributes['id']]
if 'type' in self.attributes:
s.append('type=' + self.attributes['type'])
if 'flag' in self.attributes:
s.append('flag=' + self.attributes['flag'])
return " ".join(s)
class Holidays():
"""
    Descriptive class for the holidays file.
"""
def __init__(self):
self.year = {'id': "YEAR", 'value': None, 'valid': False}
self.weekday = {'id': "weekday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.monday = {'id': "monday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.tuesday = {'id': "tuesday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.wednesday = {'id': "wednesday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.thursday = {'id': "thursday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.friday = {'id': "friday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.saturday = {'id': "saturday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.sunday = {'id': "sunday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.days_set = [] # list of set days
self._days_map = {'weekday': self.weekday, 'monday': self.monday,
'tuesday': self.tuesday, 'wednesday': self.wednesday,
'thursday': self.thursday, 'friday': self.friday,
'saturday': self.saturday, 'sunday': self.sunday}
self.holidays = [] # list of calendar holidays
def __str__(self):
"""
Return the content to write to holidays file as a string
"""
content = []
if self.year['valid']:
content.append(self.year['id'] + "\t" +
self.year['value'])
for i in range(0, len(self.days_set)):
content.append(self.days_set[i]['id'] + "\t" +
self.days_set[i]['p'] + "\t" +
self.days_set[i]['np'])
# Add calendar holidays
for day in self.holidays:
content.append(day)
return "\n".join(content)
class Scheduler(PBSService):
"""
Container of Scheduler related properties
:param hostname: The hostname on which the scheduler instance
is operating
:type hostname: str or None
:param server: A PBS server instance to which this scheduler
is associated
:param pbsconf_file: path to a PBS configuration file
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
    :param diag: path to PBS diag directory (This will override
diagmap)
:type diag: str or None
    :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
# A vanilla scheduler configuration. This set may change based on
# updates to PBS
sched_dflt_config = {
"backfill": "true ALL",
"backfill_prime": "false ALL",
"help_starving_jobs": "true ALL",
"max_starve": "24:00:00",
"strict_ordering": "false ALL",
"provision_policy": "\"aggressive_provision\"",
"preempt_order": "\"SCR\"",
"fairshare_entity": "euser",
"dedicated_prefix": "ded",
"primetime_prefix": "p_",
"nonprimetime_prefix": "np_",
"preempt_queue_prio": "150",
"preempt_prio": "\"express_queue, normal_jobs\"",
"load_balancing": "false ALL",
"prime_exempt_anytime_queues": "false",
"round_robin": "False all",
"fairshare_usage_res": "cput",
"smp_cluster_dist": "pack",
"fair_share": "false ALL",
"preempt_sort": "min_time_since_start",
"node_sort_key": "\"sort_priority HIGH\" ALL",
"sort_queues": "true ALL",
"by_queue": "True ALL",
"preemptive_sched": "true ALL",
"resources": "\"ncpus, mem, arch, host, vnode, aoe\"",
"log_filter": "3328 ",
}
sched_config_options = ["node_group_key",
"dont_preempt_starving",
"fairshare_enforce_no_shares",
"strict_ordering",
"resource_unset_infinite",
"sync_time",
"unknown_shares",
"log_filter",
"dedicated_prefix",
"load_balancing",
"help_starving_jobs",
"max_starve",
"sort_queues",
"backfill",
"primetime_prefix",
"nonprimetime_prefix",
"backfill_prime",
"prime_exempt_anytime_queues",
"prime_spill",
"prime_exempt_anytime_queues",
"prime_spill",
"resources",
"mom_resources",
"smp_cluster_dist",
"preempt_queue_prio",
"preempt_suspend",
"preempt_checkpoint",
"preempt_requeue",
"preemptive_sched",
"dont_preempt_starving",
"node_group_key",
"dont_preempt_starving",
"fairshare_enforce_no_shares",
"strict_ordering",
"resource_unset_infinite",
"provision_policy",
"resv_confirm_ignore",
"allow_aoe_calendar",
"max_job_check",
"preempt_attempts",
"update_comments",
"sort_by",
"key",
"preempt_starving",
"preempt_fairshare",
"load_balancing_rr",
"assign_ssinodes",
"cpus_per_ssinode",
"mem_per_ssinode",
"strict_fifo",
"mem_per_ssinode",
"strict_fifo"
]
fs_re = '(?P<name>[\S]+)[\s]*:[\s]*Grp:[\s]*(?P<Grp>[-]*[0-9]*)' + \
'[\s]*cgrp:[\s]*(?P<cgrp>[-]*[0-9]*)[\s]*' + \
'Shares:[\s]*(?P<Shares>[-]*[0-9]*)[\s]*Usage:[\s]*' + \
'(?P<Usage>[0-9]+)[\s]*Perc:[\s]*(?P<Perc>.*)%'
fs_tag = re.compile(fs_re)
def __init__(self, hostname=None, server=None, pbsconf_file=None,
diagmap={}, diag=None, db_access=None):
self.sched_config_file = None
self.dflt_holidays_file = None
self.holidays_file = None
self.sched_config = {}
self._sched_config_comments = {}
self._config_order = []
self.dedicated_time_file = None
self.dedicated_time = None
self.dedicated_time_as_str = None
self.fairshare_tree = None
self.resource_group = None
self.server = None
self.server_dyn_res = None
self.deletable_files = ['usage']
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(hostname, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
if hostname is None:
hostname = self.server.hostname
self.server.scheduler = self
PBSService.__init__(self, hostname, pbsconf_file=pbsconf_file,
diag=diag, diagmap=diagmap)
_m = ['scheduler ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.pbs_conf = self.server.pbs_conf
self.sched_config_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'sched_config')
self.dflt_sched_config_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc', 'pbs_sched_config')
self.parse_sched_config(self.sched_config_file)
self.dflt_holidays_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc', 'pbs_holidays')
self.holidays_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'holidays')
self.dflt_resource_group_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc',
'pbs_resource_group')
self.resource_group_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'resource_group')
self.fairshare_tree = self.query_fairshare()
rg = self.parse_resource_group(self.hostname, self.resource_group_file)
self.resource_group = rg
try:
attrs = self.server.status(SCHED, level=logging.DEBUG,
db_access=db_access)
if attrs is not None and len(attrs) > 0:
self.attributes = attrs[0]
except (PbsManagerError, PbsStatusError), e:
self.logger.error('Error querying scheduler %s' % e.msg)
self.version = None
self.holidays_obj = Holidays()
self.holidays_parse_file(level=logging.DEBUG)
def isUp(self):
"""
Check for PBS scheduler up
"""
return super(Scheduler, self)._isUp(self)
def signal(self, sig):
"""
Send a signal to PBS scheduler
"""
self.logger.info('scheduler ' + self.shortname + ': sent signal ' +
sig)
return super(Scheduler, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the PBS scheduler pid
"""
return super(Scheduler, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
        Get all pids for the instance
"""
return super(Scheduler, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the scheduler
:param args: Arguments required to start the scheduler
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
return super(Scheduler, self)._start(inst=self, args=args,
launcher=launcher)
else:
try:
rv = self.pi.start_sched()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the PBS scheduler
:param sig: Signal to stop the PBS scheduler
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Scheduler on host ' +
self.hostname)
return super(Scheduler, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_sched()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the PBS scheduler
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the scheduler logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp, day,
max_attempts, interval, starttime, endtime,
level=level)
def pbs_version(self):
"""
Get the version of the scheduler instance
"""
if self.version:
return self.version
version = self.log_match('pbs_version', tail=False)
if version:
version = version[1].strip().split('=')[1]
else:
version = "unknown"
self.version = LooseVersion(version)
return self.version
def parse_sched_config(self, schd_cnfg=None):
"""
        Parse a scheduling configuration file into a dictionary.
Special handling of identical keys ``(e.g., node_sort_key)``
is done by appending a delimiter, '%', between each value
of the key. When printed back to file, each delimited entry
gets written on a line of its own. For example, the python
dictionary entry:
        ``{'node_sort_key':
        ['"ncpus HIGH unused" prime', '"node_priority HIGH"
        non-prime']}``
        will get written as:
        ``node_sort_key: "ncpus HIGH unused" prime``
        ``node_sort_key: "node_priority HIGH" non-prime``
Returns sched_config dictionary that gets reinitialized
every time this method is called.
"""
# sched_config is initialized
if self.sched_config:
del(self.sched_config)
self.sched_config = {}
self._sched_config_comments = {}
self._config_order = []
if schd_cnfg is None:
if self.sched_config_file is not None:
schd_cnfg = self.sched_config_file
else:
self.logger.error('no scheduler configuration file to parse')
return False
try:
conf_opts = self.du.cat(self.hostname, schd_cnfg,
sudo=(not self.has_diag),
level=logging.DEBUG2)['out']
except:
self.logger.error('error parsing scheduler configuration')
return False
_comment = []
conf_re = re.compile(
'[#]?[\s]*(?P<conf_id>[\w]+):[\s]*(?P<conf_val>.*)')
for line in conf_opts:
m = conf_re.match(line)
if m:
key = m.group('conf_id')
val = m.group('conf_val')
# line is a comment, it could be a commented out scheduling
# option, or the description of an option. It could also be
# that part of the description is an example setting of the
# option.
# We must keep track of commented out options in order to
# rewrite the configuration in the same order as it was defined
if line.startswith('#'):
if key in self.sched_config_options:
_comment += [line]
if key in self._sched_config_comments:
self._sched_config_comments[key] += _comment
_comment = []
else:
self._sched_config_comments[key] = _comment
_comment = []
if key not in self._config_order:
self._config_order.append(key)
else:
_comment += [line]
continue
if key not in self._sched_config_comments:
self._sched_config_comments[key] = _comment
else:
self._sched_config_comments[key] += _comment
if key not in self._config_order:
self._config_order.append(key)
_comment = []
if key in self.sched_config:
if isinstance(self.sched_config[key], list):
if isinstance(val, list):
self.sched_config[key].extend(val)
else:
self.sched_config[key].append(val)
else:
if isinstance(val, list):
self.sched_config[key] = [self.sched_config[key]]
self.sched_config[key].extend(val)
else:
self.sched_config[key] = [self.sched_config[key],
val]
else:
self.sched_config[key] = val
else:
_comment += [line]
self._sched_config_comments['PTL_SCHED_CONFIG_TAIL'] = _comment
return True
def check_defaults(self, config):
"""
Check the values in argument config against default values
"""
if len(config.keys()) == 0:
return
for k, v in self.sched_dflt_config.items():
if k in config:
s1 = v
s1 = s1.replace(" ", "")
s1 = s1.replace("\t", "").strip()
s2 = config[k]
s2 = s2.replace(" ", "")
s2 = s2.replace("\t", "").strip()
if s1 != s2:
self.logger.debug(k + ' non-default: ' + v +
' != ' + config[k])
def apply_config(self, config=None, validate=True, path=None):
"""
Apply the configuration specified by config
        :param config: Configurations to set. Defaults to
                       self.sched_config
:param validate: If True (the default) validate that
settings did not yield an error.
Validation is done by parsing the
scheduler log which, in some cases may
be slow and therefore undesirable.
:type validate: bool
:param path: Optional path to file to which configuration
is written. If None, the configuration is
written to PBS_HOME/sched_priv/sched_config
:type path: str
:returns: True on success and False otherwise. Success
means that upon applying the new configuration
the scheduler did not emit an
"Error reading line" in its log file.
"""
if config is None:
config = self.sched_config
if len(config) == 0:
return True
reconfig_time = int(time.time())
try:
(fd, fn) = self.du.mkstemp()
for k in self._config_order:
if k in config:
if k in self._sched_config_comments:
os.write(fd, "\n".join(self._sched_config_comments[k]))
os.write(fd, "\n")
v = config[k]
if isinstance(v, list):
for val in v:
os.write(fd, k + ": " + str(val) + "\n")
else:
os.write(fd, k + ": " + str(v) + "\n")
elif k in self._sched_config_comments:
os.write(fd, "\n".join(self._sched_config_comments[k]))
os.write(fd, "\n")
for k, v in self.sched_config.items():
if k not in self._config_order:
os.write(fd, k + ": " + str(v).strip() + "\n")
if 'PTL_SCHED_CONFIG_TAIL' in self._sched_config_comments:
os.write(fd, "\n".join(
self._sched_config_comments['PTL_SCHED_CONFIG_TAIL']))
os.write(fd, "\n")
os.close(fd)
if path is None:
sp = os.path.join(self.pbs_conf['PBS_HOME'], "sched_priv",
"sched_config")
if self.du.is_localhost(self.hostname):
self.du.run_copy(self.hostname, sp, sp + '.bak', sudo=True)
else:
cmd = ['mv', sp, sp + '.bak']
self.du.run_cmd(self.hostname, cmd, sudo=True)
else:
sp = path
self.du.run_copy(self.hostname, fn, sp, mode=0644, sudo=True)
os.remove(fn)
self.du.chown(self.hostname, path=sp, uid=0, gid=0, sudo=True)
self.logger.debug(self.logprefix + "updated configuration")
except:
m = self.logprefix + 'error in apply_config '
self.logger.error(m + str(traceback.print_exc()))
raise PbsSchedConfigError(rc=1, rv=False, msg=m)
if validate:
self.signal('-HUP')
m = self.log_match("Error reading line", n=10,
starttime=reconfig_time)
if m is None:
# no match, successful config
return True
raise PbsSchedConfigError(rc=1, rv=False, msg=str(m))
return True
def set_sched_config(self, confs={}, apply=True, validate=True):
"""
set a ``sched_config`` property
:param confs: dictionary of key value sched_config entries
:type confs: Dictionary
:param apply: if True (the default), apply configuration.
:type apply: bool
:param validate: if True (the default), validate the
configuration settings.
:type validate: bool
"""
self.logger.info(self.logprefix + "config " + str(confs))
self.sched_config = dict(self.sched_config.items() + confs.items())
if apply:
try:
self.apply_config(validate=validate)
except PbsSchedConfigError:
for k in confs:
del self.sched_config[k]
self.apply_config(validate=validate)
return True
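    # Illustrative usage sketch (assumption, not part of the library): with a
    # configured Scheduler instance (hypothetically `sched`), a sched_config
    # entry can be set, applied, and validated in one call:
    #
    #   sched.set_sched_config({'backfill': 'false ALL'}, apply=True,
    #                          validate=True)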
def add_server_dyn_res(self, custom_resource, script_body=None, file=None,
apply=True, validate=True):
"""
Add a server dynamic resource script or file to the scheduler
configuration
:param custom_resource: The name of the custom resource to
define
:type custom_resource: str
:param script_body: The body of the server dynamic resource
:param file: Alternatively to passing the script body, use
the file instead
:type file: str or None
:param apply: if True (the default), apply configuration.
:type apply: bool
:param validate: if True (the default), validate the
configuration settings.
:type validate: bool
"""
if file is not None:
f = open(file)
script_body = f.readlines()
f.close()
else:
(fd, file) = self.du.mkstemp(prefix='PtlPbsSchedConfig')
f = open(file, "w")
f.write(script_body)
f.close()
os.close(fd)
self.server_dyn_res = file
self.logger.info(self.logprefix + "adding server dyn res " + file)
self.logger.info("-" * 30)
self.logger.info(script_body)
self.logger.info("-" * 30)
self.du.chmod(self.hostname, path=file, mode=0755)
a = {'server_dyn_res': '"' + custom_resource + ' !' + file + '"'}
self.set_sched_config(a, apply=apply, validate=validate)
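    # Illustrative usage sketch (assumption): registering a hypothetical
    # server dynamic resource named 'scratch' backed by an inline script; the
    # resource name and the script body below are placeholders:
    #
    #   sched.add_server_dyn_res('scratch',
    #                            script_body='#!/bin/sh\necho 100gb\n')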
def unset_sched_config(self, name, apply=True):
"""
Delete a ``sched_config`` entry
:param name: the entry to delete from sched_config
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
"""
self.parse_sched_config()
if name not in self.sched_config:
return True
self.logger.info(self.logprefix + "unsetting config " + name)
del self.sched_config[name]
if apply:
return self.apply_config()
def set_dedicated_time_file(self, file):
"""
        Set the path to the dedicated time file
"""
self.logger.info(self.logprefix + " setting dedicated time file to " +
str(file))
self.dedicated_time_file = file
def revert_to_defaults(self):
"""
Revert scheduler configuration to defaults.
:returns: True on success, False otherwise
"""
self.logger.info(self.logprefix +
"reverting configuration to defaults")
self.server.manager(MGR_CMD_LIST, SCHED)
ignore_attrs = ['id', 'pbs_version', 'sched_host']
unsetattrs = []
for k in self.attributes.keys():
if k not in ignore_attrs:
unsetattrs.append(k)
if len(unsetattrs) > 0:
self.server.manager(MGR_CMD_UNSET, SCHED, unsetattrs)
self.clear_dedicated_time(hup=False)
if self.du.cmp(self.hostname, self.dflt_resource_group_file,
self.resource_group_file) != 0:
self.du.run_copy(self.hostname, self.dflt_resource_group_file,
self.resource_group_file, mode=0644, sudo=True)
if self.server_dyn_res is not None:
self.du.rm(self.hostname, self.server_dyn_res, force=True,
sudo=True)
self.server_dyn_res = None
rc = self.holidays_revert_to_default()
if self.du.cmp(self.hostname, self.dflt_sched_config_file,
self.sched_config_file) != 0:
self.du.run_copy(self.hostname, self.dflt_sched_config_file,
self.sched_config_file, mode=0644, sudo=True)
self.signal('-HUP')
for f in self.deletable_files:
fn = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv', f)
if fn is not None:
self.du.rm(self.hostname, fn, sudo=True, force=True)
self.parse_sched_config()
self.fairshare_tree = None
self.resource_group = None
return self.isUp()
def save_configuration(self, outfile, mode='a'):
"""
Save scheduler configuration
:param outfile: Path to a file to which configuration
is saved
:type outfile: str
:param mode: mode to use to access outfile. Defaults to
append, 'a'.
:type mode: str
:returns: True on success and False otherwise
"""
conf = {}
sconf = {MGR_OBJ_SCHED: conf}
sched_priv = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv')
sc = os.path.join(sched_priv, 'sched_config')
self._save_config_file(conf, sc)
rg = os.path.join(sched_priv, 'resource_group')
self._save_config_file(conf, rg)
dt = os.path.join(sched_priv, 'dedicated_time')
self._save_config_file(conf, dt)
hd = os.path.join(sched_priv, 'holidays')
self._save_config_file(conf, hd)
try:
f = open(outfile, mode)
cPickle.dump(sconf, f)
f.close()
except:
self.logger.error('error saving configuration ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file infile
"""
self._load_configuration(infile, MGR_OBJ_SCHED)
def get_resources(self, exclude=[]):
"""
returns a list of allocatable resources.
:param exclude: if set, excludes the named resources, if
they exist, from the resulting list
:type exclude: List
"""
if 'resources' not in self.sched_config:
return None
resources = self.sched_config['resources']
resources = resources.replace('"', '')
resources = resources.replace(' ', '')
res = resources.split(',')
if len(exclude) > 0:
for e in exclude:
if e in res:
res.remove(e)
return res
def add_resource(self, name, apply=True):
"""
Add a resource to ``sched_config``.
:param name: the resource name to add
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
:returns: True on success and False otherwise.
Return True if the resource is already defined.
"""
# if the sched_config has not been read in yet, parse it
if not self.sched_config:
self.parse_sched_config()
if 'resources' in self.sched_config:
resources = self.sched_config['resources']
resources = resources.replace('"', '')
splitres = [r.strip() for r in resources.split(",")]
if name in splitres:
return True
resources = '"' + resources + ', ' + name + '"'
else:
resources = '"' + name + '"'
return self.set_sched_config({'resources': resources}, apply=apply)
def remove_resource(self, name, apply=True):
"""
        Remove a resource from ``sched_config``.
:param name: the resource name to remove
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
:returns: True on success and False otherwise
"""
# if the sched_config has not been read in yet, parse it
if not self.sched_config:
self.parse_sched_config()
if 'resources' in self.sched_config:
resources = self.sched_config['resources']
resources = resources.replace('"', '')
splitres = [r.strip() for r in resources.split(",")]
if name not in splitres:
return True
newres = []
for r in splitres:
if r != name:
newres.append(r)
resources = '"' + ",".join(newres) + '"'
return self.set_sched_config({'resources': resources}, apply=apply)
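    # Illustrative usage sketch (assumption): adding a custom resource to the
    # scheduler 'resources' line and removing it again; 'foo_res' is a
    # placeholder resource name:
    #
    #   sched.add_resource('foo_res', apply=True)
    #   sched.remove_resource('foo_res', apply=True)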
def holidays_revert_to_default(self, level=logging.INFO):
"""
Revert holidays file to default
"""
self.logger.log(level, self.logprefix +
"reverting holidays file to default")
rc = None
# Copy over the holidays file from PBS_EXEC if it exists
if self.du.cmp(self.hostname, self.dflt_holidays_file,
self.holidays_file) != 0:
ret = self.du.run_copy(self.hostname, self.dflt_holidays_file,
self.holidays_file, mode=0644, sudo=True,
logerr=True)
rc = ret['rc']
# Update the internal data structures for the updated file
self.holidays_parse_file(level=level)
else:
rc = 1
return rc
def holidays_parse_file(self, path=None, obj=None, level=logging.INFO):
"""
Parse the existing holidays file
:param path: optional path to the holidays file to parse
:type path: str or None
:param obj: optional holidays object to be used instead
of internal
:returns: The content of holidays file as a list of lines
"""
self.logger.log(level, self.logprefix + "Parsing holidays file")
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
if path is None:
path = self.holidays_file
lines = self.du.cat(self.hostname, path, sudo=True)['out']
content = [] # valid content to return
self.holidays_delete_entry(
'a', apply=False, obj=obj, level=logging.DEBUG)
for line in lines:
entry = str(line).split()
if len(entry) == 0:
continue
tag = entry[0].lower()
if tag == "year": # initialize year
content.append("\t".join(entry))
obj.year['valid'] = True
if len(entry) > 1:
obj.year['value'] = entry[1]
elif tag in days_map.keys(): # initialize a day
content.append("\t".join(entry))
day = days_map[tag]
day['valid'] = True
days_set.append(day)
day['position'] = len(days_set) - 1
if len(entry) > 1:
day['p'] = entry[1]
if len(entry) > 2:
day['np'] = entry[2]
elif tag.isdigit(): # initialize a holiday
content.append("\t".join(entry))
obj.holidays.append(tag)
else:
pass
return content
def holidays_set_day(self, day_id, prime="", nonprime="", apply=True,
obj=None, level=logging.INFO):
"""
Set prime time values for a day
:param day_id: the day to be set (string)
:type day_id: str
:param prime: the prime time value
:param nonprime: the non-prime time value
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
:returns: The position ``(0-7)`` of the set day
"""
self.logger.log(level, self.logprefix +
"setting holidays file entry for %s",
day_id)
if obj is None:
obj = self.holidays_obj
day = obj._days_map[str(day_id).lower()]
days_set = obj.days_set
if day['valid'] is None: # Fresh entry
days_set.append(day)
day['position'] = len(days_set) - 1
elif day['valid'] is False: # Previously invalidated entry
days_set.insert(day['position'], day)
else:
pass
day['valid'] = True
day['p'] = str(prime)
day['np'] = str(nonprime)
self.logger.debug("holidays_set_day(): changed day struct: " +
str(day))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return day['position']
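    # Illustrative usage sketch (assumption): setting Monday's prime and
    # non-prime start times and writing the holidays file in the same call;
    # the time values are placeholders:
    #
    #   pos = sched.holidays_set_day('monday', prime='0600',
    #                                nonprime='1730', apply=True)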
def holidays_get_day(self, day_id, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:param day_id: either a day's name or "all"
:type day_id: str
:returns: A copy of info about a day/all set days
"""
self.logger.log(level, self.logprefix +
"getting holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_set = obj.days_set
days_map = obj._days_map
if day_id == "all":
return days_set[:]
else:
return days_map[day_id].copy()
def holidays_reposition_day(self, day_id, new_pos, apply=True, obj=None,
level=logging.INFO):
"""
Change position of a day ``(0-7)`` as it appears in the
holidays file
:param day_id: name of the day
:type day_id: str
:param new_pos: new position
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
:returns: The new position of the day
"""
self.logger.log(level, self.logprefix +
"repositioning holidays file entry for " +
day_id + " to position " + str(new_pos))
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
if new_pos == day['position']:
return
# We also want to update order of invalid days, so add them to
# days_set temporarily
invalid_days = []
for name in days_map:
if days_map[name]['valid'] is False:
invalid_days.append(days_map[name])
days_set += invalid_days
# Sort the old list
days_set.sort(key=itemgetter('position'))
# Change position of 'day_id'
day['position'] = new_pos
days_set.remove(day)
days_set.insert(new_pos, day)
# Update the 'position' field
for i in range(0, len(days_set)):
days_set[i]['position'] = i
# Remove invalid days from days_set
len_days_set = len(days_set)
days_set = [days_set[i] for i in range(0, len_days_set)
if days_set[i] not in invalid_days]
self.logger.debug("holidays_reposition_day(): List of days after " +
" re-positioning " + str(day_id) + " is:\n" +
str(days_set))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return new_pos
def holidays_unset_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Unset prime time values for a day
:param day_id: day to unset (string)
:type day_id: str
:param apply: to reflect the changes to file
:param obj: optional holidays object to be used instead
of internal
.. note:: we do not unset the 'valid' field here so the entry
will still be displayed but without any values
"""
self.logger.log(level, self.logprefix +
"unsetting holidays file entry for " + day_id)
if obj is None:
obj = self.holidays_obj
day = obj._days_map[str(day_id).lower()]
day['p'] = ""
day['np'] = ""
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_invalidate_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Remove a day's entry from the holidays file
:param day_id: the day to remove (string)
:type day_id: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"invalidating holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
day['valid'] = False
days_set.remove(day)
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_validate_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Make valid a previously set day's entry
:param day_id: the day to validate (string)
:type day_id: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
.. note:: The day will retain its previous position in
the file
"""
self.logger.log(level, self.logprefix +
"validating holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
if day in days_set: # do not insert a pre-existing day
self.logger.debug("holidays_validate_day(): " +
day_id + " is already valid!")
return
day['valid'] = True
days_set.insert(day['position'], day)
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_delete_entry(self, entry_type, idx=None, apply=True,
obj=None, level=logging.INFO):
"""
Delete ``one/all`` entries from holidays file
:param entry_type: 'y':year, 'd':day, 'h':holiday or 'a': all
:type entry_type: str
:param idx: either a day of week (monday, tuesday etc.)
or Julian date of a holiday
:type idx: str or None
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead of
internal
:returns: False if entry_type is invalid, otherwise True
        .. note:: The day cannot be validated and will lose its
position in the file
"""
self.logger.log(level, self.logprefix +
"Deleting entries from holidays file")
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
holiday_list = obj.holidays
year = obj.year
if entry_type not in ['a', 'y', 'd', 'h']:
return False
if entry_type == 'y' or entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting year entry from holidays file")
# Delete year entry
year['value'] = None
year['valid'] = False
if entry_type == 'd' or entry_type == 'a':
# Delete one/all day entries
num_days_to_delete = 1
if entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting all days from holidays file")
num_days_to_delete = len(days_set)
for i in range(0, num_days_to_delete):
if (entry_type == 'd'):
self.logger.debug(self.logprefix +
"deleting " + str(idx) +
" entry from holidays file")
day = days_map[str(idx).lower()]
else:
day = days_set[0]
day['p'] = None
day['np'] = None
day['valid'] = None
day['position'] = None
days_set.remove(day)
if entry_type == 'd':
# Correct 'position' field of every day
for i in range(0, len(days_set)):
days_set[i]['position'] = i
if entry_type == 'h' or entry_type == 'a':
# Delete one/all calendar holiday entries
if entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting all holidays from holidays file")
del holiday_list[:]
else:
self.logger.debug(self.logprefix +
"deleting holiday on " + str(idx) +
" from holidays file")
holiday_list.remove(str(idx))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return True
def holidays_set_year(self, new_year="", apply=True, obj=None,
level=logging.INFO):
"""
Set the year value
        :param new_year: year value to set
        :type new_year: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"setting holidays file year entry to " +
str(new_year))
if obj is None:
obj = self.holidays_obj
year = obj.year
year['value'] = str(new_year)
year['valid'] = True
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_unset_year(self, apply=True, obj=None, level=logging.INFO):
"""
Unset the year value
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"unsetting holidays file year entry")
if obj is None:
obj = self.holidays_obj
obj.year['value'] = ""
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_get_year(self, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:returns: The year entry of holidays file
"""
self.logger.log(level, self.logprefix +
"getting holidays file year entry")
if obj is None:
obj = self.holidays_obj
year = obj.year
return year.copy()
def holidays_add_holiday(self, date=None, apply=True, obj=None,
level=logging.INFO):
"""
Add a calendar holiday to the holidays file
:param date: Date value for the holiday
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"adding holiday " + str(date) +
" to holidays file")
if obj is None:
obj = self.holidays_obj
holiday_list = obj.holidays
if date is not None:
holiday_list.append(str(date))
else:
pass
self.logger.debug("holidays list after adding one: " +
str(holiday_list))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_get_holidays(self, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:returns: The list of holidays in holidays file
"""
self.logger.log(level, self.logprefix +
"retrieving list of holidays")
if obj is None:
obj = self.holidays_obj
holiday_list = obj.holidays
return holiday_list[:]
def _holidays_process_content(self, content, obj=None):
"""
Process a user provided list of holidays file content
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.debug("_holidays_process_content(): " +
"Processing user provided holidays content:\n" +
str(content))
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
year = obj.year
holiday_list = obj.holidays
days_set = obj.days_set
self.holidays_delete_entry(
'a', apply=False, obj=obj, level=logging.DEBUG)
if content is None:
self.logger.debug("Holidays file was wiped out")
return
for line in content:
entry = line.split()
if len(entry) == 0:
continue
tag = entry[0].lower()
if tag == "year": # initialize self.year
year['valid'] = True
if len(entry) > 1:
year['value'] = entry[1]
elif tag in days_map.keys(): # initialize self.<day>
day = days_map[tag]
day['valid'] = True
days_set.append(day)
day['position'] = len(days_set) - 1
if len(entry) > 1:
day['p'] = entry[1]
if len(entry) > 2:
day['np'] = entry[2]
elif tag.isdigit(): # initialize self.holiday
holiday_list.append(tag)
else:
pass
def holidays_write_file(self, content=None, out_path=None,
hup=True, obj=None, level=logging.INFO):
"""
Write to the holidays file with content ``given/generated``
:param hup: SIGHUP the scheduler after writing the holidays
file
:type hup: bool
:param obj: optional holidays object to be used instead of
internal
"""
self.logger.log(level, self.logprefix +
"Writing to the holidays file")
if obj is None:
obj = self.holidays_obj
if out_path is None:
out_path = self.holidays_file
if content is not None:
self._holidays_process_content(content, obj)
else:
content = str(obj)
self.logger.debug("content being written:\n" + str(content))
(fd, fn) = self.du.mkstemp(self.hostname, body=content)
ret = self.du.run_copy(self.hostname, fn, out_path, mode=0644,
sudo=True)
self.du.rm(self.hostname, fn)
self.du.chown(self.hostname, out_path, uid=0, gid=0,
sudo=True)
if ret['rc'] != 0:
raise PbsSchedConfigError(rc=ret['rc'], rv=ret['out'],
msg=('error applying holidays file' +
ret['err']))
if hup:
rv = self.signal('-HUP')
if not rv:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error applying holidays file')
self.du.chown(self.hostname, path=out_path, uid=0,
gid=0, sudo=True)
return True
def parse_dedicated_time(self, file=None):
"""
        Parse the dedicated_time file and populate dedicated times
        both as a dedicated_time array of dictionaries defined as
        ``[{'from': datetime, 'to': datetime}, ...]`` and as a
        dedicated_time_as_str array with a string representation of
        each entry
:param file: optional file to parse. Defaults to the one under
``PBS_HOME/sched_priv``
:type file: str or None
        :returns: The dedicated_time list of dictionaries or None on
                  error. Return an empty array if dedicated time file
                  is empty.
"""
self.dedicated_time_as_str = []
self.dedicated_time = []
if file:
dt_file = file
elif self.dedicated_time_file:
dt_file = self.dedicated_time_file
else:
dt_file = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv',
'dedicated_time')
try:
lines = self.du.cat(self.hostname, dt_file, sudo=True)['out']
if lines is None:
return []
for line in lines:
if not line.startswith('#') and len(line) > 0:
self.dedicated_time_as_str.append(line)
(dtime_from, dtime_to) = self.utils.convert_dedtime(line)
self.dedicated_time.append({'from': dtime_from,
'to': dtime_to})
except:
self.logger.error('error in parse_dedicated_time')
return None
return self.dedicated_time
def clear_dedicated_time(self, hup=True):
"""
Clear the dedicated time file
"""
self.parse_dedicated_time()
if ((len(self.dedicated_time) == 0) and
(len(self.dedicated_time_as_str) == 0)):
return True
if self.dedicated_time:
for d in self.dedicated_time:
del d
if self.dedicated_time_as_str:
for d in self.dedicated_time_as_str:
del d
self.dedicated_time = []
self.dedicated_time_as_str = []
dt = "# FORMAT: MM/DD/YYYY HH:MM MM/DD/YYYY HH:MM"
return self.add_dedicated_time(dt, hup=hup)
def add_dedicated_time(self, as_str=None, start=None, end=None, hup=True):
"""
        Append a dedicated time entry. The function can be called
        in one of two ways: either by passing in start and end as
        time values, or by passing as_str, a string that gets
        appended to the dedicated time entries and formatted as
        shown below. Note that no check on the validity of the
        format is made; the function uses strftime to parse the
        datetime and will fail if strftime cannot convert the
        string.
        ``MM/DD/YYYY HH:MM MM/DD/YYYY HH:MM``
:returns: True on success and False otherwise
"""
if self.dedicated_time is None:
self.parse_dedicated_time()
if start is not None and end is not None:
dtime_from = time.strftime("%m/%d/%Y %H:%M", time.localtime(start))
dtime_to = time.strftime("%m/%d/%Y %H:%M", time.localtime(end))
dedtime = dtime_from + " " + dtime_to
elif as_str is not None:
(dtime_from, dtime_to) = self.utils.convert_dedtime(as_str)
dedtime = as_str
else:
self.logger.warning("no dedicated from/to specified")
return True
for d in self.dedicated_time_as_str:
if dedtime == d:
if dtime_from is None or dtime_to is None:
self.logger.info(self.logprefix +
"dedicated time already defined")
else:
self.logger.info(self.logprefix +
"dedicated time from " + dtime_from +
" to " + dtime_to + " already defined")
return True
if dtime_from is not None and dtime_to is not None:
self.logger.info(self.logprefix +
"adding dedicated time " + dedtime)
self.dedicated_time_as_str.append(dedtime)
if dtime_from is not None and dtime_to is not None:
self.dedicated_time.append({'from': dtime_from, 'to': dtime_to})
try:
(fd, fn) = self.du.mkstemp()
for l in self.dedicated_time_as_str:
os.write(fd, l + '\n')
os.close(fd)
ddfile = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv',
'dedicated_time')
self.du.run_copy(self.hostname, fn, ddfile, mode=0644, uid=0,
gid=0, sudo=True)
os.remove(fn)
except:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error adding dedicated time')
if hup:
ret = self.signal('-HUP')
if ret['rc'] != 0:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error adding dedicated time')
return True
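    # Illustrative usage sketch (assumption): the two calling styles of
    # add_dedicated_time, using epoch times or a preformatted string; the
    # values shown are placeholders:
    #
    #   sched.add_dedicated_time(start=time.time() + 3600,
    #                            end=time.time() + 7200)
    #   sched.add_dedicated_time('04/01/2017 12:00 04/01/2017 14:00')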
def terminate(self):
self.signal('-KILL')
def valgrind(self):
"""
run scheduler instance through valgrind
"""
if self.isUp():
self.terminate()
rv = CliUtils().check_bin('valgrind')
if not rv:
self.logger.error(self.logprefix + 'valgrind not available')
return None
cmd = ['valgrind']
cmd += ["--log-file=" + os.path.join(tempfile.gettempdir(),
'schd.vlgrd')]
cmd += [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_sched')]
return self.du.run_cmd(self.hostname, cmd, sudo=True)
def alloc_to_execvnode(self, chunks):
"""
convert a resource allocation to an execvnode string representation
"""
execvnode = []
for chunk in chunks:
execvnode += ["(" + chunk.vnode]
for res, val in chunk.resources.items():
execvnode += [":" + str(res) + "=" + str(val)]
for vchk in chunk.vchunk:
execvnode += ["+" + vchk.vnode]
                for res, val in vchk.resources.items():
execvnode += [":" + str(res) + "=" + str(val)]
execvnode += [")+"]
if len(execvnode) != 0:
ev = execvnode[len(execvnode) - 1]
ev = ev[:-1]
execvnode[len(execvnode) - 1] = ev
return "".join(execvnode)
def cycles(self, start=None, end=None, firstN=None, lastN=None):
"""
Analyze scheduler log and return cycle information
:param start: Optional setting of the start time to consider
:param end: Optional setting of the end time to consider
:param firstN: Optional setting to consider the given first
N cycles
:param lastN: Optional setting to consider only the given
last N cycles
"""
try:
from ptl.utils.pbs_logutils import PBSSchedulerLog
except:
self.logger.error('error loading ptl.utils.pbs_logutils')
return None
sl = PBSSchedulerLog()
sl.analyze(self.logfile, start, end, self.hostname)
cycles = sl.cycles
        if not cycles:
return []
if lastN is not None:
return cycles[-lastN:]
elif firstN is not None:
return cycles[:firstN]
return cycles
def query_fairshare(self, name=None, id=None):
"""
        Parse fairshare data using ``pbsfs`` and populate
        fairshare_tree. If name or id are specified, return the data
        associated to that id. Otherwise return the entire fairshare
        tree
"""
if self.has_diag:
return None
tree = FairshareTree()
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False)
if ret['rc'] != 0:
raise PbsFairshareError(rc=ret['rc'], rv=None,
msg=str(ret['err']))
pbsfs = ret['out']
for p in pbsfs:
m = self.fs_tag.match(p)
if m:
usage = int(m.group('Usage'))
perc = float(m.group('Perc'))
nm = m.group('name')
cgrp = int(m.group('cgrp'))
pid = int(m.group('Grp'))
nd = tree.get_node(id=pid)
if nd:
pname = nd.parent_name
else:
pname = None
# if an entity has a negative cgroup it should belong
# to the unknown resource, we work around the fact that
# PBS Pro (up to 13.0) sets this cgroup id to -1 by
# reassigning it to 0
# TODO: cleanup once PBS code is updated
if cgrp < 0:
cgrp = 0
node = FairshareNode(name=nm,
id=cgrp,
parent_id=pid,
parent_name=pname,
nshares=int(m.group('Shares')),
usage=usage,
perc={'TREEROOT': perc})
if perc:
node.prio['TREEROOT'] = float(usage) / perc
if nm == name or id == cgrp:
return node
tree.add_node(node, apply=False)
# now that all nodes are known, update parent and child
# relationship of the tree
tree.update()
for node in tree.nodes.values():
pnode = node._parent
while pnode is not None and pnode.id != 0:
if pnode.perc['TREEROOT']:
node.perc[pnode.name] = \
(node.perc['TREEROOT'] * 100 / pnode.perc[
'TREEROOT'])
if pnode.name in node.perc and node.perc[pnode.name]:
node.prio[pnode.name] = (
node.usage / node.perc[pnode.name])
pnode = pnode._parent
if name:
n = tree.get_node(name)
if n is None:
raise PbsFairshareError(rc=1, rv=None,
msg='Unknown entity ' + name)
return n
        if id:
            n = tree.get_node(id=id)
            if n is None:
                raise PbsFairshareError(rc=1, rv=None,
                                        msg='Unknown entity ' + str(id))
            return n
return tree
def set_fairshare_usage(self, name=None, usage=None):
"""
Set the fairshare usage associated to a given entity.
:param name: The entity to set the fairshare usage of
:type name: str or None
:param usage: The usage value to set
"""
if self.has_diag:
return True
if name is None:
self.logger.error(self.logprefix + ' an entity name required')
return False
if usage is None:
self.logger.error(self.logprefix + ' a usage is required')
return False
self.stop()
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-s', name, str(usage)]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
self.start()
if ret['rc'] == 0:
return True
return False
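    # Illustrative usage sketch (assumption): seed fairshare usage for an
    # entity and read it back through the fairshare tree; 'user1' and the
    # usage value are placeholders:
    #
    #   sched.set_fairshare_usage(name='user1', usage=1000)
    #   node = sched.query_fairshare(name='user1')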
def decay_fairshare_tree(self):
"""
Decay the fairshare tree through pbsfs
"""
if self.has_diag:
return True
self.stop()
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-d']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
self.start()
if ret['rc'] == 0:
return True
return False
def cmp_fairshare_entities(self, name1=None, name2=None):
"""
Compare two fairshare entities. Wrapper of ``pbsfs -c e1 e2``
:param name1: name of first entity to compare
:type name1: str or None
:param name2: name of second entity to compare
        :type name2: str or None
:returns: the name of the entity of higher priority or None on error
"""
if self.has_diag:
return None
if name1 is None or name2 is None:
            self.logger.error(self.logprefix + 'two fairshare entity names ' +
                              'required')
return None
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-c', name1, name2]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
if ret['rc'] == 0:
return ret['out'][0]
return None
def parse_resource_group(self, hostname=None, resource_group=None):
"""
Parse the Scheduler's ``resource_group`` file
:param hostname: The name of the host from which to parse
resource_group
:type hostname: str or None
:param resource_group: The path to a resource_group file
:type resource_group: str or None
:returns: A fairshare tree
"""
if hostname is None:
hostname = self.hostname
if resource_group is None:
resource_group = self.resource_group_file
        # if has_diag is True access to sched_priv may not require su privilege
ret = self.du.cat(hostname, resource_group, sudo=(not self.has_diag))
if ret['rc'] != 0:
self.logger.error(hostname + ' error reading ' + resource_group)
tree = FairshareTree(hostname, resource_group)
root = FairshareNode('root', -1, parent_id=0, nshares=100)
tree.add_node(root, apply=False)
lines = ret['out']
for line in lines:
line = line.strip()
if not line.startswith("#") and len(line) > 0:
# could have 5th column but we only need the first 4
(name, id, parent, nshares) = line.split()[:4]
node = FairshareNode(name, id, parent_name=parent,
nshares=nshares)
tree.add_node(node, apply=False)
tree.update()
return tree
def add_to_resource_group(self, name, id, parent, nshares):
"""
Add an entry to the resource group file
:param name: The name of the entity to add
:type name: str
:param id: The numeric identifier of the entity to add
:type id: int
:param parent: The name of the parent group
:type parent: str
:param nshares: The number of shares associated to the entity
:type nshares: int
"""
if self.resource_group is None:
self.resource_group = self.parse_resource_group(
self.hostname, self.resource_group_file)
if not self.resource_group:
self.resource_group = FairshareTree(
self.hostname, self.resource_group_file)
return self.resource_group.create_node(name, id, parent_name=parent,
nshares=nshares)
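    # Illustrative usage sketch (assumption): appending an entity to the
    # resource_group file; the name, id, parent group, and shares below are
    # placeholders:
    #
    #   sched.add_to_resource_group('user1', 101, 'root', 30)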
def job_formula(self, jobid=None, starttime=None, max_attempts=5):
"""
Extract formula value out of scheduler log
:param jobid: Optional, the job identifier for which to get
the formula.
:type jobid: str or int
:param starttime: The time at which to start parsing the
scheduler log
:param max_attempts: The number of attempts to search for
formula in the logs
:type max_attempts: int
:returns: If jobid is specified, return the formula value
associated to that job if no jobid is specified,
returns a dictionary mapping job ids to formula
"""
if jobid is None:
jobid = "(?P<jobid>.*)"
_alljobs = True
else:
if isinstance(jobid, int):
jobid = str(jobid)
_alljobs = False
formula_pat = (".*Job;" + jobid +
".*;Formula Evaluation = (?P<fval>.*)")
        rv = self.log_match(formula_pat, regexp=True, starttime=starttime,
                            n='ALL', allmatch=True,
                            max_attempts=max_attempts)
ret = {}
if rv:
for _, l in rv:
m = re.match(formula_pat, l)
if m:
if _alljobs:
jobid = m.group('jobid')
ret[jobid] = float(m.group('fval').strip())
if not _alljobs:
if jobid in ret:
return ret[jobid]
else:
return
return ret
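    # Illustrative usage sketch (assumption): with a job_sort_formula enabled
    # on the server, the formula value logged for a job can be retrieved from
    # the scheduler log; the job id '123' and starttime `t0` (an epoch time)
    # are placeholders:
    #
    #   fval = sched.job_formula(jobid='123', starttime=t0)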
class FairshareTree(object):
"""
Object representation of the Scheduler's resource_group
file and pbsfs data
:param hostname: Hostname of the machine
:type hostname: str
"""
du = DshUtils()
def __init__(self, hostname=None, resource_group=None):
self.logger = logging.getLogger(__name__)
self.hostname = hostname
self.resource_group = resource_group
self.nodes = {}
self.root = None
self._next_id = -1
def update_resource_group(self):
if self.resource_group:
(fd, fn) = self.du.mkstemp()
os.write(fd, self.__str__())
os.close(fd)
ret = self.du.run_copy(self.hostname, fn, self.resource_group,
mode=0644, sudo=True)
self.du.chown(self.hostname, self.resource_group, uid=0,
gid=0, sudo=True)
os.remove(fn)
if ret['rc'] != 0:
raise PbsFairshareError(rc=1, rv=False,
msg='error updating resource group')
return True
def update(self):
for node in self.nodes.values():
if node._parent is None:
pnode = self.get_node(id=node.parent_id)
if pnode:
node._parent = pnode
if node not in pnode._child:
pnode._child.append(node)
def _add_node(self, node):
if node.name == 'TREEROOT' or node.name == 'root':
self.root = node
self.nodes[node.name] = node
if node.parent_name in self.nodes:
self.nodes[node.parent_name]._child.append(node)
node._parent = self.nodes[node.parent_name]
def add_node(self, node, apply=True):
"""
add node to the fairshare tree
"""
self._add_node(node)
if apply:
return self.update_resource_group()
return True
def create_node(self, name, id, parent_name, nshares):
"""
Add an entry to the ``resource_group`` file
:param name: The name of the entity to add
:type name: str
        :param id: The unique numeric identifier of the entity
:type id: int
:param parent: The name of the parent/group of the entity
:type parent: str
:param nshares: The number of shares assigned to this entity
:type nshares: int
:returns: True on success, False otherwise
"""
if name in self.nodes:
self.logger.warning('fairshare: node ' + name + ' already defined')
return True
self.logger.info('creating tree node: ' + name)
node = FairshareNode(name, id, parent_name=parent_name,
nshares=nshares)
self._add_node(node)
return self.update_resource_group()
def get_node(self, name=None, id=None):
"""
Return a node of the fairshare tree identified by either
name or id.
:param name: The name of the entity to query
:type name: str or None
:param id: The id of the entity to query
:returns: The fairshare information of the entity when
found, if not, returns None
.. note:: The name takes precedence over the id.
"""
for node in self.nodes.values():
if name is not None and node.name == name:
return node
if id is not None and node.id == id:
return node
return None
def __batch_status__(self):
"""
Convert fairshare tree object to a batch status format
"""
dat = []
for node in self.nodes.values():
if node.name == 'root':
continue
einfo = {}
einfo['cgroup'] = node.id
einfo['id'] = node.name
einfo['group'] = node.parent_id
einfo['nshares'] = node.nshares
if len(node.prio) > 0:
p = []
for k, v in node.prio.items():
p += ["%s:%d" % (k, int(v))]
einfo['penalty'] = ", ".join(p)
einfo['usage'] = node.usage
if node.perc:
p = []
for k, v in node.perc.items():
p += ["%s:%.3f" % (k, float(v))]
einfo['shares_perc'] = ", ".join(p)
ppnode = self.get_node(id=node.parent_id)
if ppnode:
ppname = ppnode.name
ppid = ppnode.id
else:
ppnode = self.get_node(name=node.parent_name)
if ppnode:
ppname = ppnode.name
ppid = ppnode.id
else:
ppname = ''
ppid = None
einfo['parent'] = "%s (%s) " % (str(ppid), ppname)
dat.append(einfo)
return dat
def get_next_id(self):
self._next_id -= 1
return self._next_id
def __repr__(self):
return self.__str__()
def _dfs(self, node, dat):
if node.name != 'root':
s = []
if node.name is not None:
s += [node.name]
if node.id is not None:
s += [str(node.id)]
if node.parent_name is not None:
s += [node.parent_name]
if node.nshares is not None:
s += [str(node.nshares)]
if node.usage is not None:
s += [str(node.usage)]
dat.append("\t".join(s))
for n in node._child:
self._dfs(n, dat)
def __str__(self):
dat = []
if self.root:
self._dfs(self.root, dat)
if len(dat) > 0:
dat += ['\n']
return "\n".join(dat)
class FairshareNode(object):
"""
Object representation of the fairshare data as queryable through
the command ``pbsfs``.
:param nshares: Number of shares
:type nshares: int or None
:param usage: Fairshare usage
:param perc: Percentage the entity has of the tree
"""
def __init__(self, name=None, id=None, parent_name=None, parent_id=None,
nshares=None, usage='unknown', perc=None):
self.name = name
self.id = id
self.parent_name = parent_name
self.parent_id = parent_id
self.nshares = nshares
self.usage = usage
self.perc = perc
self.prio = {}
self._parent = None
self._child = []
def __str__(self):
ret = []
if self.name is not None:
ret.append(self.name)
if self.id is not None:
ret.append(str(self.id))
if self.parent_name is not None:
ret.append(str(self.parent_name))
if self.nshares is not None:
ret.append(str(self.nshares))
if self.usage is not None:
ret.append(str(self.usage))
if self.perc is not None:
ret.append(str(self.perc))
return "\t".join(ret)
class MoM(PBSService):
"""
Container for MoM properties.
Provides various MoM operations, such as creation, insertion,
deletion of vnodes.
:param name: The hostname on which the MoM runs. Defaults to
             calling pbs_default()
:type name: str or None
:param attrs: Dictionary of attributes to set, these will
override defaults.
:type attrs: Dictionary
:param pbsconf_file: path to config file to parse for
``PBS_HOME``, ``PBS_EXEC``, etc
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects ``(node,server,etc)``
to mapped files from PBS diag directory
:type diagmap: Dictionary
:param diag: path to PBS diag directory (this overrides
             diagmap)
:type diag: str or None
:param server: A PBS server instance to which this mom is associated
:param db_access: set to either file containing credentials to DB
access or dictionary containing
{'dbname':...,'user':...,'port':...}
:type db_access: str or dictionary
"""
dflt_attributes = {}
conf_to_cmd_map = {'PBS_MOM_SERVICE_PORT': '-M',
'PBS_MANAGER_SERVICE_PORT': '-R',
'PBS_HOME': '-d'}
def __init__(self, name=None, attrs={}, pbsconf_file=None, diagmap={},
diag=None, server=None, db_access=None):
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(name, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
PBSService.__init__(self, name, attrs, self.dflt_attributes,
pbsconf_file, diag=diag, diagmap=diagmap)
_m = ['mom ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.configd = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv',
'config.d')
self.config = {}
self.dflt_config = {'$clienthost': self.server.hostname}
self.version = None
self._is_cpuset_mom = None
def isUp(self):
"""
Check for PBS mom up
"""
return super(MoM, self)._isUp(self)
def signal(self, sig):
"""
Send signal to PBS mom
"""
self.logger.info(self.logprefix + 'sent signal ' + sig)
return super(MoM, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the PBS mom pid
"""
return super(MoM, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get all pids of an instance
"""
return super(MoM, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the PBS mom
:param args: Arguments to start the mom
:type args: str or None
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list or None
"""
if args is not None or launcher is not None:
return super(MoM, self)._start(inst=self, args=args,
cmd_map=self.conf_to_cmd_map,
launcher=launcher)
else:
try:
rv = self.pi.start_mom()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the PBS mom
:param sig: Signal to stop the PBS mom
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping MoM on host ' +
self.hostname)
return super(MoM, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_mom()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the PBS mom
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None):
"""
Match the PBS mom logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp, day,
max_attempts, interval, starttime, endtime)
def pbs_version(self):
"""
Get the PBS version
"""
if self.version:
return self.version
exe = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')
version = self.du.run_cmd(self.hostname,
[exe, '--version'], sudo=True)['out']
if version:
self.logger.debug(version)
# in some cases pbs_mom --version may return multiple lines, we
# only care about the one that carries pbs_version information
for ver in version:
if 'pbs_version' in ver:
version = ver.split('=')[1].strip()
break
else:
version = self.log_match('pbs_version', tail=False)
if version:
version = version[1].strip().split('=')[1].strip()
else:
version = "unknown"
self.version = LooseVersion(version)
return self.version
def delete_vnodes(self):
rah = ATTR_rescavail + '.host'
rav = ATTR_rescavail + '.vnode'
a = {rah: self.hostname, rav: None}
try:
_vs = self.server.status(HOST, a, id=self.hostname)
except PbsStatusError:
try:
_vs = self.server.status(HOST, a, id=self.shortname)
except PbsStatusError as e:
if e.msg[0].endswith('Server has no node list'):
_vs = []
else:
raise e
vs = []
for v in _vs:
if v[rav].split('.')[0] != v[rah].split('.')[0]:
vs.append(v['id'])
if len(vs) > 0:
self.server.manager(MGR_CMD_DELETE, VNODE, id=vs)
def revert_to_defaults(self, delvnodedefs=True):
"""
1. ``Revert MoM configuration to defaults``
2. ``Remove epilogue and prologue``
3. ``Delete all vnode definitions and HUP MoM``
:param delvnodedefs: if True (the default) delete all vnode
definitions and restart the MoM
:type delvnodedefs: bool
:returns: True on success and False otherwise
"""
self.logger.info(self.logprefix +
'reverting configuration to defaults')
restart = False
if not self.has_diag:
self.delete_pelog()
if delvnodedefs and self.has_vnode_defs():
restart = True
if not self.delete_vnode_defs():
return False
self.delete_vnodes()
if cmp(self.config, self.dflt_config) != 0:
self.apply_config(self.dflt_config, hup=False, restart=False)
if restart:
self.restart()
else:
self.signal('-HUP')
return self.isUp()
return True
def save_configuration(self, outfile, mode='a'):
"""
Save a MoM ``mom_priv/config``
:param outfile: the output file to which configuration is
saved
:type outfile: str
:param mode: the mode in which to open outfile to save
configuration.
:type mode: str
:returns: True on success, False on error
.. note:: first object being saved should open this file
with 'w' and subsequent calls from other objects
should save with mode 'a' or 'a+'. Defaults to 'a'
"""
conf = {}
mconf = {MGR_OBJ_NODE: conf}
mpriv = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv')
cf = os.path.join(mpriv, 'config')
self._save_config_file(conf, cf)
if os.path.isdir(os.path.join(mpriv, 'config.d')):
for f in os.listdir(os.path.join(mpriv, 'config.d')):
self._save_config_file(conf,
os.path.join(mpriv, 'config.d', f))
try:
f = open(outfile, mode)
cPickle.dump(mconf, f)
f.close()
except:
self.logger.error('error saving configuration to ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file infile
"""
self._load_configuration(infile, MGR_OBJ_NODE)
def is_cray(self):
"""
Returns True if the version of PBS used was built for Cray platforms
"""
rv = self.log_match("alps_client", tail=False, max_attempts=10)
if rv:
return True
return False
def is_cpuset_mom(self):
"""
Check for cpuset mom
"""
if self._is_cpuset_mom is not None:
return self._is_cpuset_mom
raa = ATTR_rescavail + '.arch'
a = {raa: None}
try:
rv = self.server.status(NODE, a, id=self.shortname)
except PbsStatusError:
try:
rv = self.server.status(NODE, a, id=self.hostname)
except PbsStatusError as e:
if e.msg[0].endswith('Server has no node list'):
return False
else:
raise e
if rv[0][raa] == 'linux_cpuset':
self._is_cpuset_mom = True
else:
self._is_cpuset_mom = False
return self._is_cpuset_mom
def create_vnode_def(self, name, attrs={}, numnodes=1, sharednode=True,
pre='[', post=']', usenatvnode=False, attrfunc=None,
vnodes_per_host=1):
"""
Create a vnode definition string representation
:param name: The prefix for name of vnode to create,
name of vnode will be prefix + pre + <num> +
post
:type name: str
:param attrs: Dictionary of attributes to set on each vnode
:type attrs: Dictionary
:param numnodes: The number of vnodes to create
:type numnodes: int
:param sharednode: If true vnodes are shared on a host
:type sharednode: bool
:param pre: The symbol preceding the numeric value of that
vnode.
:type pre: str
:param post: The symbol following the numeric value of that
vnode.
:type post: str
:param usenatvnode: use the natural vnode as the first vnode
                    to allocate; this only makes sense
                    starting with PBS 11.3, when natural
                    vnodes are reported as allocatable
:type usenatvnode: bool
:param attrfunc: function to customize the attributes,
signature is (name, numnodes, curnodenum,
attrs), must return a dict that contains
new or modified attrs that will be added to
the vnode def. The function is called once
per vnode being created, it does not modify
attrs itself across calls.
:param vnodes_per_host: number of vnodes per host
:type vnodes_per_host: int
:returns: A string representation of the vnode definition
file
"""
sethost = False
attribs = attrs.copy()
if not sharednode and 'resources_available.host' not in attrs:
sethost = True
if attrfunc is None:
customattrs = attribs
vdef = ["$configversion 2"]
# altering the natural vnode information
if numnodes == 0:
for k, v in attribs.items():
vdef += [name + ": " + str(k) + "=" + str(v)]
else:
if usenatvnode:
if attrfunc:
customattrs = attrfunc(name, numnodes, "", attribs)
for k, v in customattrs.items():
vdef += [self.shortname + ": " + str(k) + "=" + str(v)]
# account for the use of the natural vnode
numnodes -= 1
else:
# ensure that natural vnode is not allocatable by the scheduler
vdef += [self.shortname + ": resources_available.ncpus=0"]
vdef += [self.shortname + ": resources_available.mem=0"]
for n in xrange(numnodes):
vnid = name + pre + str(n) + post
if sethost:
if vnodes_per_host > 1:
if n % vnodes_per_host == 0:
_nid = vnid
else:
_nid = name + pre + str(n - n % vnodes_per_host) + post
attribs['resources_available.host'] = _nid
else:
attribs['resources_available.host'] = vnid
if attrfunc:
customattrs = attrfunc(vnid, numnodes, n, attribs)
for k, v in customattrs.items():
vdef += [vnid + ": " + str(k) + "=" + str(v)]
if numnodes == 0:
nn = 1
else:
nn = numnodes
if numnodes > 1:
vnn_msg = ' vnodes '
else:
vnn_msg = ' vnode '
self.logger.info(self.logprefix + 'created ' + str(nn) +
vnn_msg + name + ' with attr ' +
str(attribs) + ' on host ' + self.hostname)
vdef += ["\n"]
del attribs
return "\n".join(vdef)
def parse_config(self):
"""
Parse mom config file into a dictionary of configuration
options.
:returns: A dictionary of configuration options on success,
and None otherwise
"""
try:
mconf = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv',
'config')
ret = self.du.cat(self.hostname, mconf, sudo=True)
if ret['rc'] != 0:
self.logger.error('error parsing configuration file')
return None
self.config = {}
lines = ret['out']
for line in lines:
(k, v) = line.split()
if k in self.config:
if isinstance(self.config[k], list):
self.config[k].append(v)
else:
self.config[k] = [self.config[k], v]
else:
self.config[k] = v
except:
self.logger.error('error in parse_config')
return None
return self.config
def add_config(self, conf={}, hup=True):
"""
Add config options to mom_priv_config.
:param conf: The configurations to add to ``mom_priv/config``
:type conf: Dictionary
:param hup: If True (default) ``HUP`` the MoM
:type hup: bool
:returns: True on success and False otherwise
"""
doconfig = False
if not self.config:
self.parse_config()
mc = self.config
if mc is None:
mc = {}
for k, v in conf.items():
if k in mc and (mc[k] == v or (isinstance(v, list) and
mc[k] in v)):
self.logger.debug(self.logprefix + 'config ' + k +
' already set to ' + str(v))
continue
else:
doconfig = True
break
if not doconfig:
return True
self.logger.info(self.logprefix + "config " + str(conf))
return self.apply_config(conf, hup)
def unset_mom_config(self, name, hup=True):
"""
Delete a mom_config entry
:param name: The entry to remove from ``mom_priv/config``
:type name: String
:param hup: if True (default) ``HUP`` the MoM
:type hup: bool
:returns: True on success and False otherwise
"""
mc = self.parse_config()
if mc is None or name not in mc:
return True
self.logger.info(self.logprefix + "unsetting config " + name)
del mc[name]
return self.apply_config(mc, hup)
def apply_config(self, conf={}, hup=True, restart=False):
"""
Apply configuration options to MoM.
:param conf: A dictionary of configuration options to apply
to MoM
:type conf: Dictionary
:param hup: If True (default) , HUP the MoM to apply the
configuration
:type hup: bool
:returns: True on success and False otherwise.
"""
self.config = dict(self.config.items() + conf.items())
try:
(_, fn) = self.du.mkstemp()
f = open(fn, 'w+')
for k, v in self.config.items():
if isinstance(v, list):
for eachprop in v:
f.write(str(k) + ' ' + str(eachprop) + '\n')
else:
f.write(str(k) + ' ' + str(v) + '\n')
f.close()
dest = os.path.join(
self.pbs_conf['PBS_HOME'], 'mom_priv', 'config')
self.du.run_copy(self.hostname, fn, dest, mode=0644, sudo=True)
self.du.chown(self.hostname, path=dest, uid=0, gid=0, sudo=True)
os.remove(fn)
except:
raise PbsMomConfigError(rc=1, rv=False,
msg='error processing add_config')
if restart:
return self.restart()
elif hup:
return self.signal('-HUP')
return True
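# Illustrative usage sketch (not part of the library): add and remove a
# mom_priv/config option. Assumes `mom` is a MoM instance; the $logevent
# value below is hypothetical.
#
#   mom.add_config({'$logevent': '0xffffffff'})      # appends and HUPs the MoM
#   mom.unset_mom_config('$logevent', hup=False)     # removes it without a HUP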
def get_vnode_def(self, vnodefile=None):
"""
:returns: A vnode def file as a single string
"""
if vnodefile is None:
return None
f = open(vnodefile)
lines = f.readlines()
f.close()
return "".join(lines)
def insert_vnode_def(self, vdef, fname=None, additive=False, restart=True):
"""
Insert and enable a vnode definition. Root privilege
is required
:param vdef: The vnode definition string as created by
create_vnode_def
:type vdef: str
:param fname: The filename to write the vnode def string to
:type fname: str or None
:param additive: If True, keep all other vnode def files
under config.d Default is False
:type additive: bool
:param restart: If True, restart the MoM. Default is True
:type restart: bool
"""
try:
(fd, fn) = self.du.mkstemp(self.hostname)
os.write(fd, vdef)
os.close(fd)
except:
raise PbsMomConfigError(rc=1, rv=False,
msg="Failed to insert vnode definition")
if fname is None:
fname = 'pbs_vnode.def'
if not additive:
self.delete_vnode_defs()
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'insert', fname, fn]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
self.du.rm(hostname=self.hostname, path=fn, force=True)
if ret['rc'] != 0:
raise PbsMomConfigError(rc=1, rv=False, msg="\n".join(ret['err']))
msg = self.logprefix + 'inserted vnode definition file '
msg += fname + ' on host: ' + self.hostname
self.logger.info(msg)
if restart:
self.restart()
def has_vnode_defs(self):
"""
Check for vnode definition(s)
"""
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'list']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
if ret['rc'] == 0:
files = [x for x in ret['out'] if not x.startswith('PBS')]
if len(files) > 0:
return True
else:
return False
else:
return False
def delete_vnode_defs(self, vdefname=None):
"""
delete vnode definition(s) on this MoM
:param vdefname: name of a vnode definition file to delete,
if None all vnode definitions are deleted
:type vdefname: str
:returns: True if delete succeed otherwise False
"""
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'list']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
if ret['rc'] != 0:
return False
rv = True
if len(ret['out']) > 0:
for vnodedef in ret['out']:
vnodedef = vnodedef.strip()
if (vnodedef == vdefname) or vdefname is None:
if vnodedef.startswith('PBS'):
continue
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin',
'pbs_mom')]
cmd += ['-s', 'remove', vnodedef]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True,
logerr=False, level=logging.INFOCLI)
if ret['rc'] != 0:
return False
else:
rv = True
return rv
def has_pelog(self, filename=None):
"""
Check for prologue and epilogue
"""
_has_pro = False
_has_epi = False
phome = self.pbs_conf['PBS_HOME']
prolog = os.path.join(phome, 'mom_priv', 'prologue')
epilog = os.path.join(phome, 'mom_priv', 'epilogue')
if self.du.isfile(self.hostname, path=prolog, sudo=True):
_has_pro = True
if filename == 'prologue':
return _has_pro
if self.du.isfile(self.hostname, path=epilog, sudo=True):
_has_epi = True
if filename == 'epilogue':
return _has_epi
if _has_epi or _has_pro:
return True
return False
def has_prologue(self):
"""
Check for prologue
"""
return self.has_pelog('prologue')
def has_epilogue(self):
"""
Check for epilogue
"""
return self.has_pelog('epilogue')
def delete_pelog(self):
"""
Delete any prologue and epilogue files that may have been
defined on this MoM
"""
phome = self.pbs_conf['PBS_HOME']
prolog = os.path.join(phome, 'mom_priv', 'prologue')
epilog = os.path.join(phome, 'mom_priv', 'epilogue')
ret = self.du.rm(self.hostname, epilog, force=True,
sudo=True, logerr=False)
if ret:
ret = self.du.rm(self.hostname, prolog, force=True,
sudo=True, logerr=False)
if not ret:
self.logger.error('problem deleting prologue/epilogue')
# we don't bail because the problem may be that files did not
# exist. Let tester fix the issue
return ret
def create_pelog(self, body=None, src=None, filename=None):
"""
create ``prologue`` and ``epilogue`` files, functionality
accepts either a body of the script or a source file.
:returns: True on success and False on error
"""
if self.has_diag:
_msg = 'MoM is loaded from diag so bypassing pelog creation'
self.logger.info(_msg)
return False
if (src is None and body is None) or (filename is None):
self.logger.error('file and body of script are required')
return False
pelog = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv', filename)
self.logger.info(self.logprefix +
' creating ' + filename + ' with body\n' + '---')
if body is not None:
self.logger.info(body)
(fd, src) = self.du.mkstemp(prefix='pbs-pelog')
os.write(fd, body)
os.close(fd)
elif src is not None:
_b = open(src)
self.logger.info("\n".join(_b.readlines()))
_b.close()
self.logger.info('---')
ret = self.du.run_copy(self.hostname, src, pelog, sudo=True)
if body is not None:
os.remove(src)
if ret['rc'] != 0:
self.logger.error('error creating pelog ')
return False
ret = self.du.chown(self.hostname, path=pelog, uid=0, gid=0, sudo=True,
logerr=False)
if not ret:
self.logger.error('error chowning pelog to root')
return False
ret = self.du.chmod(self.hostname, path=pelog, mode=0755, sudo=True)
if not ret:
self.logger.error('error changing mode of pelog')
return False
return True
def prologue(self, body=None, src=None):
"""
create prologue
"""
return self.create_pelog(body, src, 'prologue')
def epilogue(self, body=None, src=None):
"""
Create epilogue
"""
return self.create_pelog(body, src, 'epilogue')
def action(self, act, script):
"""
Define action script. Not currently implemented
"""
pass
class Hook(PBSObject):
"""
PBS hook objects. Holds attributes information and pointer
to server
:param name: Hook name
:type name: str or None
:param attrs: Hook attributes
:type attrs: Dictionary
:param server: Pointer to server
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, server=None):
self.logger = logging.getLogger(__name__)
PBSObject.__init__(self, name, attrs, self.dflt_attributes)
self.server = server
class ResourceResv(PBSObject):
"""
Generic PBS resource reservation, i.e., job or
``advance/standing`` reservation
"""
def execvnode(self, attr='exec_vnode'):
"""
PBS type execution vnode
"""
if attr in self.attributes:
return PbsTypeExecVnode(self.attributes[attr])
else:
return None
def exechost(self):
"""
PBS type execution host
"""
if 'exec_host' in self.attributes:
return PbsTypeExecHost(self.attributes['exec_host'])
else:
return None
def select(self):
if hasattr(self, '_select') and self._select is not None:
return self._select
if 'schedselect' in self.attributes:
self._select = PbsTypeSelect(self.attributes['schedselect'])
elif 'select' in self.attributes:
self._select = PbsTypeSelect(self.attributes['select'])
else:
return None
return self._select
@classmethod
def get_hosts(cls, exechost=None):
"""
:returns: The hosts portion of the exec_host
"""
hosts = []
exechosts = cls.utils.parse_exechost(exechost)
if exechosts:
for h in exechosts:
eh = h.keys()[0]
if eh not in hosts:
hosts.append(eh)
return hosts
def get_vnodes(self, execvnode=None):
"""
:returns: The unique vnode names of an execvnode as a list
"""
if execvnode is None:
if 'exec_vnode' in self.attributes:
execvnode = self.attributes['exec_vnode']
elif 'resv_nodes' in self.attributes:
execvnode = self.attributes['resv_nodes']
else:
return []
vnodes = []
execvnodes = PbsTypeExecVnode(execvnode)
if execvnodes:
for n in execvnodes:
ev = n.keys()[0]
if ev not in vnodes:
vnodes.append(ev)
return vnodes
def walltime(self, attr='Resource_List.walltime'):
if attr in self.attributes:
return self.utils.convert_duration(self.attributes[attr])
class Job(ResourceResv):
"""
PBS Job. Attributes and Resources
:param username: Job username
:type username: str or None
:param attrs: Job attributes
:type attrs: Dictionary
:param jobname: Name of the PBS job
:type jobname: str or None
"""
dflt_attributes = {
ATTR_N: 'STDIN',
ATTR_j: 'n',
ATTR_m: 'a',
ATTR_p: '0',
ATTR_r: 'y',
ATTR_k: 'oe',
}
runtime = 100
logger = logging.getLogger(__name__)
def __init__(self, username=None, attrs={}, jobname=None):
self.server = {}
self.script = None
self.script_body = None
if username is not None:
self.username = str(username)
else:
self.username = None
self.du = None
self.interactive_handle = None
PBSObject.__init__(self, None, attrs, self.dflt_attributes)
if jobname is not None:
self.custom_attrs[ATTR_N] = jobname
self.attributes[ATTR_N] = jobname
self.set_variable_list(self.username)
self.set_sleep_time(100)
def set_variable_list(self, user=None, workdir=None):
"""
Customize the ``Variable_List`` job attribute to ``<user>``
"""
if user is None:
userinfo = pwd.getpwuid(os.getuid())
user = userinfo[0]
homedir = userinfo[5]
else:
try:
homedir = pwd.getpwnam(user)[5]
except:
homedir = ""
self.username = user
s = ['PBS_O_HOME=' + homedir]
s += ['PBS_O_LANG=en_US.UTF-8']
s += ['PBS_O_LOGNAME=' + user]
s += ['PBS_O_PATH=/usr/bin:/bin:/usr/bin:/usr/local/bin']
s += ['PBS_O_MAIL=/var/spool/mail/' + user]
s += ['PBS_O_SHELL=/bin/bash']
s += ['PBS_O_SYSTEM=Linux']
if workdir is not None:
wd = workdir
else:
wd = os.getcwd()
s += ['PBS_O_WORKDIR=' + str(wd)]
self.attributes[ATTR_v] = ",".join(s)
self.set_attributes()
def set_sleep_time(self, duration):
"""
Set the sleep duration for this job.
:param duration: The duration, in seconds, to sleep
:type duration: int
"""
self.set_execargs('/bin/sleep', duration)
def set_execargs(self, executable, arguments=None):
"""
Set the executable and arguments to use for this job
:param executable: path to an executable. No checks are made.
:type executable: str
:param arguments: arguments to executable.
:type arguments: str or list or int
"""
msg = ['job: executable set to ' + str(executable)]
if arguments is not None:
msg += [' with arguments: ' + str(arguments)]
self.logger.info("".join(msg))
self.attributes[ATTR_executable] = executable
if arguments is not None:
args = ''
xml_beginargs = '<jsdl-hpcpa:Argument>'
xml_endargs = '</jsdl-hpcpa:Argument>'
if isinstance(arguments, list):
for a in arguments:
args += xml_beginargs + str(a) + xml_endargs
elif isinstance(arguments, str):
args = xml_beginargs + arguments + xml_endargs
elif isinstance(arguments, int):
args = xml_beginargs + str(arguments) + xml_endargs
self.attributes[ATTR_Arglist] = args
else:
self.unset_attributes([ATTR_Arglist])
self.set_attributes()
def create_script(self, body=None, uid=None, gid=None, hostname=None):
"""
Create a job script from a given body of text into a
temporary location
:param body: the body of the script
:param uid: Optionally the uid of the user to own this script,
            defaults to the current user
:type uid: int or None
:param gid: Optionally the gid of the group of this script
:type gid: int or None
:param hostname: The host on which the job script is to
be created
:type hostname: str or None
"""
if body is None:
return None
if isinstance(body, list):
body = '\n'.join(body)
self.script_body = body
if self.du is None:
self.du = DshUtils()
# First create the temporary file as current user and only change
# its mode once the current user has written to it
(fd, fn) = self.du.mkstemp(hostname, prefix='PtlPbsJobScript', uid=uid,
gid=gid, mode=0755, body=body)
os.close(fd)
if not self.du.is_localhost(hostname):
self.du.run_copy(hostname, fn, fn)
self.script = fn
return fn
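# Illustrative usage sketch (not part of the library): build a job object,
# set its sleep duration and attach a small script. Assumes the submitting
# entity (e.g. a Server instance) is defined elsewhere in the test; the
# username and resource request below are hypothetical.
#
#   j = Job(username='pbsuser', attrs={'Resource_List.select': '1:ncpus=1'})
#   j.set_sleep_time(20)                              # job runs /bin/sleep 20
#   j.create_script(body='#!/bin/sh\nsleep 20\n')
#   # jid = server.submit(j)   # submission is handled by the Server class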
class Reservation(ResourceResv):
"""
PBS Reservation. Attributes and Resources
:param attrs: Reservation attributes
:type attrs: Dictionary
"""
dflt_attributes = {}
def __init__(self, username=None, attrs={}):
self.server = {}
self.script = None
self.attributes = attrs
if username is None:
userinfo = pwd.getpwuid(os.getuid())
self.username = userinfo[0]
else:
self.username = str(username)
# These are not in dflt_attributes because of the conversion to CLI
# options is done strictly
if ATTR_resv_start not in attrs:
attrs[ATTR_resv_start] = str(int(time.time()) + 36 * 3600)
if ATTR_resv_end not in attrs:
if ATTR_resv_duration not in attrs:
attrs[ATTR_resv_end] = str(int(time.time()) + 72 * 3600)
PBSObject.__init__(self, None, attrs, self.dflt_attributes)
self.set_attributes()
def set_variable_list(self, user, workdir=None):
pass
class InteractiveJob(threading.Thread):
"""
An Interactive Job thread
Interactive Jobs are submitted as a thread that sets the jobid
as soon as it is returned by ``qsub -I``, such that the caller
can get back to monitoring the state of PBS while the interactive
session goes on in the thread.
The commands to be run within an interactive session are
specified in the job's interactive_script attribute as a list of
tuples, where the first item in each tuple is the command to run,
and the subsequent items are the expected returned data.
Implementation details:
Support for interactive jobs is currently done through the
pexpect module which must be installed separately from PTL.
Interactive jobs are submitted through ``CLI`` only, there is no
API support for this operation yet.
The submission of an interactive job requires passing in job
attributes, the command to execute ``(i.e. path to qsub -I)``
and the hostname
when not impersonating:
pexpect spawns the ``qsub -I`` command and expects a prompt
back, for each tuple in the interactive_script, it sends the
command and expects to match the return value.
when impersonating:
pexpect spawns ``sudo -u <user> qsub -I``. The rest is as
described in non-impersonating mode.
"""
logger = logging.getLogger(__name__)
pexpect_timeout = 15
pexpect_sleep_time = .1
du = DshUtils()
def __init__(self, job, cmd, host):
threading.Thread.__init__(self)
self.job = job
self.cmd = cmd
self.jobid = None
self.hostname = host
def run(self):
"""
Run the interactive job
"""
try:
import pexpect
except:
self.logger.error('pexpect module is required for '
'interactive jobs')
return None
job = self.job
cmd = self.cmd
self.jobid = None
self.logger.info("submit interactive job as " + job.username +
": " + " ".join(cmd))
if not hasattr(job, 'interactive_script'):
self.logger.debug('no interactive_script attribute on job')
return None
try:
# sleep to allow server to communicate with client
# this value is set empirically so tweaking may be
# needed
_st = self.pexpect_sleep_time
_to = self.pexpect_timeout
_sc = job.interactive_script
cmd = ['sudo', '-u', job.username] + cmd
self.logger.debug(cmd)
_p = pexpect.spawn(" ".join(cmd), timeout=_to)
self.job.interactive_handle = _p
time.sleep(_st)
_p.expect('qsub: waiting for job (?P<jobid>[\d\w.]+) to start.*')
if _p.match:
self.jobid = _p.match.group('jobid')
else:
_p.close()
self.job.interactive_handle = None
return None
self.logger.debug(_p.after.decode())
for _l in _sc:
self.logger.debug('sending: ' + _l[0])
_p.sendline(_l[0])
time.sleep(_st)
# only way I could figure out to catch a sleep command
# within a spawned pexpect child. Might need revisiting
if 'sleep' in _l[0]:
_secs = _l[0].split()[1]
self.logger.debug('sleeping ' + str(_secs))
time.sleep(float(_secs))
if len(_l) > 1:
for _r in range(1, len(_l)):
self.logger.debug('expecting: ' + _l[_r])
_p.expect(_l[_r])
time.sleep(_st)
self.logger.debug('received: ' + _p.after.decode())
time.sleep(_st)
self.logger.debug('received: ' + _p.after.decode())
self.logger.debug('sending Ctrl-D')
_p.sendcontrol('d')
time.sleep(_st)
_p.close()
self.job.interactive_handle = None
self.logger.debug(_p.exitstatus)
except Exception:
self.logger.error(traceback.print_exc())
return None
return self.jobid
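# Illustrative usage sketch (not part of the library): the interactive_script
# attribute consumed by InteractiveJob.run() is a list of tuples where the
# first item is the command to send and any remaining items are patterns
# expected back. The commands below are hypothetical.
#
#   j = Job(username='pbsuser')
#   j.interactive_script = [('hostname', '.*'),
#                           ('sleep 5', '.*'),
#                           ('echo done', 'done')]
#   # submitting such a job as interactive spawns 'qsub -I' in this thread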
class Queue(PBSObject):
"""
PBS Queue container, holds attributes of the queue and
pointer to server
:param name: Queue name
:type name: str or None
:param attrs: Queue attributes
:type attrs: Dictionary
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, server=None):
self.logger = logging.getLogger(__name__)
PBSObject.__init__(self, name, attrs, self.dflt_attributes)
self.server = server
m = ['queue']
if server is not None:
m += ['@' + server.shortname]
if self.name is not None:
m += [' ', self.name]
m += [': ']
self.logprefix = "".join(m)
def revert_to_defaults(self):
"""
reset queue attributes to defaults
"""
ignore_attrs = ['id', ATTR_count, ATTR_rescassn]
ignore_attrs += [ATTR_qtype, ATTR_enable, ATTR_start, ATTR_total]
ignore_attrs += ['THE_END']
len_attrs = len(ignore_attrs)
unsetlist = []
setdict = {}
self.logger.info(
self.logprefix +
"reverting configuration to defaults")
if self.server is not None:
self.server.status(QUEUE, id=self.name, level=logging.DEBUG)
for k in self.attributes.keys():
for i in range(len_attrs):
if k.startswith(ignore_attrs[i]):
break
if (i == (len_attrs - 1)) and k not in self.dflt_attributes:
unsetlist.append(k)
if len(unsetlist) != 0 and self.server is not None:
try:
self.server.manager(MGR_CMD_UNSET, MGR_OBJ_QUEUE, unsetlist,
self.name)
except PbsManagerError, e:
self.logger.error(e.msg)
for k in self.dflt_attributes.keys():
if (k not in self.attributes or
self.attributes[k] != self.dflt_attributes[k]):
setdict[k] = self.dflt_attributes[k]
if len(setdict.keys()) != 0 and self.server is not None:
self.server.manager(MGR_CMD_SET, MGR_OBJ_QUEUE, setdict)
class PBSInitServices(object):
"""
PBS initialization services
:param hostname: Machine hostname
:type hostname: str or None
:param conf: PBS configuration file
:type conf: str or None
"""
def __init__(self, hostname=None, conf=None):
self.logger = logging.getLogger(__name__)
self.hostname = hostname
if self.hostname is None:
self.hostname = socket.gethostname()
self.dflt_conf_file = os.environ.get('PBS_CONF_FILE', '/etc/pbs.conf')
self.conf_file = conf
self.du = DshUtils()
self.is_sunos = sys.platform.startswith('sunos')
self.is_aix = sys.platform.startswith('aix')
self.is_linux = sys.platform.startswith('linux')
def initd(self, hostname=None, op='status', conf_file=None,
init_script=None, daemon='all'):
"""
Run the init script for a given operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param op: one of status, start, stop, restart
:type op: str
:param conf_file: optional path to a configuration file
:type conf_file: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
:param daemon: name of daemon to operate on. one of server, mom,
sched, comm or all
:type daemon: str
"""
if hostname is None:
hostname = self.hostname
if conf_file is None:
conf_file = self.conf_file
return self._unix_initd(hostname, op, conf_file, init_script, daemon)
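# Illustrative usage sketch (not part of the library): restart only the MoM
# daemon on a given host through the init script. The hostname below is
# hypothetical.
#
#   pi = PBSInitServices(hostname='node01')
#   pi.restart_mom()          # equivalent to initd(op='restart', daemon='mom')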
def restart(self, hostname=None, init_script=None):
"""
Run the init script for a restart operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script)
def restart_server(self, hostname=None, init_script=None):
"""
Run the init script for a restart server
:param hostname: hostname on which to restart server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='server')
def restart_mom(self, hostname=None, init_script=None):
"""
Run the init script for a restart mom
:param hostname: hostname on which to restart mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='mom')
def restart_sched(self, hostname=None, init_script=None):
"""
Run the init script for a restart sched
:param hostname: hostname on which to restart sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='sched')
def restart_comm(self, hostname=None, init_script=None):
"""
Run the init script for a restart comm
:param hostname: hostname on which to restart comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='comm')
def start(self, hostname=None, init_script=None):
"""
Run the init script for a start operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script)
def start_server(self, hostname=None, init_script=None):
"""
Run the init script for a start server
:param hostname: hostname on which to start server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='server')
def start_mom(self, hostname=None, init_script=None):
"""
Run the init script for a start mom
:param hostname: hostname on which to start mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='mom')
def start_sched(self, hostname=None, init_script=None):
"""
Run the init script for a start sched
:param hostname: hostname on which to start sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='sched')
def start_comm(self, hostname=None, init_script=None):
"""
Run the init script for a start comm
:param hostname: hostname on which to start comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='comm')
def stop(self, hostname=None, init_script=None):
"""
Run the init script for a stop operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script)
def stop_server(self, hostname=None, init_script=None):
"""
Run the init script for a stop server
:param hostname: hostname on which to stop server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='server')
def stop_mom(self, hostname=None, init_script=None):
"""
Run the init script for a stop mom
:param hostname: hostname on which to stop mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='mom')
def stop_sched(self, hostname=None, init_script=None):
"""
Run the init script for a stop sched
:param hostname: hostname on which to stop sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='sched')
def stop_comm(self, hostname=None, init_script=None):
"""
Run the init script for a stop comm
:param hostname: hostname on which to stop comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='comm')
def status(self, hostname=None, init_script=None):
"""
Run the init script for a status operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script)
def status_server(self, hostname=None, init_script=None):
"""
Run the init script for a status server
:param hostname: hostname on which to status server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='server')
def status_mom(self, hostname=None, init_script=None):
"""
Run the init script for a status mom
:param hostname: hostname on which to status mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='mom')
def status_sched(self, hostname=None, init_script=None):
"""
Run the init script for a status sched
:param hostname: hostname on which to status sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='sched')
def status_comm(self, hostname=None, init_script=None):
"""
Run the init script for a status comm
:param hostname: hostname on which to status comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='comm')
def _unix_initd(self, hostname, op, conf_file, init_script, daemon):
"""
Helper function for initd ``(*nix version)``
:param hostname: hostname on which init script should run
:type hostname: str
:param op: Operation on daemons - start, stop, restart or status
:type op: str
:param conf_file: Optional path to the pbs configuration file
:type conf_file: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
:param daemon: name of daemon to operate on. one of server, mom,
sched, comm or all
:type daemon: str
"""
if daemon is not None and daemon != 'all':
conf = self.du.parse_pbs_config(hostname, conf_file)
dconf = {
'PBS_START_SERVER': 0,
'PBS_START_MOM': 0,
'PBS_START_SCHED': 0,
'PBS_START_COMM': 0
}
if daemon == 'server' and conf.get('PBS_START_SERVER', 0) != 0:
dconf['PBS_START_SERVER'] = 1
elif daemon == 'mom' and conf.get('PBS_START_MOM', 0) != 0:
dconf['PBS_START_MOM'] = 1
elif daemon == 'sched' and conf.get('PBS_START_SCHED', 0) != 0:
dconf['PBS_START_SCHED'] = 1
elif daemon == 'comm' and conf.get('PBS_START_COMM', 0) != 0:
dconf['PBS_START_COMM'] = 1
(fd, fn) = self.du.mkstemp(hostname)
os.close(fd)
self.du.set_pbs_config(hostname, fin=conf_file, fout=fn,
confs=dconf)
init_cmd = ['PBS_CONF_FILE=' + fn]
_as = True
else:
fn = None
if (conf_file is not None) and (conf_file != self.dflt_conf_file):
init_cmd = ['PBS_CONF_FILE=' + conf_file]
_as = True
else:
init_cmd = []
_as = False
conf = self.du.parse_pbs_config(hostname, conf_file)
if (init_script is None) or (not init_script.startswith('/')):
if 'PBS_EXEC' not in conf:
msg = 'Missing PBS_EXEC setting in pbs config'
raise PbsInitServicesError(rc=1, rv=False, msg=msg)
if init_script is None:
init_script = os.path.join(conf['PBS_EXEC'], 'libexec',
'pbs_init.d')
else:
init_script = os.path.join(conf['PBS_EXEC'], 'etc',
init_script)
if not self.du.isfile(hostname, path=init_script, sudo=True):
# Could be Type 3 installation where we will not have
# PBS_EXEC/libexec/pbs_init.d
return []
init_cmd += [init_script, op]
msg = 'running init script to ' + op + ' pbs'
if daemon is not None and daemon != 'all':
msg += ' ' + daemon
msg += ' on ' + hostname
if conf_file is not None:
msg += ' using ' + conf_file
msg += ' init_cmd=%s' % (str(init_cmd))
self.logger.info(msg)
ret = self.du.run_cmd(hostname, init_cmd, sudo=True, as_script=_as,
logerr=False)
if ret['rc'] != 0:
raise PbsInitServicesError(rc=ret['rc'], rv=False,
msg='\n'.join(ret['err']))
else:
return ret
def switch_version(self, hostname=None, version=None):
"""
Switch to another version of PBS installed on the system
:param hostname: The hostname to operate on
:type hostname: str or None
:param version: version to switch
"""
pbs_conf = self.du.parse_pbs_config(hostname)
if 'PBS_EXEC' in pbs_conf:
dn = os.path.dirname(pbs_conf['PBS_EXEC'])
newver = os.path.join(dn, version)
ret = self.du.isdir(hostname, path=newver)
if not ret:
msg = 'no version ' + version + ' on host ' + hostname
raise PbsInitServicesError(rc=0, rv=False, msg=msg)
self.stop(hostname)
dflt = os.path.join(dn, 'default')
ret = self.du.isfile(hostname, path=dflt)
if ret:
self.logger.info('removing symbolic link ' + dflt)
self.du.rm(hostname, dflt, sudo=True, logerr=False)
self.du.set_pbs_config(hostname, confs={'PBS_EXEC': dflt})
else:
self.du.set_pbs_config(hostname, confs={'PBS_EXEC': newver})
self.logger.info('linking ' + newver + ' to ' + dflt)
self.du.run_cmd(hostname, ['ln', '-s', newver, dflt],
sudo=True, logerr=False)
self.start(hostname)
|
vinodchitrali/pbspro
|
test/fw/ptl/lib/pbs_testlib.py
|
Python
|
agpl-3.0
| 502,834
|
# coding=utf-8
"""
Provides cross-platform checking of the current power source, battery warning level and battery time remaining estimate.
Allows you to add an observer for power notifications if the platform supports it.
Usage:
from power import PowerManagement, PowerManagementObserver # Automatically imports platform-specific implementation
class Observer(PowerManagementObserver):
def on_power_sources_change(self, power_management):
print "Power sources did change."
def on_time_remaining_change(self, power_management):
print "Time remaining did change."
# class Observer(object):
# ...
# PowerManagementObserver.register(Observer)
"""
__author__ = 'kulakov.ilya@gmail.com'
__version__ = '1.2'
from sys import platform
from power.common import *
try:
if platform.startswith('darwin'):
from power.darwin import PowerManagement
elif platform.startswith('win32'):
from power.win32 import PowerManagement
elif platform.startswith('linux'):
from power.linux import PowerManagement
else:
raise RuntimeError("{platform} is not supported.".format(platform=platform))
except RuntimeError as e:
import warnings
warnings.warn("Unable to load PowerManagement for {platform}. No-op PowerManagement class is used: {error}".format(error=str(e), platform=platform))
from power.common import PowerManagementNoop as PowerManagement
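# Illustrative usage sketch (not part of the module): query power state and
# register an observer. The query/observer method names follow the pattern
# described in the module docstring; treat them as assumptions if your version
# of the package exposes different names.
#
#   pm = PowerManagement()
#   print(pm.get_providing_power_source_type())   # battery vs. AC
#   print(pm.get_time_remaining_estimate())       # minutes remaining, or an unknown/unlimited constant
#
#   class Observer(PowerManagementObserver):
#       def on_power_sources_change(self, power_management):
#           print("power sources changed")
#       def on_time_remaining_change(self, power_management):
#           print("time remaining changed")
#
#   observer = Observer()
#   pm.add_observer(observer)                     # pair with pm.remove_observer(observer)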
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/python/Lib/power/__init__.py
|
Python
|
agpl-3.0
| 1,441
|
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.addons.at_base import util
class official_holiday_template_wizard(osv.osv_memory):
def do_create(self, cr, uid, ids, context=None):
holiday_obj = self.pool.get("official.holiday")
template_obj = self.pool.get("official.holiday.template")
user_obj = self.pool.get("res.users")
template_ids = template_obj.search(cr,uid,[("id","in",util.active_ids(context))])
company_id = user_obj.browse(cr, uid, uid, context).company_id.id
official_holiday_ids = []
for template_id in template_ids:
template = template_obj.browse(cr, uid, template_id)
for holiday in template.official_holiday_ids:
official_holiday_ids.append(holiday.id)
for wizard in self.browse(cr, uid, ids, context=context):
if wizard.calendar_ids:
for calendar in wizard.calendar_ids:
holiday_obj.create_calendar_entries(cr, uid, official_holiday_ids, fiscalyear_id=wizard.fiscalyear_id.id, company_id=company_id, calendar_id=calendar.id,context=context)
else:
holiday_obj.create_calendar_entries(cr, uid, official_holiday_ids, fiscalyear_id=wizard.fiscalyear_id.id, company_id=company_id, context=context)
return { "type" : "ir.actions.act_window_close" }
_name = "official.holiday.template.wizard"
_description = "Official holiday template wizard"
_columns = {
"fiscalyear_id" : fields.many2one("account.fiscalyear", "Fiscal Year"),
"calendar_ids" : fields.many2many("resource.calendar", "holiday_calendar_rel", "holiday_id", "calendar_id", "Working Time"),
}
|
funkring/fdoo
|
addons-funkring/official_holiday/wizard/official_holiday_wizard.py
|
Python
|
agpl-3.0
| 2,655
|
# -*- coding: utf-8 -*-
"""
Interactive wizard to guide user to set up EFB and modules.
Since newer version of pip (>=9.0), which checks Python version
prior to installation, is already widespread, we are dropping
Python version check in wizard script, and assuming user is
running an appropriate Python version.
"""
import argparse
import gettext
import os
import platform
import sys
from collections import namedtuple
from contextlib import suppress
from io import StringIO
from typing import Dict, Callable, Optional
from urllib.parse import quote
import bullet.utils
import cjkwrap
import pkg_resources
from bullet import Bullet, keyhandler, colors
from bullet.charDef import NEWLINE_KEY, BACK_SPACE_KEY
from ruamel.yaml import YAML
from ehforwarderbot import coordinator, utils
Module = namedtuple("Module", ['type', 'id', 'name', 'emoji', 'wizard'])
Module.replace = Module._replace # type: ignore
gettext.translation(
'ehforwarderbot',
pkg_resources.resource_filename('ehforwarderbot', 'locale'),
fallback=True
).install(names=["ngettext"])
_: Callable
ngettext: Callable
def print_wrapped(text):
paras = text.split("\n")
for i in paras:
print(*cjkwrap.wrap(i), sep="\n")
class DataModel:
def __init__(self, profile):
self.profile = profile
self.yaml = YAML()
self.config = None
self.modules: Dict[str, Module] = {}
@staticmethod
def default_config():
# TRANSLATORS: This part of text must be formatted in a monospaced font, and all lines must not exceed the width of a 70-cell-wide terminal.
config = _(
"# ===================================\n"
"# EH Forwarder Bot Configuration File\n"
"# ===================================\n"
"# \n"
"# This file determines what modules, including master channel, slave channels,\n"
"# and middlewares, are enabled in this profile.\n"
"# \n"
"# \n"
"# Master Channel\n"
"# --------------\n"
"# Exactly one instance of a master channel is required for a profile.\n"
"# Fill in the module ID and instance ID (if needed) below.\n"
)
config += "\nmaster_channel:\n\n"
# TRANSLATORS: This part of text must be formatted in a monospaced font, and all lines must not exceed the width of a 70-cell-wide terminal.
config += _(
"# Slave Channels\n"
"# --------------\n"
"# \n"
"# At least one slave channel is required for a profile.\n"
"# Fill in the module ID and instance ID (if needed) of each slave channel\n"
"# to be enabled below.\n"
)
config += "\nslave_channels: []\n\n"
# TRANSLATORS: This part of text must be formatted in a monospaced font, and all lines must not exceed the width of a 70-cell-wide terminal.
config += _(
"# Middlewares\n"
"# -----------\n"
"# Middlewares are not required to run an EFB profile. If you are not\n"
"# going to use any middleware in this profile, you can safely remove\n"
"# this section. Otherwise, please list down the module ID and instance\n"
"# ID of each middleware to be enabled below.\n"
)
config += "middlewares: []\n"
str_io = StringIO(config)
str_io.seek(0)
return str_io
def load_config(self):
coordinator.profile = self.profile
conf_path = utils.get_config_path()
if not os.path.exists(conf_path):
self.config = self.yaml.load(self.default_config())
else:
with open(conf_path) as f:
self.config = self.yaml.load(f)
self.load_modules_list()
def save_config(self):
coordinator.profile = self.profile
conf_path = utils.get_config_path()
if not conf_path.exists():
conf_path.parent.mkdir(parents=True, exist_ok=True)
with open(conf_path, 'w') as f:
self.yaml.dump(self.config, f)
def load_modules_list(self):
for i in pkg_resources.iter_entry_points("ehforwarderbot.master"):
cls = i.load()
self.modules[cls.channel_id] = Module(type="master",
id=cls.channel_id,
name=cls.channel_name,
emoji=cls.channel_emoji,
wizard=None)
for i in pkg_resources.iter_entry_points("ehforwarderbot.slave"):
cls = i.load()
self.modules[cls.channel_id] = Module(type="slave",
id=cls.channel_id,
name=cls.channel_name,
emoji=cls.channel_emoji,
wizard=None)
for i in pkg_resources.iter_entry_points("ehforwarderbot.middleware"):
cls = i.load()
self.modules[cls.middleware_id] = Module(type="middleware",
id=cls.middleware_id,
name=cls.middleware_name,
emoji=None,
wizard=None)
for i in pkg_resources.iter_entry_points("ehforwarderbot.wizard"):
if i.name in self.modules:
fn = i.load()
self.modules[i.name] = self.modules[i.name].replace(wizard=fn)
def get_master_lists(self):
names = []
ids = []
for i in self.modules.values():
if i.type == "master":
names.append(i.name)
ids.append(i.id)
return names, ids
def get_slave_lists(self):
names = []
ids = []
for i in self.modules.values():
if i.type == "slave":
names.append(i.name)
ids.append(i.id)
return names, ids
@staticmethod
def split_cid(cid):
if "#" in cid:
mid, iid = cid.split("#")
else:
mid = cid
iid = None
return mid, iid
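# Illustrative example (not part of the wizard): a configured module ID may
# carry an instance name after a '#'. The module IDs below are hypothetical.
#
#   DataModel.split_cid("foo.master#personal")   # -> ("foo.master", "personal")
#   DataModel.split_cid("foo.slave")             # -> ("foo.slave", None)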
def get_instance_display_name(self, cid):
if not cid:
return cid
mid, iid = self.split_cid(cid)
if mid not in self.modules:
if iid:
return _("Unknown/custom module (instance: {instance})").format(
instance=iid
)
else:
return _("Unknown/custom module")
else:
if iid:
name = _("{channel} (instance: {instance})").format(
channel=self.modules[mid].name,
instance=iid
)
else:
name = self.modules[mid].name
return name
def has_wizard(self, cid):
mid, _ = self.split_cid(cid)
if mid not in self.modules:
return False
return callable(self.modules[mid].wizard)
def get_selected_slave_lists(self):
if 'slave_channels' not in self.config:
self.config['slave_channels'] = []
return [], []
i = 0
names = []
ids = []
while i < len(self.config['slave_channels']):
cid = self.config['slave_channels'][i]
mid, __ = self.split_cid(cid)
if mid not in self.modules or self.modules[mid].type != "slave":
names.append(_("Unknown/custom channel ({channel_id})").format(channel_id=cid))
ids.append(cid)
else:
name = self.get_instance_display_name(cid)
names.append(name)
ids.append(cid)
i += 1
return names, ids
def get_middleware_lists(self):
names = []
ids = []
for i in self.modules.values():
if i.type == "middleware":
names.append(i.name)
ids.append(i.id)
return names, ids
def get_selected_middleware_lists(self):
if 'middlewares' not in self.config:
self.config['middlewares'] = []
return [], []
i = 0
names = []
ids = []
while i < len(self.config['middlewares']):
cid = self.config['middlewares'][i]
mid, __ = self.split_cid(cid)
if mid not in self.modules or self.modules[mid].type != "middleware":
names.append(_("Unknown/custom middleware ({middleware_id})").format(middleware_id=cid))
ids.append(cid)
else:
name = self.get_instance_display_name(cid)
names.append(name)
ids.append(cid)
i += 1
return names, ids
# @keyhandler.init
class KeyValueBullet(Bullet):
def __init__(self, prompt: str = "", choices: list = [], choices_id: list = [], bullet: str = "●",
bullet_color: str = colors.foreground["default"], word_color: str = colors.foreground["default"],
word_on_switch: str = colors.REVERSE, background_color: str = colors.background["default"],
background_on_switch: str = colors.REVERSE, pad_right=0, indent: int = 0, align=0, margin: int = 0,
shift: int = 0):
super().__init__(prompt, choices, bullet, bullet_color, word_color, word_on_switch, background_color,
background_on_switch, pad_right, indent, align, margin, shift)
self.choices_id = choices_id
self._key_handler: Dict[int, Callable] = self._key_handler.copy()
self._key_handler[NEWLINE_KEY] = self.__class__.accept
# @keyhandler.register(NEWLINE_KEY)
def accept(self, *args):
pos = self.pos
bullet.utils.moveCursorDown(len(self.choices) - pos)
self.pos = 0
return self.choices[pos], self.choices_id[pos]
class ReorderBullet(Bullet):
def __init__(self, prompt: str = "", choices: list = None, choices_id: list = None, bullet: str = "●",
bullet_color: str = colors.foreground["default"], word_color: str = colors.foreground["default"],
word_on_switch: str = colors.REVERSE, background_color: str = colors.background["default"],
background_on_switch: str = colors.REVERSE, pad_right=0, indent: int = 0, align=0, margin: int = 0,
shift: int = 0, required: bool = False):
if choices is None:
choices = []
if choices_id is None:
choices_id = []
prompt += "\n" + _(
"[ =: Shift up; -: Shift down; Backspace: Remove ]"
)
choices.extend((
_("+ Add"),
_("✓ Submit")
))
super().__init__(prompt, choices, bullet, bullet_color, word_color, word_on_switch, background_color,
background_on_switch, pad_right, indent, align, margin, shift)
self.choices_id = choices_id
self.choices_id.extend((
"add",
"submit"
))
self._key_handler: Dict[int, Callable] = self._key_handler.copy()
self._key_handler[NEWLINE_KEY] = self.__class__.accept_fork
self.required = required
@keyhandler.register(ord('-'))
def shift_up(self):
choices = len(self.choices)
if self.pos - 1 < 0 or self.pos >= choices - 2:
return
else:
self.choices[self.pos - 1], self.choices[self.pos] = self.choices[self.pos], self.choices[self.pos - 1]
bullet.utils.clearLine()
old_pos = self.pos
self.pos -= 1
self.printBullet(old_pos)
bullet.utils.moveCursorUp(1)
bullet.utils.clearLine()
self.printBullet(self.pos)
@keyhandler.register(ord('='))
def shift_down(self):
choices = len(self.choices)
if self.pos >= choices - 3:
return
else:
self.choices[self.pos + 1], self.choices[self.pos] = self.choices[self.pos], self.choices[self.pos + 1]
bullet.utils.clearLine()
old_pos = self.pos
self.pos += 1
self.printBullet(old_pos)
bullet.utils.moveCursorDown(1)
bullet.utils.clearLine()
self.printBullet(self.pos)
@keyhandler.register(BACK_SPACE_KEY)
def delete_item(self):
choices = len(self.choices)
if self.pos >= choices - 2:
return
self.choices.pop(self.pos)
self.choices_id.pop(self.pos)
bullet.utils.moveCursorUp(self.pos - 1)
bullet.utils.clearConsoleDown(choices)
bullet.utils.moveCursorUp(1)
for i in range(len(self.choices)):
bullet.utils.moveCursorDown(1)
self.printBullet(i)
bullet.utils.moveCursorUp(1)
# @keyhandler.register(NEWLINE_KEY)
def accept_fork(self):
choices = len(self.choices)
if self.required and self.pos == choices - 1 and choices <= 2:
# Reject empty list
return None
if self.pos >= choices - 2: # Add / Submit
pos = self.pos
bullet.utils.moveCursorDown(len(self.choices) - pos)
self.pos = 0
return self.choices[:-2], self.choices_id[:-2], self.choices_id[pos]
return None
def get_platform_name():
p = platform.system()
if p == "Linux":
# noinspection PyBroadException
try:
# noinspection PyDeprecation
return ' '.join(platform.linux_distribution()[:2])
except: # lgtm [py/catch-base-exception]
# noinspection PyDeprecation
return ' '.join(platform.dist()[:2])
elif p == "Darwin":
return "macOS " + platform.mac_ver()[0]
elif p == "Windows":
return "Windows " + platform.win32_ver()[1]
else:
return ""
def build_search_query(query):
return "https://google.com/search?q=" + quote(query)
def prerequisite_check():
"""
Check prerequisites of the framework, including Python version, installation of
modules, etc.
Returns:
Optional[str]: If the check is not passed, return error message regarding
failed test case. None is returned otherwise.
"""
# Check Python version
print(_("Checking Python version... "), end="")
if sys.version_info < (3, 6):
version_str = "%s.%s.%s" % sys.version_info[:3]
        # TRANSLATORS: This word is used as part of the search query suggested to users;
        # it may appear in contexts like "Ubuntu 16.04 install Python 3.7"
search_url = build_search_query(_("install") + " Python 3.7")
print()
print(_("EH Forwarder Bot requires a minimum of Python 3.6 to run. You "
"are currently using Python {version}. \n"
"\n"
"You may want to try:\n"
"{url}").format(version=version_str, url=search_url))
exit(1)
else:
print(_("OK"))
# Check installations of modules
modules_err = _("You may want to visit the modules repository to find a list of "
"available modules to install.\n"
"https://efb-modules.1a23.studio")
# 1. At least 1 master channel must be installed
print(_("Checking master channels... "), end="")
try:
next(pkg_resources.iter_entry_points("ehforwarderbot.master"))
except StopIteration:
print()
print(_("No master channel detected. EH Forwarder Bot requires at least one "
"master channel installed to run.") + "\n\n" + modules_err)
exit(1)
print(_("OK"))
# 2. At least 1 slave channel must be installed
print(_("Checking slave channels... "), end="")
try:
next(pkg_resources.iter_entry_points("ehforwarderbot.slave"))
except StopIteration:
print()
print(_("No slave channel detected. EH Forwarder Bot requires at least one "
"slave channel installed to run.") + "\n\n" + modules_err)
exit(1)
print(_("OK"))
print()
def choose_master_channel(data: DataModel):
channel_names, channel_ids = data.get_master_lists()
list_widget = KeyValueBullet(prompt=_("1. Choose master channel"),
choices=channel_names,
choices_id=channel_ids)
default_idx = None
default_instance = ''
if "master_channel" in data.config and data.config['master_channel']:
default_config = data.config['master_channel'].split("#")
default_id = default_config[0]
if len(default_config) > 1:
default_instance = default_config[1]
with suppress(ValueError):
default_idx = channel_ids.index(default_id)
chosen_channel_name, chosen_channel_id = list_widget.launch(default=default_idx)
chosen_instance = input(_("Instance name to use with {channel_name}: [{default_instance}]")
.format(channel_name=chosen_channel_name,
default_instance=default_instance or _("default instance"))
+ " ").strip()
if chosen_instance:
chosen_channel_id += "#" + chosen_instance
data.config['master_channel'] = chosen_channel_id
def choose_slave_channels(data: DataModel):
chosen_slave_names, chosen_slave_ids = data.get_selected_slave_lists()
widget = ReorderBullet(_("2. Choose slave channels (at least one is required)."),
choices=chosen_slave_names,
choices_id=chosen_slave_ids,
required=True)
channel_names, channel_ids = data.get_slave_lists()
list_widget = KeyValueBullet(prompt=_("Choose a slave channel to add."),
choices=channel_names,
choices_id=channel_ids)
while True:
print()
chosen_slave_names, chosen_slave_ids, action = widget.launch()
if action == 'add':
print()
add_channel_name, add_channel_id = list_widget.launch()
add_channel_instance = input(_("Instance name to use with {channel_name}: [{default_instance}]")
.format(channel_name=add_channel_name,
default_instance=_("default instance"))
+ " ").strip()
if add_channel_instance:
add_channel_id += "#" + add_channel_instance
display_name = data.get_instance_display_name(add_channel_id)
if add_channel_id in widget.choices_id:
print_wrapped(_("{instance_name} ({instance_id}) is already enabled. "
"Please try another one.")
.format(instance_name=display_name, instance_id=add_channel_id))
else:
widget.choices.insert(-2, display_name)
widget.choices_id.insert(-2, add_channel_id)
else: # action == 'submit'
break
data.config['slave_channels'] = chosen_slave_ids
def choose_middlewares(data: DataModel):
chosen_middlewares_names, chosen_middlewares_ids = data.get_selected_middleware_lists()
widget = ReorderBullet(_("3. Choose middlewares (optional)."),
choices=chosen_middlewares_names,
choices_id=chosen_middlewares_ids)
middlewares_names, middlewares_ids = data.get_middleware_lists()
list_widget: Optional[KeyValueBullet]
if middlewares_ids:
list_widget = KeyValueBullet(prompt=_("Choose a middleware to add."),
choices=middlewares_names,
choices_id=middlewares_ids)
else:
list_widget = None
while True:
print()
chosen_middlewares_names, chosen_middlewares_ids, action = widget.launch()
if action == 'add':
print()
if not list_widget:
print_wrapped(_("No installed middleware is detected, press ENTER to go back."))
input()
else:
add_middleware_name, add_middleware_id = list_widget.launch()
add_middleware_instance = input(_("Instance name to use with {middleware_name}: [{default_instance}]")
.format(middleware_name=add_middleware_name,
default_instance=_("default instance"))
+ " ").strip()
if add_middleware_instance:
add_middleware_id += "#" + add_middleware_instance
display_name = data.get_instance_display_name(add_middleware_id)
if add_middleware_id in widget.choices_id:
print_wrapped(_("{instance_name} ({instance_id}) is already enabled. "
"Please try another one.")
.format(instance_name=display_name, instance_id=add_middleware_id))
else:
widget.choices.insert(-2, display_name)
widget.choices_id.insert(-2, add_middleware_id)
else: # action == 'submit'
break
data.config['middlewares'] = chosen_middlewares_ids
def confirmation(data: DataModel):
list_widget = KeyValueBullet(prompt=_("Would you like to continue?"),
choices=[_("Save and continue"),
_("Change master channel settings"),
_("Change slave channel settings"),
_("Change middleware settings")],
choices_id=["continue", "master", "slave", "middleware"])
while True:
print()
print_wrapped(_('You have chosen to enable the following '
'modules for profile "{profile}".')
.format(profile=data.profile))
print()
master_name = data.get_instance_display_name(data.config['master_channel'])
print_wrapped(_("Master channel: {channel_name}")
.format(channel_name=master_name))
print()
print_wrapped(ngettext("Slave channel:", "Slave channels:",
len(data.config['slave_channels'])))
for i in data.config['slave_channels']:
print_wrapped('- ' + data.get_instance_display_name(i))
num_middlewares = len(data.config.get('middlewares', []))
if num_middlewares > 0:
print()
print_wrapped(ngettext("Middleware:", "Middlewares:", num_middlewares))
for i in data.config['middlewares']:
print_wrapped('- ' + data.get_instance_display_name(i))
print()
outcome = list_widget.launch()[1]
if outcome == "master":
choose_master_channel(data)
elif outcome == "slave":
choose_slave_channels(data)
elif outcome == "middleware":
choose_middlewares(data)
else: # "continue"
break
data.save_config()
print()
print(_("Configuration is saved."))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--profile",
help=_("Choose a profile to start with."),
default="default")
parser.add_argument("-m", "--module",
help=_("Start the wizard of a module manually, skipping "
"the framework wizard."))
args = parser.parse_args()
data = DataModel(args.profile)
data.load_config()
if args.module:
mid, iid = data.split_cid(args.module)
if callable(data.modules[mid].wizard):
data.modules[mid].wizard(data.profile, iid)
return
else:
print(_("{module_id} did not register any wizard "
"program to start with.").format(module_id=args.module))
exit(1)
prerequisite_check()
print_wrapped(_("Welcome to EH Forwarder Bot Setup Wizard. This program "
"will guide you to finish up the last few steps to "
"get EFB ready to use.\n"
"\n"
"To use this wizard in another supported language, "
"please change your system language or modify the "
"language environment variable and restart the wizard."))
print()
data.profile = input(_("Profile") + f": [{data.profile}] ") or data.profile
print()
choose_master_channel(data)
choose_slave_channels(data)
choose_middlewares(data)
confirmation(data)
print_wrapped(_("Some more advanced settings, such as granulated log control, "
"are not included in this wizard. For further details, you may want to "
"refer to the documentation.\n\n"
"https://ehforwarderbot.readthedocs.io/en/latest/config.html"))
print()
modules_count = 1
missing_wizards = []
if not data.has_wizard(data.config['master_channel']):
missing_wizards.append(data.config['master_channel'])
for i in data.config['slave_channels']:
modules_count += 1
if not data.has_wizard(i):
missing_wizards.append(i)
for i in data.config['middlewares']:
modules_count += 1
if not data.has_wizard(i):
missing_wizards.append(i)
if missing_wizards:
prompt = ngettext("Note:\n"
"The following module does not have a setup wizard. It is probably because "
"that it does not need to be set up, or it requires you to set up manually.\n"
"Please consult its documentation for further details.\n",
"Note:\n"
"The following modules do not have a setup wizard. It is probably because "
"that they do not need to be set up, or they require you to set up manually.\n"
"Please consult their documentations respectively for further details.\n",
len(missing_wizards))
print_wrapped(prompt)
print()
for i in missing_wizards:
print_wrapped("- " + data.get_instance_display_name(i) + " (" + i + ")")
print()
if len(missing_wizards) == modules_count:
print_wrapped(_("Congratulations! You have finished setting up EFB "
"framework for the chosen profile. "
"You may now continue to configure modules you have "
"enabled manually, if necessary."))
exit(0)
else:
print_wrapped(_("We will now guide you to set up some modules you "
"have enabled. "
"But you may still need to configure other modules "
"manually if necessary."))
else:
print_wrapped("We will now guide you to set up modules you have enabled, "
"each at a time.")
modules = [data.config['master_channel']]
modules.extend(data.config['slave_channels'])
if 'middlewares' in data.config:
modules.extend(data.config['middlewares'])
for i in modules:
mid, iid = data.split_cid(i)
if mid in data.modules and callable(data.modules[mid].wizard):
print(_("Press ENTER/RETURN to start setting up {0}.").format(i))
input()
data.modules[mid].wizard(data.profile, iid)
print()
print_wrapped(_("Congratulations! You have now finished all wizard-enabled "
"modules. If you did not configure some modules enabled, "
"you might need to configure them manually."))
if __name__ == '__main__':
main()
|
blueset/ehForwarderBot
|
ehforwarderbot/wizard.py
|
Python
|
agpl-3.0
| 28,382
|
import inspect
import os
import sys
from datetime import timedelta
from io import StringIO
from unittest.mock import Mock
from urllib.error import HTTPError
import django
import django.db
import django.test.runner
import django.test.testcases
import django.test.utils
from django.test import TestCase
from aid.test.data import gen_member, gen_api_ladder
from aid.test.db import Db
from common.utils import to_unix, utcnow, classinstancemethod
from lib import sc2
from main.battle_net import LeagueResponse, ApiLeague, SeasonResponse, ApiSeason, LadderResponse, BnetClient
from main.models import Region, Enums, Mode, Version, League
# warnings.filterwarnings('ignore')
class DjangoTestCase(TestCase):
    # This is a rather ugly hack of the Django test framework. It was made a long time ago and it may be
    # possible to do it better now, since the Django test framework has changed a lot. The C++ code needs to
    # access the database, so the Django PostgreSQL test rollback scheme does not really work. Since using
    # PostgreSQL like this makes the tests slow, SQLite is used for tests that do not require PostgreSQL.
    # This makes it impossible to run them in the same process.
    #
    # Sometimes it is useful to debug the db. Set the KEEP_DATA environment variable to prevent deletion of the
    # database.
maxDiff = 1e4
def __str__(self):
""" Return a string that can be used as a command line argument to nose. """
return "%s:%s.%s" % (inspect.getfile(self.__class__), self.__class__.__name__, self._testMethodName)
@classmethod
def _enter_atomics(cls):
# Prevent rollbacks.
pass
@classmethod
def _rollback_atomics(cls, atomics):
# Prevent rollbacks.
pass
def _fixture_teardown(self):
# Prevent clearing of test data.
pass
@classmethod
def setUpClass(self):
super().setUpClass()
self.runner = django.test.runner.DiscoverRunner(interactive=False)
django.test.utils.setup_test_environment()
self.old_config = self.runner.setup_databases()
self.db = Db()
@classmethod
def tearDownClass(self):
if 'KEEP_DATA' in os.environ:
print("\nkeeping test database: %r." % self.db.db_name, file=sys.stderr)
else:
self.db.delete_all()
self.runner.teardown_databases(self.old_config)
django.test.utils.teardown_test_environment()
super().tearDownClass()
def tearDown(self):
if hasattr(self, 'cpp') and self.cpp is not None:
self.cpp.release()
self.cpp = None
super().tearDown()
def load(self):
self.cpp = sc2.RankingData(self.db.db_name, Enums.INFO)
self.cpp.load(self.db.ranking.id)
def process_ladder(self, load=False, save=False, region=Region.EU, fetch_time=None,
mode=Mode.TEAM_1V1, version=Version.HOTS, league=League.GOLD, season=None, tier=0,
members=None, **kwargs):
""" Update a ranking building single member with kwargs or use members if set. """
season = season or self.db.season
fetch_time = fetch_time or utcnow()
members = members or [gen_member(**kwargs)]
if not getattr(self, 'cpp', None):
self.cpp = sc2.RankingData(self.db.db_name, Enums.INFO)
if load:
self.load()
self.cpp.update_with_ladder(0, # bid
0, # source_id
region,
mode,
league,
tier,
version,
season.id,
to_unix(fetch_time),
fetch_time.date().isoformat(),
Mode.team_size(mode),
members)
if save:
self.save_to_ranking()
def save_to_ranking(self):
self.cpp.save_data(self.db.ranking.id, self.db.ranking.season_id, to_unix(utcnow()))
@classinstancemethod
def date(self, **kwargs):
return self.today + timedelta(**kwargs)
@classinstancemethod
def datetime(self, **kwargs):
return self.now + timedelta(**kwargs)
@classinstancemethod
def unix_time(self, **kwargs):
return to_unix(self.now + timedelta(**kwargs))
def assert_team_ranks(self, ranking_id, *ranks, skip_len=False, sort=True):
""" Get all team ranks using the current ranking id and assert that all ranks corresponds to team ranks in
db. All keys in ranks will be verified against team ranks values. """
team_ranks = sc2.get_team_ranks(self.db.db_name, ranking_id, sort)
all_keys = {key for rank in ranks for key in rank}
try:
if not skip_len:
self.assertEqual(len(team_ranks), len(ranks))
for i, (team_rank, r) in enumerate(zip(team_ranks, ranks), start=1):
for key, value in r.items():
self.assertEqual(value, team_rank[key], msg="%s wrong @ rank %d, expected %r, was %r" %
(key, i, r, {key: team_rank.get(key, None) for key in r.keys()}))
except AssertionError:
print("Expected:\n%s" % "\n".join([repr(tr) for tr in ranks]))
print("Actual:\n%s" % "\n".join([repr({k: v for k, v in tr.items() if k in all_keys})
for tr in team_ranks]))
raise
class MockBnetTestMixin(object):
""" Class to help with common mockings. """
def setUp(self):
super().setUp()
self.bnet = BnetClient()
def mock_raw_get(self, status=200, content=""):
self.bnet.raw_get = Mock(side_effect=HTTPError('', status, '', '', StringIO(content)))
def mock_current_season(self, status=200, season_id=None, start_time=None, fetch_time=None):
self.bnet.fetch_current_season = \
Mock(return_value=SeasonResponse(status,
ApiSeason({'seasonId': season_id or self.db.season.id,
'startDate': to_unix(start_time or utcnow())},
'http://fake-url'),
fetch_time or utcnow(), 0))
def mock_fetch_ladder(self, status=200, fetch_time=None, members=None, **kwargs):
self.bnet.fetch_ladder = \
Mock(return_value=LadderResponse(status, gen_api_ladder(members, **kwargs), fetch_time or utcnow(), 0))
def mock_fetch_league(self, status=200, fetch_time=None, season_id=None, t0_bids=None, t1_bids=None, t2_bids=None):
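        # Build a fake league response with three tiers, each populated with the given
        # ladder ids (bids).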
season_id = season_id or self.db.season.id
self.bnet.fetch_league = \
Mock(return_value=LeagueResponse(status,
ApiLeague({'tier': [
{'id': 0, 'division': [{'ladder_id': lid} for lid in t0_bids or []]},
{'id': 1, 'division': [{'ladder_id': lid} for lid in t1_bids or []]},
{'id': 2, 'division': [{'ladder_id': lid} for lid in t2_bids or []]},
]}, url="http://fake-url", bid=season_id * 100000),
fetch_time or utcnow(), 0))
|
andersroos/rankedftw
|
aid/test/base.py
|
Python
|
agpl-3.0
| 7,615
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2011 Pexego Sistemas Informáticos. All Rights Reserved
# $Omar Castiñeira Saavedra$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
diagramsoftware/l10n-spain
|
l10n_es_igic/data/__init__.py
|
Python
|
agpl-3.0
| 985
|
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
return value.replace("#FIRSTNAME#", user_dict['first_name'].strip()) \
.replace("#LASTNAME#", user_dict['last_name'].strip())
@register.filter(name='state_label_css')
def state_label_css(subm):
green_label = "badge label label-success"
red_label = "badge label label-important"
grey_label = "badge label label-info"
# We expect a submission as input
if subm.is_closed() and subm.grading:
if subm.grading.means_passed:
return green_label
else:
return red_label
if subm.state in [subm.SUBMITTED_TESTED,
subm.SUBMITTED,
subm.TEST_FULL_PENDING,
subm.GRADED,
subm.TEST_FULL_FAILED]:
return green_label
if subm.state == subm.TEST_VALIDITY_FAILED:
return red_label
return grey_label
@register.assignment_tag
def setting(name):
return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline_timeout(assignment):
return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html')
def grading(submission):
return {'submission': submission}
|
troeger/opensubmit
|
web/opensubmit/templatetags/projecttags.py
|
Python
|
agpl-3.0
| 1,869
|
"""Remove any personally identifying information from the database"""
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django_openid_auth.models import UserOpenID
from rest_framework.authtoken.models import Token
from reversion.models import Revision
from games.models import (
Installer,
InstallerIssue,
InstallerIssueReply,
InstallerHistory,
Screenshot,
GameSubmission,
)
from accounts.models import User
from common.models import Upload, News
class Command(BaseCommand):
"""Django command to anonymize the database"""
@staticmethod
def get_main_user():
"""Return the only user remaining in the DB"""
return User.objects.first()
@staticmethod
def delete_tokens():
"""Remove all auth tokens (OpenID, DRF, ...)"""
res = UserOpenID.objects.all().delete()
print("Deleted %s openids" % res[0])
res = Token.objects.all().delete()
print("Deleted %s tokens" % res[0])
res = LogEntry.objects.all().delete()
print("Deleted %s log entries" % res[0])
def handle(self, *args, **kwargs):
if not settings.DEBUG:
raise RuntimeError("Never run this in production")
self.delete_tokens()
user = self.get_main_user()
res = InstallerIssue.objects.all().update(submitted_by=user)
print("Updated %s issues" % res)
res = InstallerIssueReply.objects.all().update(submitted_by=user)
print("Updated %s issue replies" % res)
res = InstallerHistory.objects.all().update(user=user)
print("Updated %s installer history" % res)
res = Installer.objects.all().update(user=user)
print("Updated %s installers" % res)
res = InstallerHistory.objects.all().update(user=user)
print("Updated %s installer history" % res)
res = GameSubmission.objects.all().update(user=user)
print("Updated %s game submissions" % res)
res = Screenshot.objects.all().update(uploaded_by=user)
print("Updated %s screenshots" % res)
res = Upload.objects.all().update(uploaded_by=user)
print("Updated %s uploads" % res)
res = News.objects.all().update(user=user)
print("Updated %s news" % res)
res = Revision.objects.all().update(user=user)
print("Updated %s revisions" % res)
res = User.objects.exclude(pk=user.id).delete()
print("Deleted %s users" % res[0])
default_password = "lutris"
user.set_password(default_password)
user.username = "lutris"
user.email = "root@localhost"
user.website = ""
user.steamid = ""
user.save()
print("Password for user %s is now %s" % (user, default_password))
|
lutris/website
|
common/management/commands/anon_db.py
|
Python
|
agpl-3.0
| 2,839
|
#!/usr/bin/env python
from distutils.core import setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name="wrtfreezer",
version="0.1",
description="A simple utility to mass build OpenWRT images.",
author="Shuhao Wu",
license="AGPL",
url="https://github.com/shuhaowu/wrtfreezer",
packages=["wrtfreezer"],
scripts=["wrtbuild"],
requires=requirements,
)
|
shuhaowu/wrtfreezer
|
setup.py
|
Python
|
agpl-3.0
| 408
|
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for BufferedPipe.
"""
import threading
import time
import unittest
from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
from paramiko import pipe
from util import ParamikoTest
def delay_thread(pipe):
pipe.feed('a')
time.sleep(0.5)
pipe.feed('b')
pipe.close()
def close_thread(pipe):
time.sleep(0.2)
pipe.close()
class BufferedPipeTest(ParamikoTest):
def test_1_buffered_pipe(self):
p = BufferedPipe()
self.assert_(not p.read_ready())
p.feed('hello.')
self.assert_(p.read_ready())
data = p.read(6)
self.assertEquals(b'hello.', data)
p.feed('plus/minus')
self.assertEquals(b'plu', p.read(3))
self.assertEquals(b's/m', p.read(3))
self.assertEquals(b'inus', p.read(4))
p.close()
self.assert_(not p.read_ready())
self.assertEquals(b'', p.read(1))
def test_2_delay(self):
p = BufferedPipe()
self.assert_(not p.read_ready())
threading.Thread(target=delay_thread, args=(p,)).start()
self.assertEquals(b'a', p.read(1, 0.1))
try:
p.read(1, 0.1)
self.assert_(False)
except PipeTimeout:
pass
self.assertEquals(b'b', p.read(1, 1.0))
self.assertEquals(b'', p.read(1))
def test_3_close_while_reading(self):
p = BufferedPipe()
threading.Thread(target=close_thread, args=(p,)).start()
data = p.read(1, 1.0)
self.assertEquals(b'', data)
def test_4_or_pipe(self):
p = pipe.make_pipe()
p1, p2 = pipe.make_or_pipe(p)
self.assertFalse(p._set)
p1.set()
self.assertTrue(p._set)
p2.set()
self.assertTrue(p._set)
p1.clear()
self.assertTrue(p._set)
p2.clear()
self.assertFalse(p._set)
|
nischu7/paramiko
|
tests/test_buffered_pipe.py
|
Python
|
lgpl-2.1
| 2,696
|
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
A collection of utilities that can also be used from outside, if wanted. Functions and
classes here can be assumed public and won’t disappear in future Py-notify versions.
@var is_callable:
Determine if C{object} is callable. E.g. if it is a function, method, class, instance of
a class with C{__call__}, etc. This is the same as built-in function C{callable} does.
C{is_callable} is provided since C{callable} is going to disappear in Python 3000 and may
issue warnings in 2.6.
@var as_string:
Convert any attribute to its name as string. Main use of this utility object is to
perform Python ‘private’ identifier mangling. E.g. you can write::
class MyClass (object):
__slots__ = ('__x')
def get_x (self):
if hasattr (self, as_string.__x):
return self.__x
The advantage is that you don’t have to do mangling ‘by hand’ and hence there is less chance
for a typing error. Furthermore, this code does not require changes if you change
C{MyClass} name to anything else, whereas custom mangling does.
However, usefulness of ‘as_string’ is still doubtful. When I wrote it, I didn’t know one
could just write ``__slots__ = ('__x')``, I thought it needed to be
``__slots__ = ('_MyClass__x')``. Imagine...
"""
__docformat__ = 'epytext en'
__all__ = ('is_callable', 'is_valid_identifier', 'mangle_identifier',
'as_string',
'raise_not_implemented_exception',
'execute',
'frozendict', 'DummyReference', 'ClassTypes', 'StringType')
import re
import sys
import types
from keyword import iskeyword

# 'reduce' is no longer a builtin in Python 3; it is needed by _find_declaration_classes below.
try:
    reduce
except NameError:
    from functools import reduce
if sys.version_info[:3] < (2, 6, 0):
is_callable = callable
else:
def is_callable (object):
return hasattr (object, '__call__')
def is_valid_identifier (identifier):
"""
Determine if C{identifier} is a valid Python identifier. This function never raises
any exceptions. If C{identifier} is not a string, it simply returns C{False}.
    @param identifier: the identifier to check for validity
@type identifier: C{basestring}
@rtype: C{bool}
"""
return (isinstance (identifier, StringType)
and re.match ('^[_a-zA-Z][_a-zA-Z0-9]*$', identifier) is not None
and not iskeyword (identifier))
def mangle_identifier (class_name, identifier):
"""
    Mangle C{identifier} as it would be mangled if it appeared in a class named
    C{class_name}. This function makes it possible to mimic standard Python mangling of
    pseudo-private attributes, i.e. those whose names start with two underscores and don’t
    end in two. If C{identifier} is not considered a private name, it is returned
unchanged.
@param class_name: name of Python class.
@type class_name: C{basestring}
@param identifier: name of an attribute of that class.
@type identifier: C{basestring}
@rtype: C{str}
@raises ValueError: if either C{class_name} or C{identifier} is not valid from
Python’s point of view.
"""
if not (is_valid_identifier (class_name) and is_valid_identifier (identifier)):
raise ValueError ("'class_name' and 'identifier' must be valid Python identifiers")
if (identifier.startswith ('__')
and not identifier.endswith ('__')
and class_name != '_' * len (class_name)):
return '_%s%s' % (class_name.lstrip ('_'), identifier)
else:
return identifier
class _AsString (object):
"""
Internal helper class for C{L{as_string}}. Don’t use directly.
"""
__slots__ = ()
def __getattribute__(self, name):
return name
def __setattr__(self, name, value):
raise TypeError ("'as_string' attributes cannot be set")
def __delattr__(self, name):
raise TypeError ("'as_string' attributes cannot be deleted")
def __repr__(self):
return 'notify.utils.as_string'
as_string = _AsString ()
def raise_not_implemented_exception (object = None, function_name = None):
"""
Raise C{NotImplementedError} for a method invoked with C{object} as C{self}. The
function determines object class and method declaration class(es) itself and that’s
the whole point of it.
It should be called like this:
>>> raise_not_implemented_exception (self)
And output might look like this::
File ".../foo.py", line # in ?
Foo ().bar ()
File ".../foo.py", line #, in bar
raise_not_implemented_exception (self)
File ".../notify/utils.py", line #, in raise_not_implemented_exception
raise exception
NotImplementedError: bar() not implemented in class Foo (declared in AbstractFoo)
Optionally, C{function_name} can be specified. This argument mainly exists for C
extension, since function name cannot be detected automatically in this case. In
Python code you should just leave this argument out.
@param object: the object for which a non-implemented method is called.
@type object: C{object}
@param function_name: name of the unimplemented function or method (inferred
automatically for non-extension functions).
@type function_name: C{basestring} or C{None}
@raises NotImplementedError: always.
"""
if function_name is None:
try:
raise Exception
except Exception:
try:
traceback = sys.exc_info () [2]
function_name = traceback.tb_frame.f_back.f_code.co_name
except Exception:
# We can do nothing, ignore.
pass
if function_name is not None:
function_description = '%s()' % function_name
else:
function_description = 'UNKNOWN FUNCTION'
try:
class_description = ' in class %s' % object.__class__.__name__
if function_name is not None:
declaration_classes = _find_declaration_classes (object.__class__, function_name)
if len (declaration_classes) == 1:
if declaration_classes[0] is not object.__class__:
class_description += ' (declared in %s)' % declaration_classes[0].__name__
elif len (declaration_classes) > 1:
class_description += (' (declared in %s)'
% ', '.join ([_class.__name__
for _class in declaration_classes]))
except Exception:
class_description = ''
exception = NotImplementedError ('%s not implemented%s'
% (function_description, class_description))
raise exception
def _find_declaration_classes (_class, function_name):
declaring_bases = [base for base in _class.__bases__ if hasattr (base, function_name)]
if declaring_bases:
return reduce (lambda list1, list2: list1 + list2,
[_find_declaration_classes (base, function_name)
for base in declaring_bases],
[])
else:
return [_class]
if sys.version_info[0] >= 3:
execute = eval ('exec')
else:
from notify._2_x import execute
class frozendict (dict):
__slots__ = ('__hash')
def __init__(self, *arguments, **keywords):
super (frozendict, self).__init__(*arguments, **keywords)
self.__hash = None
def clear (self):
raise TypeError ("'%s' object doesn't support clearing" % type (self).__name__)
def pop (self, key, default = None):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def popitem (self):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def setdefault (self, key, default = None):
raise TypeError ("'%s' object doesn't support setdefault operation" % type (self).__name__)
def update (self, dict):
raise TypeError ("'%s' object doesn't support updating" % type (self).__name__)
def __setitem__(self, key, value):
raise TypeError ("'%s' object doesn't support item setting" % type (self).__name__)
def __delitem__(self, key):
raise TypeError ("'%s' object doesn't support item deletion" % type (self).__name__)
def __hash__(self):
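        # The hash is computed lazily on first use and cached; XOR-ing key/value hashes
        # keeps the result independent of iteration order.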
_hash = self.__hash
if _hash is None:
_hash = 0x1337
if hasattr (dict, 'iteritems'):
for key, value in self.iteritems ():
_hash ^= hash (key) ^ hash (value)
else:
for key, value in self.items ():
_hash ^= hash (key) ^ hash (value)
self.__hash = _hash
return _hash
def __repr__(self):
return '%s (%s)' % (type (self).__name__, super (frozendict, self).__repr__())
frozendict.EMPTY = frozendict ({ })
# Force hash to be precomputed.
hash (frozendict.EMPTY)
class DummyReference (object):
"""
Simple class that is interface-compatible with C{weakref.ReferenceType}. In other
words, its constructor accepts only one parameter and this value is later returned
from C{L{__call__}} method. Unlike weak references, instances of this class don’t do
anything special. They are only needed to avoid special cases for non-references,
since you can treat instances of C{weakref.ReferenceType} and this class in the same
way.
"""
__slots__ = ('__object')
def __init__(self, object):
"""
Create a new dummy reference that will return C{object} when called.
@param object: the object that will be returned by this reference.
@type object: C{object}
"""
self.__object = object
def __call__(self):
"""
Return the C{object} specified at construction time.
@rtype: C{object}
"""
return self.__object
def __repr__(self):
return ('<%s.%s at 0x%x; to %r>'
% (self.__module__, self.__class__.__name__, id (self), self.__object))
def __str__(self):
return '<%s at 0x%x; to %s>' % (self.__class__.__name__, id (self), self.__object)
if sys.version_info[0] >= 3:
ClassTypes = (type,)
StringType = str
else:
ClassTypes = (type, types.ClassType)
StringType = basestring
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
|
berinhard/py-notify
|
notify/utils.py
|
Python
|
lgpl-2.1
| 11,952
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
import pycurl
import unittest
import sys
try:
import urllib.parse as urllib_parse
except ImportError:
import urllib as urllib_parse
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class ResetTest(unittest.TestCase):
def test_reset(self):
c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://localhost:8380/success')
c.reset()
try:
c.perform()
self.fail('Perform worked when it should not have')
except pycurl.error:
exc = sys.exc_info()[1]
code = exc.args[0]
self.assertEqual(pycurl.E_URL_MALFORMAT, code)
# check that Curl object is usable
c.setopt(pycurl.URL, 'http://localhost:8380/success')
sio = util.BytesIO()
c.setopt(pycurl.WRITEFUNCTION, sio.write)
c.perform()
self.assertEqual('success', sio.getvalue().decode())
# XXX this test was broken when it was test_reset.py
def skip_reset_with_multi(self):
outf = util.BytesIO()
cm = pycurl.CurlMulti()
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEFUNCTION, outf.write)
eh.setopt(pycurl.URL, 'http://localhost:8380/success')
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print("Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em))
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print("Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode))
raise RuntimeError
else:
print("Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL)))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
|
andrewleech/script.module.pycurl
|
lib/pycurl/pycurl-7.19.5.1/tests/reset_test.py
|
Python
|
lgpl-2.1
| 2,691
|
#!/usr/bin/python3
"""
Copyright 2012-2013 YouView TV Ltd.
2014-2017 stb-tester.com Ltd.
License: LGPL v2.1 or (at your option) any later version (see
https://github.com/stb-tester/stb-tester/blob/master/LICENSE for details).
"""
import argparse
import sys
import _stbt.core
from _stbt import imgproc_cache
from _stbt.logging import debug
from _stbt.stbt_run import (load_test_function,
sane_unicode_and_exception_handling, video)
def main(argv):
parser = _stbt.core.argparser()
parser.prog = 'stbt run'
parser.description = 'Run an stb-tester test script'
parser.add_argument(
'--cache', default=imgproc_cache.default_filename,
help="Path for image-processing cache (default: %(default)s")
parser.add_argument(
'--save-screenshot', default='on-failure',
choices=['always', 'on-failure', 'never'],
help="Save a screenshot at the end of the test to screenshot.png")
parser.add_argument(
'--save-thumbnail', default='never',
choices=['always', 'on-failure', 'never'],
help="Save a thumbnail at the end of the test to thumbnail.jpg")
parser.add_argument(
'script', metavar='FILE[::TESTCASE]', help=(
"The python test script to run. Optionally specify a python "
"function name to run that function; otherwise only the script's "
"top-level will be executed."))
parser.add_argument(
'args', nargs=argparse.REMAINDER, metavar='ARG',
help='Additional arguments passed on to the test script (in sys.argv)')
args = parser.parse_args(argv[1:])
debug("Arguments:\n" + "\n".join([
"%s: %s" % (k, v) for k, v in args.__dict__.items()]))
dut = _stbt.core.new_device_under_test_from_config(args)
with sane_unicode_and_exception_handling(args.script), \
video(args, dut), \
imgproc_cache.setup_cache(filename=args.cache):
test_function = load_test_function(args.script, args.args)
test_function.call()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
stb-tester/stb-tester
|
stbt_run.py
|
Python
|
lgpl-2.1
| 2,110
|
#!/usr/bin/python
## image-to-gcode is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by the
## Free Software Foundation; either version 2 of the License, or (at your
## option) any later version. image-to-gcode is distributed in the hope
## that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
## warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
## the GNU General Public License for more details. You should have
## received a copy of the GNU General Public License along with image-to-gcode;
## if not, write to the Free Software Foundation, Inc., 59 Temple Place,
## Suite 330, Boston, MA 02111-1307 USA
##
## image-to-gcode.py is Copyright (C) 2005 Chris Radek
## chris@timeguy.com
## image-to-gcode.py is Copyright (C) 2006 Jeff Epler
## jepler@unpy.net
import sys, os
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
import gettext;
gettext.install("emc2", localedir=os.path.join(BASE, "share", "locale"), unicode=True)
import Image, numarray
import numarray.ieeespecial as ieee
from rs274.author import Gcode
import rs274.options
from math import *
import operator
epsilon = 1e-5
def ball_tool(r,rad):
s = -sqrt(rad**2-r**2)
return s
def endmill(r,dia):
return 0
def vee_common(angle):
slope = tan(angle * pi / 180)
def f(r, dia):
return r * slope
return f
tool_makers = [ ball_tool, endmill, vee_common(30), vee_common(45), vee_common(60)]
def make_tool_shape(f, wdia, resp):
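    # Sample the tool-tip height over a dia x dia pixel grid; cells outside the cutter
    # radius stay at +inf so they never constrain the cut depth in get_z().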
res = 1. / resp
dia = int(wdia*res+.5)
wrad = wdia/2.
if dia < 2: dia = 2
n = numarray.array([[ieee.plus_inf] * dia] * dia, type="Float32")
hdia = dia / 2.
l = []
for x in range(dia):
for y in range(dia):
r = hypot(x-hdia, y-hdia) * resp
if r < wrad:
z = f(r, wrad)
l.append(z)
n[x,y] = z
n = n - n.min()
return n
def amax(seq):
res = 0
for i in seq:
if abs(i) > abs(res): res = i
return res
def group_by_sign(seq, slop=sin(pi/18), key=lambda x:x):
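    # Yield runs of items whose key keeps the same sign (within 'slop'); consecutive
    # runs share the element at which the sign flips.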
sign = None
subseq = []
for i in seq:
ki = key(i)
if sign is None:
subseq.append(i)
if ki != 0:
sign = ki / abs(ki)
else:
subseq.append(i)
if sign * ki < -slop:
sign = ki / abs(ki)
yield subseq
subseq = [i]
if subseq: yield subseq
class Convert_Scan_Alternating:
def __init__(self):
self.st = 0
def __call__(self, primary, items):
st = self.st = self.st + 1
if st % 2: items.reverse()
if st == 1: yield True, items
else: yield False, items
def reset(self):
self.st = 0
class Convert_Scan_Increasing:
def __call__(self, primary, items):
yield True, items
def reset(self):
pass
class Convert_Scan_Decreasing:
def __call__(self, primary, items):
items.reverse()
yield True, items
def reset(self):
pass
class Convert_Scan_Upmill:
def __init__(self, slop = sin(pi / 18)):
self.slop = slop
def __call__(self, primary, items):
for span in group_by_sign(items, self.slop, operator.itemgetter(2)):
if amax([it[2] for it in span]) < 0:
span.reverse()
yield True, span
def reset(self):
pass
class Convert_Scan_Downmill:
def __init__(self, slop = sin(pi / 18)):
self.slop = slop
def __call__(self, primary, items):
for span in group_by_sign(items, self.slop, operator.itemgetter(2)):
if amax([it[2] for it in span]) > 0:
span.reverse()
yield True, span
def reset(self):
pass
class Reduce_Scan_Lace:
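    # Used for "lace bounding": wraps another scan converter and keeps only the
    # sub-spans whose local slope passes the contact-angle test, with span boundaries
    # snapped to multiples of 'keep' pixels.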
def __init__(self, converter, slope, keep):
self.converter = converter
self.slope = slope
self.keep = keep
def __call__(self, primary, items):
slope = self.slope
keep = self.keep
if primary:
idx = 3
test = operator.le
else:
idx = 2
test = operator.ge
def bos(j):
return j - j % keep
def eos(j):
if j % keep == 0: return j
return j + keep - j%keep
for i, (flag, span) in enumerate(self.converter(primary, items)):
subspan = []
a = None
for i, si in enumerate(span):
ki = si[idx]
if a is None:
if test(abs(ki), slope):
a = b = i
else:
if test(abs(ki), slope):
b = i
else:
if i - b < keep: continue
yield True, span[bos(a):eos(b+1)]
a = None
if a is not None:
yield True, span[a:]
def reset(self):
self.primary.reset()
unitcodes = ['G20', 'G21']
convert_makers = [ Convert_Scan_Increasing, Convert_Scan_Decreasing, Convert_Scan_Alternating, Convert_Scan_Upmill, Convert_Scan_Downmill ]
def progress(a, b):
if os.environ.has_key("AXIS_PROGRESS_BAR"):
print >>sys.stderr, "FILTER_PROGRESS=%d" % int(a*100./b+.5)
sys.stderr.flush()
class Converter:
def __init__(self,
image, units, tool_shape, pixelsize, pixelstep, safetyheight, \
tolerance, feed, convert_rows, convert_cols, cols_first_flag,
entry_cut, spindle_speed, roughing_offset, roughing_delta,
roughing_feed):
self.image = image
self.units = units
self.tool = tool_shape
self.pixelsize = pixelsize
self.pixelstep = pixelstep
self.safetyheight = safetyheight
self.tolerance = tolerance
self.base_feed = feed
self.convert_rows = convert_rows
self.convert_cols = convert_cols
self.cols_first_flag = cols_first_flag
self.entry_cut = entry_cut
self.spindle_speed = spindle_speed
self.roughing_offset = roughing_offset
self.roughing_delta = roughing_delta
self.roughing_feed = roughing_feed
self.cache = {}
w, h = self.w, self.h = image.shape
ts = self.ts = tool_shape.shape[0]
self.h1 = h - ts
self.w1 = w - ts
self.tool_shape = tool_shape * self.pixelsize * ts / 2;
def one_pass(self):
g = self.g
g.set_feed(self.feed)
if self.convert_cols and self.cols_first_flag:
self.g.set_plane(19)
self.mill_cols(self.convert_cols, True)
if self.convert_rows: g.safety()
if self.convert_rows:
self.g.set_plane(18)
self.mill_rows(self.convert_rows, not self.cols_first_flag)
if self.convert_cols and not self.cols_first_flag:
self.g.set_plane(19)
if self.convert_rows: g.safety()
self.mill_cols(self.convert_cols, not self.convert_rows)
if self.convert_cols:
self.convert_cols.reset()
if self.convert_rows:
self.convert_rows.reset()
g.safety()
def convert(self):
self.g = g = Gcode(safetyheight=self.safetyheight,
tolerance=self.tolerance,
spindle_speed=self.spindle_speed,
units=self.units)
g.begin()
g.continuous(self.tolerance)
g.safety()
if self.roughing_delta and self.roughing_offset:
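            # Roughing: dilate the surface by a ball tool of diameter 2*roughing_offset
            # (the stock to leave), then mill in steps of roughing_delta down to the
            # final depth; the finish pass afterwards uses the original image.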
base_image = self.image
rough = make_tool_shape(ball_tool,
2*self.roughing_offset, self.pixelsize)
w, h = base_image.shape
tw, th = rough.shape
w1 = w + tw
h1 = h + th
nim1 = numarray.zeros((w1, h1), 'Float32') + base_image.min()
nim1[tw/2:tw/2+w, th/2:th/2+h] = base_image
self.image = numarray.zeros((w,h), type="Float32")
for j in range(0, w):
progress(j,w)
for i in range(0, h):
self.image[j,i] = (nim1[j:j+tw,i:i+th] - rough).max()
self.feed = self.roughing_feed
r = -self.roughing_delta
m = self.image.min()
self.ro = self.roughing_offset
while r > m:
self.rd = r
self.one_pass()
r = r - self.roughing_delta
if r < m + epsilon:
self.rd = m
self.one_pass()
self.image = base_image
self.cache.clear()
self.feed = self.base_feed
self.ro = 0
self.rd = self.image.min()
self.one_pass()
g.end()
def get_z(self, x, y):
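        # Deepest allowed Z at this pixel: max of (surface window minus tool profile),
        # cached per (x, y) and clamped by the current roughing depth/offset.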
try:
return min(0, max(self.rd, self.cache[x,y]) + self.ro)
except KeyError:
m1 = self.image[y:y+self.ts, x:x+self.ts]
self.cache[x,y] = d = (m1 - self.tool).max()
return min(0, max(self.rd, d) + self.ro)
def get_dz_dy(self, x, y):
y1 = max(0, y-1)
y2 = min(self.image.shape[0]-1, y+1)
dy = self.pixelsize * (y2-y1)
return (self.get_z(x, y2) - self.get_z(x, y1)) / dy
def get_dz_dx(self, x, y):
x1 = max(0, x-1)
x2 = min(self.image.shape[1]-1, x+1)
dx = self.pixelsize * (x2-x1)
return (self.get_z(x2, y) - self.get_z(x1, y)) / dx
def mill_rows(self, convert_scan, primary):
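        # Mill along rows of constant Y, stepping 'pixelstep' rows between passes
        # (the final row is always included), emitting cuts via convert_scan.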
w1 = self.w1; h1 = self.h1;
pixelsize = self.pixelsize; pixelstep = self.pixelstep
jrange = range(0, w1, pixelstep)
if w1-1 not in jrange: jrange.append(w1-1)
irange = range(h1)
for j in jrange:
progress(jrange.index(j), len(jrange))
y = (w1-j) * pixelsize
scan = []
for i in irange:
x = i * pixelsize
milldata = (i, (x, y, self.get_z(i, j)),
self.get_dz_dx(i, j), self.get_dz_dy(i, j))
scan.append(milldata)
for flag, points in convert_scan(primary, scan):
if flag:
self.entry_cut(self, points[0][0], j, points)
for p in points:
self.g.cut(*p[1])
self.g.flush()
def mill_cols(self, convert_scan, primary):
w1 = self.w1; h1 = self.h1;
pixelsize = self.pixelsize; pixelstep = self.pixelstep
jrange = range(0, h1, pixelstep)
irange = range(w1)
if h1-1 not in jrange: jrange.append(h1-1)
jrange.reverse()
for j in jrange:
progress(jrange.index(j), len(jrange))
x = j * pixelsize
scan = []
for i in irange:
y = (w1-i) * pixelsize
milldata = (i, (x, y, self.get_z(j, i)),
self.get_dz_dy(j, i), self.get_dz_dx(j, i))
scan.append(milldata)
for flag, points in convert_scan(primary, scan):
if flag:
self.entry_cut(self, j, points[0][0], points)
for p in points:
self.g.cut(*p[1])
self.g.flush()
def convert(*args, **kw):
return Converter(*args, **kw).convert()
class SimpleEntryCut:
def __init__(self, feed):
self.feed = feed
def __call__(self, conv, i0, j0, points):
p = points[0][1]
if self.feed:
conv.g.set_feed(self.feed)
conv.g.safety()
conv.g.rapid(p[0], p[1])
if self.feed:
conv.g.set_feed(conv.feed)
def circ(r,b):
"""\
Calculate the portion of the arc to do so that none is above the
safety height (that's just silly)"""
z = r**2 - (r-b)**2
if z < 0: z = 0
return z**.5
class ArcEntryCut:
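    # Enter the material along a tangent arc (G2/G3) whose radius is limited by
    # max_radius and by how quickly the neighbouring surface rises.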
def __init__(self, feed, max_radius):
self.feed = feed
self.max_radius = max_radius
def __call__(self, conv, i0, j0, points):
if len(points) < 2:
p = points[0][1]
if self.feed:
conv.g.set_feed(self.feed)
conv.g.safety()
conv.g.rapid(p[0], p[1])
if self.feed:
conv.g.set_feed(conv.feed)
return
p1 = points[0][1]
p2 = points[1][1]
z0 = p1[2]
lim = int(ceil(self.max_radius / conv.pixelsize))
r = range(1, lim)
if self.feed:
conv.g.set_feed(self.feed)
conv.g.safety()
x, y, z = p1
pixelsize = conv.pixelsize
cx = cmp(p1[0], p2[0])
cy = cmp(p1[1], p2[1])
radius = self.max_radius
if cx != 0:
h1 = conv.h1
for di in r:
dx = di * pixelsize
i = i0 + cx * di
if i < 0 or i >= h1: break
z1 = conv.get_z(i, j0)
dz = (z1 - z0)
if dz <= 0: continue
if dz > dx:
conv.g.write("(case 1)")
radius = dx
break
rad1 = (dx * dx / dz + dz) / 2
if rad1 < radius:
radius = rad1
if dx > radius:
break
z1 = min(p1[2] + radius, conv.safetyheight)
x1 = p1[0] + cx * circ(radius, z1 - p1[2])
conv.g.rapid(x1, p1[1])
conv.g.cut(z=z1)
conv.g.flush(); conv.g.lastgcode = None
if cx > 0:
conv.g.write("G3 X%f Z%f R%f" % (p1[0], p1[2], radius))
else:
conv.g.write("G2 X%f Z%f R%f" % (p1[0], p1[2], radius))
conv.g.lastx = p1[0]
conv.g.lasty = p1[1]
conv.g.lastz = p1[2]
else:
w1 = conv.w1
for dj in r:
dy = dj * pixelsize
j = j0 - cy * dj
if j < 0 or j >= w1: break
z1 = conv.get_z(i0, j)
dz = (z1 - z0)
if dz <= 0: continue
if dz > dy:
radius = dy
break
rad1 = (dy * dy / dz + dz) / 2
if rad1 < radius: radius = rad1
if dy > radius: break
z1 = min(p1[2] + radius, conv.safetyheight)
y1 = p1[1] + cy * circ(radius, z1 - p1[2])
conv.g.rapid(p1[0], y1)
conv.g.cut(z=z1)
conv.g.flush(); conv.g.lastgcode = None
if cy > 0:
conv.g.write("G2 Y%f Z%f R%f" % (p1[1], p1[2], radius))
else:
conv.g.write("G3 Y%f Z%f R%f" % (p1[1], p1[2], radius))
conv.g.lastx = p1[0]
conv.g.lasty = p1[1]
conv.g.lastz = p1[2]
if self.feed:
conv.g.set_feed(conv.feed)
def ui(im, nim, im_name):
import Tkinter
import ImageTk
import pickle
import nf
app = Tkinter.Tk()
rs274.options.install(app)
app.tk.call("source", os.path.join(BASE, "share", "axis", "tcl", "combobox.tcl"))
name = os.path.basename(im_name)
app.wm_title(_("%s: Image to gcode") % name)
app.wm_iconname(_("Image to gcode"))
w, h = im.size
r1 = w / 300.
r2 = h / 300.
nw = int(w / max(r1, r2))
nh = int(h / max(r1, r2))
ui_image = im.resize((nw,nh), Image.ANTIALIAS)
ui_image = ImageTk.PhotoImage(ui_image, master = app)
i = Tkinter.Label(app, image=ui_image, compound="top",
text=_("Image size: %(w)d x %(h)d pixels\n"
"Minimum pixel value: %(min)d\nMaximum pixel value: %(max)d")
% {'w': im.size[0], 'h': im.size[1], 'min': nim.min(), 'max': nim.max()},
justify="left")
f = Tkinter.Frame(app)
g = Tkinter.Frame(app)
b = Tkinter.Frame(app)
i.grid(row=0, column=0, sticky="nw")
f.grid(row=0, column=1, sticky="nw")
b.grid(row=1, column=0, columnspan=2, sticky="ne")
def filter_nonint(event):
if event.keysym in ("Return", "Tab", "ISO_Left_Tab", "BackSpace"):
return
if event.char == "": return
if event.char in "0123456789": return
return "break"
def filter_nonfloat(event):
if event.keysym in ("Return", "Tab", "ISO_Left_Tab", "BackSpace"):
return
if event.char == "": return
if event.char in "0123456789.": return
return "break"
validate_float = "expr {![regexp {^-?([0-9]+(\.[0-9]*)?|\.[0-9]+|)$} %P]}"
validate_int = "expr {![regexp {^-?([0-9]+|)$} %P]}"
validate_posfloat = "expr {![regexp {^?([0-9]+(\.[0-9]*)?|\.[0-9]+|)$} %P]}"
validate_posint = "expr {![regexp {^([0-9]+|)$} %P]}"
def floatentry(f, v):
var = Tkinter.DoubleVar(f)
var.set(v)
w = Tkinter.Entry(f, textvariable=var, validatecommand=validate_float, validate="key", width=10)
return w, var
def intentry(f, v):
var = Tkinter.IntVar(f)
var.set(v)
w = Tkinter.Entry(f, textvariable=var, validatecommand=validate_int, validate="key", width=10)
return w, var
def checkbutton(k, v):
var = Tkinter.BooleanVar(f)
var.set(v)
g = Tkinter.Frame(f)
w = Tkinter.Checkbutton(g, variable=var, text=_("Yes"))
w.pack(side="left")
return g, var
def intscale(k, v, min=1, max = 100):
var = Tkinter.IntVar(f)
var.set(v)
g = Tkinter.Frame(f, borderwidth=0)
w = Tkinter.Scale(g, orient="h", variable=var, from_=min, to=max, showvalue=False)
l = Tkinter.Label(g, textvariable=var, width=3)
l.pack(side="left")
w.pack(side="left", fill="x", expand=1)
return g, var
def _optionmenu(k, v, *options):
options = list(options)
def trace(*args):
try:
var.set(options.index(svar.get()))
except ValueError:
pass
try:
opt = options[v]
except (TypeError, IndexError):
v = 0
opt = options[0]
var = Tkinter.IntVar(f)
var.set(v)
svar = Tkinter.StringVar(f)
svar.set(options[v])
svar.trace("w", trace)
wp = f._w.rstrip(".") + ".c" + svar._name
f.tk.call("combobox::combobox", wp, "-editable", 0, "-width",
max(len(opt) for opt in options)+3, "-textvariable", svar._name,
"-background", "white")
f.tk.call(wp, "list", "insert", "end", *options)
w = nf.makewidget(f, Tkinter.Widget, wp)
return w, var
def optionmenu(*options): return lambda f, v: _optionmenu(f, v, *options)
rc = os.path.expanduser("~/.image2gcoderc")
constructors = [
("units", optionmenu(_("G20 (in)"), _("G21 (mm)"))),
("invert", checkbutton),
("normalize", checkbutton),
("expand", optionmenu(_("None"), _("White"), _("Black"))),
("tolerance", floatentry),
("pixel_size", floatentry),
("feed_rate", floatentry),
("plunge_feed_rate", floatentry),
("spindle_speed", floatentry),
("pattern", optionmenu(_("Rows"), _("Columns"), _("Rows then Columns"), _("Columns then Rows"))),
("converter", optionmenu(_("Positive"), _("Negative"), _("Alternating"), _("Up Milling"), _("Down Milling"))),
("depth", floatentry),
("pixelstep", intscale),
("tool_diameter", floatentry),
("safety_height", floatentry),
("tool_type", optionmenu(_("Ball End"), _("Flat End"), _("30 Degree"), _("45 Degree"), _("60 Degree"))),
("bounded", optionmenu(_("None"), _("Secondary"), _("Full"))),
("contact_angle", floatentry),
("roughing_offset", floatentry),
("roughing_depth", floatentry),
]
defaults = dict(
invert = False,
normalize = False,
expand = 0,
pixel_size = .006,
depth = 0.25,
pixelstep = 8,
tool_diameter = 1/16.,
safety_height = .012,
tool_type = 0,
tolerance = .001,
feed_rate = 12,
plunge_feed_rate = 12,
units = 0,
pattern = 0,
converter = 0,
bounded = 0,
contact_angle = 45,
spindle_speed = 1000,
roughing_offset = .1,
roughing_depth = .25,
)
texts = dict(
invert=_("Invert Image"),
normalize=_("Normalize Image"),
expand=_("Extend Image Border"),
pixel_size=_("Pixel Size (Units)"),
depth=_("Depth (units)"),
tolerance=_("Tolerance (units)"),
pixelstep=_("Stepover (pixels)"),
tool_diameter=_("Tool Diameter (units)"),
tool_type=_("Tool Type"),
feed_rate=_("Feed Rate (units per minute)"),
plunge_feed_rate=_("Plunge Feed Rate (units per minute)"),
units=_("Units"),
safety_height=_("Safety Height (units)"),
pattern=_("Scan Pattern"),
converter=_("Scan Direction"),
bounded=_("Lace Bounding"),
contact_angle=_("Contact Angle (degrees)"),
spindle_speed=_("Spindle Speed (RPM)"),
roughing_offset=_("Roughing offset (units, 0=no roughing)"),
roughing_depth=_("Roughing depth per pass (units)"),
)
try:
defaults.update(pickle.load(open(rc, "rb")))
except (IOError, pickle.PickleError): pass
vars = {}
widgets = {}
for j, (k, con) in enumerate(constructors):
v = defaults[k]
text = texts.get(k, k.replace("_", " "))
lab = Tkinter.Label(f, text=text)
widgets[k], vars[k] = con(f, v)
lab.grid(row=j, column=0, sticky="w")
widgets[k].grid(row=j, column=1, sticky="ew")
def trace_pattern(*args):
if vars['pattern'].get() > 1:
widgets['bounded'].configure(state="normal")
trace_bounded()
else:
widgets['bounded'].configure(state="disabled")
widgets['contact_angle'].configure(state="disabled")
def trace_bounded(*args):
if vars['bounded'].get() != 0:
widgets['contact_angle'].configure(state="normal")
else:
widgets['contact_angle'].configure(state="disabled")
def trace_offset(*args):
if vars['roughing_offset'].get() > 0:
widgets['roughing_depth'].configure(state='normal')
else:
widgets['roughing_depth'].configure(state='disabled')
vars['pattern'].trace('w', trace_pattern)
vars['bounded'].trace('w', trace_bounded)
vars['roughing_offset'].trace('w', trace_offset)
trace_pattern()
trace_bounded()
trace_offset()
status = Tkinter.IntVar()
bb = Tkinter.Button(b, text=_("OK"), command=lambda:status.set(1), width=8, default="active")
bb.pack(side="left", padx=4, pady=4)
bb = Tkinter.Button(b, text=_("Cancel"), command=lambda:status.set(-1), width=8, default="normal")
bb.pack(side="left", padx=4, pady=4)
app.bind("<Escape>", lambda evt: status.set(-1))
app.bind("<Return>", lambda evt: status.set(1))
app.wm_protocol("WM_DELETE_WINDOW", lambda: status.set(-1))
app.wm_resizable(0,0)
app.wait_visibility()
app.tk.call("after", "idle", ("after", "idle", "focus [tk_focusNext .]"))
#app.tk_focusNext().focus()
app.wait_variable(status)
for k, v in vars.items():
defaults[k] = v.get()
app.destroy()
if status.get() == -1:
raise SystemExit(_("image-to-gcode: User pressed cancel"))
pickle.dump(defaults, open(rc, "wb"))
return defaults
def main():
if len(sys.argv) > 1:
im_name = sys.argv[1]
else:
import tkFileDialog, Tkinter
im_name = tkFileDialog.askopenfilename(defaultextension=".png",
filetypes = (
(_("Depth images"), ".gif .png .jpg"),
(_("All files"), "*")))
if not im_name: raise SystemExit
Tkinter._default_root.destroy()
Tkinter._default_root = None
im = Image.open(im_name)
size = im.size
im = im.convert("L") #grayscale
w, h = im.size
nim = numarray.fromstring(im.tostring(), 'UInt8', (h, w)).astype('Float32')
options = ui(im, nim, im_name)
step = options['pixelstep']
depth = options['depth']
if options['normalize']:
a = nim.min()
b = nim.max()
if a != b:
nim = (nim - a) / (b-a)
else:
nim = nim / 255.0
maker = tool_makers[options['tool_type']]
tool_diameter = options['tool_diameter']
pixel_size = options['pixel_size']
tool = make_tool_shape(maker, tool_diameter, pixel_size)
if options['expand']:
if options['expand'] == 1: pixel = 1
else: pixel = 0
tw, th = tool.shape
w1 = w + 2*tw
h1 = h + 2*th
        nim1 = numarray.zeros((h1, w1), 'Float32') + pixel
        nim1[th:th+h, tw:tw+w] = nim
nim = nim1
w, h = w1, h1
nim = nim * depth
if options['invert']:
nim = -nim
else:
nim = nim - depth
rows = options['pattern'] != 1
columns = options['pattern'] != 0
columns_first = options['pattern'] == 3
spindle_speed = options['spindle_speed']
if rows: convert_rows = convert_makers[options['converter']]()
else: convert_rows = None
if columns: convert_cols = convert_makers[options['converter']]()
else: convert_cols = None
if options['bounded'] and rows and columns:
slope = tan(options['contact_angle'] * pi / 180)
if columns_first:
convert_rows = Reduce_Scan_Lace(convert_rows, slope, step+1)
else:
convert_cols = Reduce_Scan_Lace(convert_cols, slope, step+1)
if options['bounded'] > 1:
if columns_first:
convert_cols = Reduce_Scan_Lace(convert_cols, slope, step+1)
else:
convert_rows = Reduce_Scan_Lace(convert_rows, slope, step+1)
units = unitcodes[options['units']]
convert(nim, units, tool, pixel_size, step,
options['safety_height'], options['tolerance'], options['feed_rate'],
convert_rows, convert_cols, columns_first, ArcEntryCut(options['plunge_feed_rate'], .125),
spindle_speed, options['roughing_offset'], options['roughing_depth'], options['feed_rate'])
if __name__ == '__main__':
main()
# vim:sw=4:sts=4:et:
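# Illustrative sketch, not from the upstream file: the depth-mapping step in
# main() above, restated as a small standalone function. numpy is used here as
# a stand-in for the legacy numarray module; 'gray' is assumed to be an 8-bit
# grayscale array and 'depth' a positive machining depth, mirroring the ui()
# options.
def depth_map(gray, depth, invert=False, normalize=False):
    """Map 8-bit pixel values to non-positive tool depths (0 at the surface)."""
    import numpy as np
    img = np.asarray(gray, dtype=np.float32)
    if normalize:
        a, b = img.min(), img.max()
        img = (img - a) / (b - a) if a != b else img / 255.0
    else:
        img = img / 255.0
    img = img * depth
    # Non-inverted images cut deepest where the image is dark.
    return -img if invert else img - depth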
|
yishinli/emc2
|
src/emc/usr_intf/axis/scripts/image-to-gcode.py
|
Python
|
lgpl-2.1
| 26,563
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class A(AutotoolsPackage):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
version('2.0', '2.0_a_hash')
variant(
'foo',
values=('bar', 'baz', 'fee'),
default='bar',
description='',
multi=True
)
variant(
'foobar',
values=('bar', 'baz', 'fee'),
default='bar',
description='',
multi=False
)
variant('bvv', default=True, description='The good old BV variant')
depends_on('b', when='foobar=bar')
def with_or_without_fee(self, activated):
if not activated:
return '--no-fee'
return '--fee-all-the-time'
def autoreconf(self, spec, prefix):
pass
def configure(self, spec, prefix):
pass
def build(self, spec, prefix):
pass
def install(self, spec, prefix):
pass
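# Illustrative sketch, not part of the mock package: roughly what Spack's
# with_or_without() helper produces for a multi-valued variant such as 'foo'
# above. This standalone function only shows the idea; the real helper on
# AutotoolsPackage also consults custom hooks like with_or_without_fee().
def demo_with_or_without(allowed_values, active_values):
    """Return --with-<v>/--without-<v> flags for each allowed variant value."""
    return ['--with-%s' % v if v in active_values else '--without-%s' % v
            for v in allowed_values]

# demo_with_or_without(('bar', 'baz', 'fee'), {'bar'})
# -> ['--with-bar', '--without-baz', '--without-fee']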
|
skosukhin/spack
|
var/spack/repos/builtin.mock/packages/a/package.py
|
Python
|
lgpl-2.1
| 2,262
|
# -*- coding: utf8 -*-
# Extract entities from text
#
# Author: Romary Dupuis <romary.dupuis@altarika.com>
#
# Copyright (C) 2016 Romary Dupuis
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from commis import Command
from commis import color
from feet.entities.extractor import Extractor
from feet.entities.registry import Registry
class ExtractCommand(Command):
name = 'extract'
help = 'extract a list of entities from text'
args = {
'--text': {
'metavar': 'TEXT',
'required': False,
'help': 'plain text'
},
'--registry': {
'metavar': 'REGISTRY',
'default': 'feet',
'required': False,
'help': 'registry of entities'
},
'--entity': {
'metavar': 'ENTITY',
'required': True,
'help': 'entity dictionary'
},
'--grammar': {
'metavar': 'GRAMMAR',
'required': False,
'help': 'grammar that defines entities in a sentence'
},
'--path': {
'metavar': 'PATH',
'required': False,
'help': 'path to the file that will be processed'
},
'--lang': {
'metavar': 'LANG',
'default': 'en',
'help': 'language of text: en, ja, fr etc.'
},
'--prefix': {
'metavar': 'PREFIX',
'default': 'feet',
'help': 'prefix used for all keys of entity'
}
}
def handle(self, args):
"""
CLI to extract entities from text.
"""
if args.text is None and args.path is None:
return color.format('* no text source specified', color.RED)
registry = Registry.find_or_create(args.registry,
key_prefix=args.prefix)
entity = registry.get_dict(args.entity)
engine = Extractor(entity, args.grammar)
if args.path is not None:
text = open(args.path).read()
else:
text = args.text
results = engine.extract(text, args.lang)
entities = []
for element in results[0]:
if element['entity_found'] == 1:
entities = list(set(entities).union(
element['entity_candidates']))
if len(entities) > 0:
print(color.format('%d entities detected' % len(entities),
color.GREEN))
print('\n'.join(entities))
else:
print(color.format('no entities detected', color.RED))
# print(color.format('%d' % results[1].elapsed, color.LIGHT_MAGENTA))
return '* text processed according to %s entity' %\
(color.format(args.entity, color.GREEN))
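# Illustrative sketch, not from the original module: calling the handler
# directly instead of through the commis CLI. The 'cities' entity name is
# hypothetical, and a populated Registry backend is assumed to be reachable.
if __name__ == '__main__':
    from argparse import Namespace
    cmd = ExtractCommand()
    print(cmd.handle(Namespace(text='Paris and Tokyo are capital cities.',
                               path=None, registry='feet', entity='cities',
                               grammar=None, lang='en', prefix='feet')))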
|
Altarika/feet
|
feet/commands/extract.py
|
Python
|
lgpl-2.1
| 2,821
|
#! /usr/bin/env python
"""
Sample for python PCSC wrapper module: send a Control Code to a card or
reader
__author__ = "Ludovic Rousseau"
Copyright 2007-2010 Ludovic Rousseau
Author: Ludovic Rousseau, mailto:ludovic.rousseau@free.fr
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import print_function
from smartcard.scard import *
from smartcard.util import toBytes
try:
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
if hresult != SCARD_S_SUCCESS:
raise error(
'Failed to establish context: ' + SCardGetErrorMessage(hresult))
print('Context established!')
try:
hresult, readers = SCardListReaders(hcontext, [])
if hresult != SCARD_S_SUCCESS:
raise error(
'Failed to list readers: ' + SCardGetErrorMessage(hresult))
print('PCSC Readers:', readers)
if len(readers) < 1:
raise error('No smart card readers')
for zreader in readers:
print('Trying to Control reader:', zreader)
try:
hresult, hcard, dwActiveProtocol = SCardConnect(
hcontext, zreader, SCARD_SHARE_DIRECT, SCARD_PROTOCOL_T0)
if hresult != SCARD_S_SUCCESS:
raise error(
'Unable to connect: ' + SCardGetErrorMessage(hresult))
print('Connected with active protocol', dwActiveProtocol)
try:
if 'winscard' == resourceManager:
# IOCTL_SMARTCARD_GET_ATTRIBUTE = SCARD_CTL_CODE(2)
hresult, response = SCardControl(
hcard,
SCARD_CTL_CODE(2),
toBytes("%.8lx" % SCARD_ATTR_VENDOR_NAME))
if hresult != SCARD_S_SUCCESS:
raise error(
'SCardControl failed: ' +\
SCardGetErrorMessage(hresult))
r = ""
for i in range(len(response)):
r += "%c" % response[i]
print('SCARD_ATTR_VENDOR_NAME:', r)
elif 'pcsclite' == resourceManager:
# get firmware on Gemplus readers
hresult, response = SCardControl(
hcard,
SCARD_CTL_CODE(1),
[0x02])
if hresult != SCARD_S_SUCCESS:
raise error(
'SCardControl failed: ' + \
SCardGetErrorMessage(hresult))
r = ""
for i in range(len(response)):
r += "%c" % response[i]
print('Control:', r)
finally:
hresult = SCardDisconnect(hcard, SCARD_UNPOWER_CARD)
if hresult != SCARD_S_SUCCESS:
raise error(
'Failed to disconnect: ' + \
SCardGetErrorMessage(hresult))
print('Disconnected')
except error as message:
print(error, message)
finally:
hresult = SCardReleaseContext(hcontext)
if hresult != SCARD_S_SUCCESS:
raise error(
'Failed to release context: ' + \
SCardGetErrorMessage(hresult))
print('Released context.')
except error as e:
print(e)
import sys
if 'win32' == sys.platform:
print('press Enter to continue')
sys.stdin.read(1)
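# Informational sketch, not from the upstream sample: SCARD_CTL_CODE expands
# differently under the two resource managers, which is why the sample
# branches on 'resourceManager'. The authoritative values come from
# smartcard.scard itself; this helper only illustrates the two conventions.
def demo_scard_ctl_code(code, pcsclite=True):
    if pcsclite:
        # PCSC-lite convention
        return 0x42000000 + code
    # Windows winscard convention:
    # CTL_CODE(FILE_DEVICE_SMARTCARD, code, METHOD_BUFFERED, FILE_ANY_ACCESS)
    return (0x31 << 16) | (code << 2)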
|
moreati/pyscard
|
smartcard/Examples/scard-api/sample_control.py
|
Python
|
lgpl-2.1
| 4,389
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLimma(RPackage):
"""Data analysis, linear models and differential expression
for microarray data."""
homepage = "https://www.bioconductor.org/packages/limma/"
url = "https://www.bioconductor.org/packages/release/bioc/src/contrib/limma_3.32.6.tar.gz"
list_url = homepage
version('3.32.6', 'df5dc2b85189a24e939efa3a8e6abc41')
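# Usage note (assuming this file sits in a Spack package repository): the
# declared version would typically be installed with `spack install r-limma@3.32.6`.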
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/r-limma/package.py
|
Python
|
lgpl-2.1
| 1,617
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import slopos
class TestTagger(unittest.TestCase):
def setUp(self):
slopos.load_from_path("slopos/sl-tagger.pickle")
def testSentenceTagging(self):
tagged = slopos.tag("To je test.")
self.assertEqual(tagged, [('To', 'ZK-SEI'), ('je', 'GP-STE-N'), ('test', 'SOMETN'), ('.', '-None-')])
def testListTagging(self):
tagged = slopos.tag(["To", "je", "test"])
self.assertEqual(tagged, [('To', 'ZK-SEI'), ('je', 'GP-STE-N'), ('test', 'SOMETN')])
def testUnicodeSentenceTagging(self):
tagged = slopos.tag("V kožuščku zelene lisice stopiclja jezen otrok.")
self.assertEqual(tagged, [('V', 'DM'), ('kožuščku', 'SOMEM'), ('zelene', 'PPNZER'), ('lisice', 'SOZER,'),
('stopiclja', 'GGNSTE'), ('jezen', 'PPNMEIN'), ('otrok', 'SOMEI.'), ('.', '-None-')])
if __name__ == "__main__":
unittest.main()
|
izacus/slo_pos
|
tests.py
|
Python
|
lgpl-2.1
| 987
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import datetime
import time
import gettext
from pseudo_cluster.task import Task_record
from pseudo_cluster.tasks_list import Tasks_list
from pseudo_cluster.extended_task import Extended_task_record
from pseudo_cluster.actions_list import Action_list
def get_submit_string(self,time_limit,duration):
"""
    Builds the command line used to submit
    the task to the Slurm queue via sbatch.
"""
s=list()
s.append("sbatch")
#
    # Uncomment for verbose Slurm debugging
#
#s.append("-vv")
s.append("--account=%s" % self.task_class)
s.append("--comment=\"Pseudo cluster emulating task\"")
s.append("--job-name=\"pseudo_cluster|%s|%s\"" % (self.job_id, self.job_name))
try:
limit=self.other["memory_limit"]
except KeyError:
limit="0"
if int(limit) > 0:
s.append("--mem=%d" % int(limit))
s.append("--ntasks=%d" % self.required_cpus)
s.append("--partition=%s" % self.partition)
if self.priority !=0:
s.append("--priority=%d" % self.priority)
if time_limit > 0:
s.append("--time=%d" % time_limit)
#
# Path to this script must be available
# from environment variable PATH
#
s.append(self.path_to_task)
s.append("-t")
s.append(str(duration))
s.append("-s")
s.append(self.task_state)
return s
def get_cancel_string(self):
return [ "scancel" , str(self.actual_task_id) ]
def parse_task_id(self,f,first_line):
"""
    Extracts the task ID from the file, given the first line
    that has already been read from it.
    Does not close the file.
"""
try:
tup=first_line.split(' ')
except:
return False
if (tup[0] == "Submitted") and (tup[1] == "batch"):
self.actual_task_id=int(tup[3])
return True
return False
def main(argv=None):
"""
    Program entry point.
"""
if argv == None:
argv=sys.argv
gettext.install('pseudo-cluster')
parser= argparse.ArgumentParser(
description=_("""
    This program submits tasks to the Slurm queue.
    The list of tasks is read from a statistics file. Each task is
    queued with the user and group identifiers recorded in the
    statistics. All of the time consumed by the tasks is compressed
    by the given factor, and instead of the real code of the original
    job a script is run that simply does nothing for the required
    number of seconds.
"""),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=_("For example, it can be run like this:\n ")+argv[0]+" --time-compress 30"
)
parser.add_argument(
'--time-compress',
dest='compress_times',
type=int,
required=True,
help=_("Во сколько раз сжимать время. Напиример: 10")
)
parser.add_argument(
'--time-interval',
dest='interval',
type=int,
required=False,
default=2,
help=_("Раз во сколько минут обращаться к системе ведения очередей")
)
parser.add_argument(
'--prefix',
dest='prefix',
required=False,
default="./",
help=_("префикс, по которому находится файл со статистикой")
)
parser.add_argument(
'--path-to-task-script',
dest='path_to_task',
required=False,
default="/usr/local/bin/pseudo_cluster_task.sh",
help=_("""
            Path to the script that implements the task body
            on the pseudo-cluster.
""")
)
args=parser.parse_args()
if os.geteuid() != 0:
print _("""
        This program requires
        root privileges.
        Run it as the root user,
        or use the sudo command.
""")
return 2
#
    # Register the methods that will be called on objects
    # of the Extended_task_record class
#
Extended_task_record.get_submit_string=get_submit_string
Extended_task_record.get_cancel_string=get_cancel_string
Extended_task_record.parse_task_id=parse_task_id
tasks_list=Tasks_list()
tasks_list.read_statistics_from_file(args.prefix)
extended_tasks=dict()
num_tasks=len(tasks_list)
begin_time=tasks_list[0].time_submit
last_task=0;
actions_list=Action_list()
while last_task != num_tasks-1:
end_time=begin_time+datetime.timedelta(minutes=args.interval*args.compress_times)
begin_actions_time=datetime.datetime.utcnow()
for i in xrange(0,num_tasks):
if i < last_task:
continue
task=tasks_list[i]
if task.time_submit < begin_time:
last_task=i
if task.time_submit < end_time:
if task.job_id not in extended_tasks:
extended_task=Extended_task_record()
extended_task.fill_by_task(task,args.path_to_task)
actions_list.register_action(extended_task,"submit")
extended_tasks[task.job_id]=extended_task
if (task.time_end < end_time) and (task.task_state == "canceled"):
actions_list.register_action(extended_tasks[task.job_id],"cancel")
actions_list.do_actions(args.compress_times)
print begin_time
print end_time
print "last_task=%d, num_tasks=%d" % (last_task,num_tasks)
delay_value = datetime.datetime.utcnow()- begin_actions_time
if delay_value < datetime.timedelta(minutes=args.interval):
how_much_sleep=args.interval*60-delay_value.total_seconds()
print (_("will sleep %d") % how_much_sleep)
time.sleep(how_much_sleep)
begin_time=end_time
if __name__ == "__main__":
sys.exit(main())
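# Usage note, not from the original script: a typical invocation, run as root;
# the statistics prefix shown here is hypothetical (the default is "./"):
#
#   sudo ./run_pseudo_tasks_slurm.py --time-compress 30 --time-interval 2 \
#        --prefix /var/lib/pseudo_cluster/
#
# With these values each polling pass covers interval * compress_times
# = 2 * 30 = 60 minutes of the original trace in roughly 2 minutes of
# wall-clock time.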
|
pseudo-cluster/pseudo-cluster
|
scripts/run_pseudo_tasks_slurm.py
|
Python
|
lgpl-2.1
| 7,019
|